radv: fix CP DMA with NULL PRT pages on GFX8-9
On GFX8-9 (starting from Polaris10), CP DMA is broken with NULL PRT pages: reads don't return 0 and writes aren't discarded, which can cause GPU hangs. Fix that by always using the compute path when a BO is sparse.

Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/12828
Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35071>
parent 235f70e475
commit 25eb836eec
8 changed files with 122 additions and 56 deletions
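The gist of the workaround, before the full diff: on the affected GPUs, any fill or copy whose source or destination BO is sparse is forced onto the compute path instead of CP DMA. Below is a minimal, self-contained sketch of that decision; the names (gpu_info, buffer_object, must_use_compute_path) are illustrative stand-ins rather than RADV's actual identifiers — the real logic lives in radv_is_compute_required() in the hunks that follow.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's GPU-info and buffer-object structs. */
struct gpu_info {
   bool has_cp_dma_with_null_prt_bug; /* true on the affected GFX8-9 parts */
};

struct buffer_object {
   bool is_sparse; /* sparse (PRT) buffer; may contain NULL-mapped pages */
};

/* CP DMA must be avoided when either side of the transfer is sparse, because
 * reads from NULL PRT pages don't return 0 and writes to them aren't discarded. */
static bool
must_use_compute_path(const struct gpu_info *info, const struct buffer_object *src,
                      const struct buffer_object *dst)
{
   return info->has_cp_dma_with_null_prt_bug &&
          ((src && src->is_sparse) || (dst && dst->is_sparse));
}

int
main(void)
{
   const struct gpu_info polaris10 = {.has_cp_dma_with_null_prt_bug = true};
   const struct buffer_object sparse = {.is_sparse = true};
   const struct buffer_object regular = {.is_sparse = false};

   printf("copy into sparse BO -> compute path: %d\n", must_use_compute_path(&polaris10, &regular, &sparse));
   printf("copy between regular BOs -> compute path: %d\n", must_use_compute_path(&polaris10, &regular, &regular));
   return 0;
}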
@@ -67,6 +67,7 @@ struct radv_meta_saved_state {
 enum radv_copy_flags {
    RADV_COPY_FLAGS_DEVICE_LOCAL = 1 << 0,
+   RADV_COPY_FLAGS_SPARSE = 1 << 1,
 };
 
 extern const VkFormat radv_fs_key_format_exemplars[NUM_META_FS_KEYS];

@@ -16,18 +16,42 @@
 #include "radv_cs.h"
 #include "vk_common_entrypoints.h"
 
+static enum radv_copy_flags
+radv_get_copy_flags_from_bo(const struct radeon_winsys_bo *bo)
+{
+   enum radv_copy_flags copy_flags = 0;
+
+   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
+      copy_flags |= RADV_COPY_FLAGS_DEVICE_LOCAL;
+   if (bo->is_virtual)
+      copy_flags |= RADV_COPY_FLAGS_SPARSE;
+
+   return copy_flags;
+}
+
 struct fill_constants {
    uint64_t addr;
    uint32_t max_offset;
    uint32_t data;
 };
 
+struct radv_fill_memory_key {
+   enum radv_meta_object_key_type type;
+   bool use_16B_copy;
+};
+
 static VkResult
-get_fill_memory_pipeline(struct radv_device *device, VkPipeline *pipeline_out, VkPipelineLayout *layout_out)
+get_fill_memory_pipeline(struct radv_device *device, uint64_t size, VkPipeline *pipeline_out,
+                         VkPipelineLayout *layout_out)
 {
-   enum radv_meta_object_key_type key = RADV_META_OBJECT_KEY_FILL_MEMORY;
+   const bool use_16B_copy = size >= 16;
+   struct radv_fill_memory_key key;
    VkResult result;
 
+   memset(&key, 0, sizeof(key));
+   key.type = RADV_META_OBJECT_KEY_FILL_MEMORY;
+   key.use_16B_copy = use_16B_copy;
+
    const VkPushConstantRange pc_range = {
      .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
      .size = sizeof(struct fill_constants),
@@ -44,7 +68,7 @@ get_fill_memory_pipeline(struct radv_device *device, VkPipeline *pipeline_out, VkPipelineLayout *layout_out)
       return VK_SUCCESS;
    }
 
-   nir_shader *cs = radv_meta_nir_build_fill_memory_shader(device);
+   nir_shader *cs = radv_meta_nir_build_fill_memory_shader(device, use_16B_copy ? 16 : 4);
 
    const VkPipelineShaderStageCreateInfo stage_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
@@ -74,12 +98,29 @@ struct copy_constants {
    uint32_t max_offset;
 };
 
+struct radv_copy_memory_key {
+   enum radv_meta_object_key_type type;
+   bool use_16B_copy;
+};
+
+static bool
+radv_is_copy_memory_4B_aligned(uint64_t src_va, uint64_t dst_va, uint64_t size)
+{
+   return !(size & 3) && !(src_va & 3) && !(dst_va & 3);
+}
+
 static VkResult
-get_copy_memory_pipeline(struct radv_device *device, VkPipeline *pipeline_out, VkPipelineLayout *layout_out)
+get_copy_memory_pipeline(struct radv_device *device, uint64_t src_va, uint64_t dst_va, uint64_t size,
+                         VkPipeline *pipeline_out, VkPipelineLayout *layout_out)
 {
-   enum radv_meta_object_key_type key = RADV_META_OBJECT_KEY_COPY_MEMORY;
+   const bool use_16B_copy = size >= 16 && radv_is_copy_memory_4B_aligned(src_va, dst_va, size);
+   struct radv_copy_memory_key key;
    VkResult result;
 
+   memset(&key, 0, sizeof(key));
+   key.type = RADV_META_OBJECT_KEY_COPY_MEMORY;
+   key.use_16B_copy = use_16B_copy;
+
    const VkPushConstantRange pc_range = {
      .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
      .size = sizeof(struct copy_constants),
@@ -96,7 +137,7 @@ get_copy_memory_pipeline(struct radv_device *device, VkPipeline *pipeline_out, VkPipelineLayout *layout_out)
       return VK_SUCCESS;
    }
 
-   nir_shader *cs = radv_meta_nir_build_copy_memory_shader(device);
+   nir_shader *cs = radv_meta_nir_build_copy_memory_shader(device, use_16B_copy ? 16 : 1);
 
    const VkPipelineShaderStageCreateInfo stage_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
@@ -129,7 +170,7 @@ radv_compute_fill_memory(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64
    VkPipeline pipeline;
    VkResult result;
 
-   result = get_fill_memory_pipeline(device, &pipeline, &layout);
+   result = get_fill_memory_pipeline(device, size, &pipeline, &layout);
    if (result != VK_SUCCESS) {
       vk_command_buffer_set_error(&cmd_buffer->vk, result);
       return;
@@ -139,18 +180,26 @@ radv_compute_fill_memory(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64
 
    radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
 
-   assert(size >= 16 && size <= UINT32_MAX);
+   assert(size <= UINT32_MAX);
 
    struct fill_constants fill_consts = {
      .addr = va,
-     .max_offset = size - 16,
      .data = data,
    };
+   uint32_t dim_x;
+
+   if (size >= 16) {
+      fill_consts.max_offset = size - 16;
+      dim_x = DIV_ROUND_UP(size, 16);
+   } else {
+      fill_consts.max_offset = size - 4;
+      dim_x = DIV_ROUND_UP(size, 4);
+   }
 
    vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), layout, VK_SHADER_STAGE_COMPUTE_BIT, 0,
                               sizeof(fill_consts), &fill_consts);
 
-   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);
+   radv_unaligned_dispatch(cmd_buffer, dim_x, 1, 1);
 
    radv_meta_restore(&saved_state, cmd_buffer);
 }
@@ -159,12 +208,13 @@ static void
 radv_compute_copy_memory(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t dst_va, uint64_t size)
 {
    struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
+   const bool use_16B_copy = size >= 16 && radv_is_copy_memory_4B_aligned(src_va, dst_va, size);
    struct radv_meta_saved_state saved_state;
    VkPipelineLayout layout;
    VkPipeline pipeline;
    VkResult result;
 
-   result = get_copy_memory_pipeline(device, &pipeline, &layout);
+   result = get_copy_memory_pipeline(device, src_va, dst_va, size, &pipeline, &layout);
    if (result != VK_SUCCESS) {
       vk_command_buffer_set_error(&cmd_buffer->vk, result);
       return;
@@ -174,18 +224,26 @@ radv_compute_copy_memory(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t dst_va, uint64_t size)
 
    radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
 
-   assert(size >= 16 && size <= UINT32_MAX);
+   assert(size <= UINT32_MAX);
 
    struct copy_constants copy_consts = {
      .src_addr = src_va,
      .dst_addr = dst_va,
-     .max_offset = size - 16,
    };
+   uint32_t dim_x;
+
+   if (use_16B_copy) {
+      copy_consts.max_offset = size - 16;
+      dim_x = DIV_ROUND_UP(size, 16);
+   } else {
+      copy_consts.max_offset = size;
+      dim_x = size;
+   }
 
    vk_common_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), layout, VK_SHADER_STAGE_COMPUTE_BIT, 0,
                               sizeof(copy_consts), &copy_consts);
 
-   radv_unaligned_dispatch(cmd_buffer, DIV_ROUND_UP(size, 16), 1, 1);
+   radv_unaligned_dispatch(cmd_buffer, dim_x, 1, 1);
 
    radv_meta_restore(&saved_state, cmd_buffer);
 }
@@ -207,12 +265,24 @@ radv_prefer_compute_or_cp_dma(const struct radv_device *device, uint64_t size, e
    return use_compute;
 }
 
+static bool
+radv_is_compute_required(const struct radv_device *device, enum radv_copy_flags src_copy_flags,
+                         enum radv_copy_flags dst_copy_flags)
+{
+   const struct radv_physical_device *pdev = radv_device_physical(device);
+
+   /* On GFX8-9, CP DMA is broken with NULL PRT pages and the workaround is to use compute. */
+   return pdev->info.has_cp_dma_with_null_prt_bug &&
+          ((src_copy_flags & RADV_COPY_FLAGS_SPARSE) || (dst_copy_flags & RADV_COPY_FLAGS_SPARSE));
+}
+
 static uint32_t
 radv_fill_memory_internal(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *image, uint64_t va,
                           uint64_t size, uint32_t value, enum radv_copy_flags copy_flags)
 {
    struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
-   bool use_compute = radv_prefer_compute_or_cp_dma(device, size, copy_flags, copy_flags);
+   const bool use_compute = radv_is_compute_required(device, copy_flags, copy_flags) ||
+                            radv_prefer_compute_or_cp_dma(device, size, copy_flags, copy_flags);
    uint32_t flush_bits = 0;
 
    assert(!(va & 3));
@@ -246,10 +316,7 @@ radv_fill_image(struct radv_cmd_buffer *cmd_buffer, const struct radv_image *ima
    struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
    const uint64_t va = image->bindings[0].addr + offset;
    struct radeon_winsys_bo *bo = image->bindings[0].bo;
-   enum radv_copy_flags copy_flags = 0;
-
-   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-      copy_flags |= RADV_COPY_FLAGS_DEVICE_LOCAL;
+   const enum radv_copy_flags copy_flags = radv_get_copy_flags_from_bo(bo);
 
    radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
 
@@ -261,10 +328,7 @@ radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer, struct radeon_winsys_bo *bo
                  uint32_t value)
 {
    struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
-   enum radv_copy_flags copy_flags = 0;
-
-   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-      copy_flags |= RADV_COPY_FLAGS_DEVICE_LOCAL;
+   const enum radv_copy_flags copy_flags = radv_get_copy_flags_from_bo(bo);
 
    radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
 
@@ -292,8 +356,9 @@ radv_copy_memory(struct radv_cmd_buffer *cmd_buffer, uint64_t src_va, uint64_t d
                  enum radv_copy_flags src_copy_flags, enum radv_copy_flags dst_copy_flags)
 {
    struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
-   const bool use_compute = !(size & 3) && !(src_va & 3) && !(dst_va & 3) &&
-                            radv_prefer_compute_or_cp_dma(device, size, src_copy_flags, dst_copy_flags);
+   const bool use_compute = radv_is_compute_required(device, src_copy_flags, dst_copy_flags) ||
+                            (radv_is_copy_memory_4B_aligned(src_va, dst_va, size) &&
+                             radv_prefer_compute_or_cp_dma(device, size, src_copy_flags, dst_copy_flags));
 
    if (cmd_buffer->qf == RADV_QUEUE_TRANSFER) {
       radv_sdma_copy_memory(device, cmd_buffer->cs, src_va, dst_va, size);
@@ -311,12 +376,9 @@ radv_CmdCopyBuffer2(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2 *pCop
    VK_FROM_HANDLE(radv_buffer, src_buffer, pCopyBufferInfo->srcBuffer);
    VK_FROM_HANDLE(radv_buffer, dst_buffer, pCopyBufferInfo->dstBuffer);
    struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
-   enum radv_copy_flags src_copy_flags = 0, dst_copy_flags = 0;
-
-   if (src_buffer->bo->initial_domain & RADEON_DOMAIN_VRAM)
-      src_copy_flags |= RADV_COPY_FLAGS_DEVICE_LOCAL;
-   if (dst_buffer->bo->initial_domain & RADEON_DOMAIN_VRAM)
-      dst_copy_flags |= RADV_COPY_FLAGS_DEVICE_LOCAL;
+   const enum radv_copy_flags src_copy_flags = radv_get_copy_flags_from_bo(src_buffer->bo);
+   const enum radv_copy_flags dst_copy_flags = radv_get_copy_flags_from_bo(dst_buffer->bo);
 
    radv_suspend_conditional_rendering(cmd_buffer);
 
@@ -371,14 +433,11 @@ radv_update_memory(struct radv_cmd_buffer *cmd_buffer, uint64_t va, uint64_t siz
    if (size < RADV_BUFFER_UPDATE_THRESHOLD && cmd_buffer->qf != RADV_QUEUE_TRANSFER) {
       radv_update_memory_cp(cmd_buffer, va, data, size);
    } else {
-      enum radv_copy_flags src_copy_flags = 0;
       uint32_t buf_offset;
 
      radv_cmd_buffer_upload_data(cmd_buffer, size, data, &buf_offset);
 
-      if (cmd_buffer->upload.upload_bo->initial_domain & RADEON_DOMAIN_VRAM)
-         src_copy_flags |= RADV_COPY_FLAGS_DEVICE_LOCAL;
-
+      const enum radv_copy_flags src_copy_flags = radv_get_copy_flags_from_bo(cmd_buffer->upload.upload_bo);
      const uint64_t src_va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + buf_offset;
 
      radv_copy_memory(cmd_buffer, src_va, va, size, src_copy_flags, dst_copy_flags);
@@ -393,10 +452,8 @@ radv_CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDevice
    VK_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
    struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
    const uint64_t dst_va = vk_buffer_address(&dst_buffer->vk, dstOffset);
-   enum radv_copy_flags dst_copy_flags = 0;
-
-   if (dst_buffer->bo->initial_domain & RADEON_DOMAIN_VRAM)
-      dst_copy_flags |= RADV_COPY_FLAGS_DEVICE_LOCAL;
+   const enum radv_copy_flags dst_copy_flags = radv_get_copy_flags_from_bo(dst_buffer->bo);
 
    radv_suspend_conditional_rendering(cmd_buffer);
 
@@ -123,21 +123,23 @@ radv_meta_nir_break_on_count(nir_builder *b, nir_variable *var, nir_def *count)
 }
 
 nir_shader *
-radv_meta_nir_build_fill_memory_shader(struct radv_device *dev)
+radv_meta_nir_build_fill_memory_shader(struct radv_device *dev, uint32_t bytes_per_invocation)
 {
-   nir_builder b = radv_meta_nir_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_fill");
+   assert(bytes_per_invocation == 4 || bytes_per_invocation == 16);
+
+   nir_builder b = radv_meta_nir_init_shader(dev, MESA_SHADER_COMPUTE, "meta_fill_memory_%dB", bytes_per_invocation);
    b.shader->info.workgroup_size[0] = 64;
 
    nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
    nir_def *buffer_addr = nir_pack_64_2x32(&b, nir_channels(&b, pconst, 0b0011));
    nir_def *max_offset = nir_channel(&b, pconst, 2);
-   nir_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, 4);
+   nir_def *data = nir_swizzle(&b, nir_channel(&b, pconst, 3), (unsigned[]){0, 0, 0, 0}, bytes_per_invocation / 4);
 
    nir_def *global_id =
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));
 
-   nir_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset);
+   nir_def *offset = nir_imin(&b, nir_imul_imm(&b, global_id, bytes_per_invocation), max_offset);
    nir_def *dst_addr = nir_iadd(&b, buffer_addr, nir_u2u64(&b, offset));
    nir_build_store_global(&b, data, dst_addr, .align_mul = 4);
@@ -145,9 +147,14 @@ radv_meta_nir_build_fill_memory_shader(struct radv_device *dev)
 }
 
 nir_shader *
-radv_meta_nir_build_copy_memory_shader(struct radv_device *dev)
+radv_meta_nir_build_copy_memory_shader(struct radv_device *dev, uint32_t bytes_per_invocation)
 {
-   nir_builder b = radv_meta_nir_init_shader(dev, MESA_SHADER_COMPUTE, "meta_buffer_copy");
+   assert(bytes_per_invocation == 1 || bytes_per_invocation == 16);
+
+   const uint32_t num_components = bytes_per_invocation == 1 ? 1 : 4;
+   const uint32_t bit_size = bytes_per_invocation == 1 ? 8 : 32;
+
+   nir_builder b = radv_meta_nir_init_shader(dev, MESA_SHADER_COMPUTE, "meta_copy_memory_%dB", bytes_per_invocation);
    b.shader->info.workgroup_size[0] = 64;
 
    nir_def *pconst = nir_load_push_constant(&b, 4, 32, nir_imm_int(&b, 0), .range = 16);
@@ -159,10 +166,11 @@ radv_meta_nir_build_copy_memory_shader(struct radv_device *dev)
      nir_iadd(&b, nir_imul_imm(&b, nir_channel(&b, nir_load_workgroup_id(&b), 0), b.shader->info.workgroup_size[0]),
               nir_load_local_invocation_index(&b));
 
-   nir_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, 16), max_offset));
+   nir_def *offset = nir_u2u64(&b, nir_imin(&b, nir_imul_imm(&b, global_id, bytes_per_invocation), max_offset));
 
-   nir_def *data = nir_build_load_global(&b, 4, 32, nir_iadd(&b, src_addr, offset), .align_mul = 4);
-   nir_build_store_global(&b, data, nir_iadd(&b, dst_addr, offset), .align_mul = 4);
+   nir_def *data =
+      nir_build_load_global(&b, num_components, bit_size, nir_iadd(&b, src_addr, offset), .align_mul = bit_size / 8);
+   nir_build_store_global(&b, data, nir_iadd(&b, dst_addr, offset), .align_mul = bit_size / 8);
 
    return b.shader;
 }

@@ -31,8 +31,8 @@ nir_def *radv_meta_nir_get_global_ids(nir_builder *b, unsigned num_components);
 
 void radv_meta_nir_break_on_count(nir_builder *b, nir_variable *var, nir_def *count);
 
-nir_shader *radv_meta_nir_build_fill_memory_shader(struct radv_device *dev);
-nir_shader *radv_meta_nir_build_copy_memory_shader(struct radv_device *dev);
+nir_shader *radv_meta_nir_build_fill_memory_shader(struct radv_device *dev, uint32_t bytes_per_invocation);
+nir_shader *radv_meta_nir_build_copy_memory_shader(struct radv_device *dev, uint32_t bytes_per_invocation);
 
 nir_shader *radv_meta_nir_build_blit_vertex_shader(struct radv_device *dev);
 nir_shader *radv_meta_nir_build_blit_copy_fragment_shader(struct radv_device *dev, enum glsl_sampler_dim tex_dim);

@@ -173,6 +173,7 @@ struct radeon_winsys_bo {
    /* buffer is added to the BO list of all submissions */
    bool use_global_list;
    bool gfx12_allow_dcc;
+   bool is_virtual; /* sparse buffers */
    enum radeon_bo_domain initial_domain;
    uint64_t obj_id;
 };

@@ -135,8 +135,8 @@ radv_amdgpu_winsys_bo_virtual_bind(struct radeon_winsys *_ws, struct radeon_wins
    VkResult result;
    int r;
 
-   assert(parent->is_virtual);
-   assert(!bo || !bo->is_virtual);
+   assert(parent->base.is_virtual);
+   assert(!bo || !bo->base.is_virtual);
 
    /* When the BO is NULL, AMDGPU will reset the PTE VA range to the initial state. Otherwise, it
     * will first unmap all existing VA that overlap the requested range and then map.
@@ -283,7 +283,7 @@ radv_amdgpu_log_bo(struct radv_amdgpu_winsys *ws, struct radv_amdgpu_winsys_bo *
    bo_log->va = bo->base.va;
    bo_log->size = bo->base.size;
    bo_log->timestamp = os_time_get_nano();
-   bo_log->is_virtual = bo->is_virtual;
+   bo_log->is_virtual = bo->base.is_virtual;
    bo_log->destroyed = destroyed;
 
    u_rwlock_wrlock(&ws->log_bo_list_lock);
@@ -336,7 +336,7 @@ radv_amdgpu_winsys_bo_destroy(struct radeon_winsys *_ws, struct radeon_winsys_bo
 
    radv_amdgpu_log_bo(ws, bo, true);
 
-   if (bo->is_virtual) {
+   if (bo->base.is_virtual) {
       int r;
 
      /* Clear mappings of this PRT VA region. */
@@ -415,7 +415,7 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned
    bo->base.va = va;
    bo->base.size = size;
    bo->va_handle = va_handle;
-   bo->is_virtual = !!(flags & RADEON_FLAG_VIRTUAL);
+   bo->base.is_virtual = !!(flags & RADEON_FLAG_VIRTUAL);
 
    if (flags & RADEON_FLAG_VIRTUAL) {
       ranges = realloc(NULL, sizeof(struct radv_amdgpu_map_range));

@@ -35,7 +35,6 @@ struct radv_amdgpu_map_range {
 struct radv_amdgpu_winsys_bo {
    struct radeon_winsys_bo base;
    amdgpu_va_handle va_handle;
-   bool is_virtual;
    uint8_t priority;
 
    union {

@@ -714,7 +714,7 @@ radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs, struct radeon_winsys_bo *_b
    if (cs->status != VK_SUCCESS)
       return;
 
-   if (bo->is_virtual) {
+   if (bo->base.is_virtual) {
       radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
       return;
    }