diff --git a/src/intel/vulkan/anv_acceleration_structure.c b/src/intel/vulkan/anv_acceleration_structure.c
index 20dc202f4d4..0e4802f6be8 100644
--- a/src/intel/vulkan/anv_acceleration_structure.c
+++ b/src/intel/vulkan/anv_acceleration_structure.c
@@ -121,7 +121,7 @@ anv_GetAccelerationStructureDeviceAddressKHR(
                    pInfo->accelerationStructure);
 
    assert(!anv_address_is_null(accel->address));
-   assert(accel->address.bo->flags & EXEC_OBJECT_PINNED);
+   assert(anv_bo_is_pinned(accel->address.bo));
 
    return anv_address_physical(accel->address);
 }
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index a35eefc370d..2da2df5e339 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1602,9 +1602,7 @@ anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
 static void
 anv_bo_finish(struct anv_device *device, struct anv_bo *bo)
 {
-   if (bo->offset != 0 &&
-       (bo->flags & EXEC_OBJECT_PINNED) &&
-       !bo->has_fixed_address)
+   if (bo->offset != 0 && anv_bo_is_pinned(bo) && !bo->has_fixed_address)
       anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
 
    if (bo->map && !bo->from_host_ptr)
@@ -1620,7 +1618,7 @@ anv_bo_vma_alloc_or_close(struct anv_device *device,
                           enum anv_bo_alloc_flags alloc_flags,
                           uint64_t explicit_address)
 {
-   assert(bo->flags & EXEC_OBJECT_PINNED);
+   assert(anv_bo_is_pinned(bo));
    assert(explicit_address == intel_48b_address(explicit_address));
 
    uint32_t align = 4096;
@@ -1746,7 +1744,7 @@ anv_device_alloc_bo(struct anv_device *device,
       }
    }
 
-   if (new_bo.flags & EXEC_OBJECT_PINNED) {
+   if (anv_bo_is_pinned(&new_bo)) {
       VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
                                                   alloc_flags,
                                                   explicit_address);
@@ -1887,7 +1885,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
          (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
    };
 
-   if (new_bo.flags & EXEC_OBJECT_PINNED) {
+   if (anv_bo_is_pinned(&new_bo)) {
       VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
                                                   alloc_flags,
                                                   client_address);
@@ -2015,7 +2013,7 @@ anv_device_import_bo(struct anv_device *device,
          (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
    };
 
-   if (new_bo.flags & EXEC_OBJECT_PINNED) {
+   if (anv_bo_is_pinned(&new_bo)) {
       assert(new_bo._ccs_size == 0);
       VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
                                                   alloc_flags,
diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index 08377b4601a..ca5480707ac 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -181,7 +181,7 @@ anv_reloc_list_add_bo(struct anv_reloc_list *list,
                       struct anv_bo *target_bo)
 {
    assert(!target_bo->is_wrapper);
-   assert(target_bo->flags & EXEC_OBJECT_PINNED);
+   assert(anv_bo_is_pinned(target_bo));
 
    uint32_t idx = target_bo->gem_handle;
    VkResult result = anv_reloc_list_grow_deps(list, alloc,
@@ -211,7 +211,7 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    assert(unwrapped_target_bo->gem_handle > 0);
    assert(unwrapped_target_bo->refcount > 0);
 
-   if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED)
+   if (anv_bo_is_pinned(unwrapped_target_bo))
       return anv_reloc_list_add_bo(list, alloc, unwrapped_target_bo);
 
    VkResult result = anv_reloc_list_grow(list, alloc, 1);
@@ -492,8 +492,8 @@ anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
       /* Use a bogus presumed offset to force a relocation */
       prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
    } else {
-      assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
-      assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
+      assert(anv_bo_is_pinned(prev_bbo->bo));
+      assert(anv_bo_is_pinned(next_bbo->bo));
 
       write_reloc(cmd_buffer->device,
                   prev_bbo->bo->map + bb_start_offset + 4,
@@ -2053,7 +2053,7 @@ anv_queue_execbuf_locked(struct anv_queue *queue,
 
    struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
    for (uint32_t k = 0; k < execbuf.bo_count; k++) {
-      if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
+      if (anv_bo_is_pinned(execbuf.bos[k]))
         assert(execbuf.bos[k]->offset == objects[k].offset);
      execbuf.bos[k]->offset = objects[k].offset;
   }
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index 0bda7c85fcb..7462a8c2d32 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -4408,7 +4408,7 @@ VkDeviceAddress anv_GetBufferDeviceAddress(
    ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);
 
    assert(!anv_address_is_null(buffer->address));
-   assert(buffer->address.bo->flags & EXEC_OBJECT_PINNED);
+   assert(anv_bo_is_pinned(buffer->address.bo));
 
    return anv_address_physical(buffer->address);
 }
@@ -4426,7 +4426,7 @@ uint64_t anv_GetDeviceMemoryOpaqueCaptureAddress(
 {
    ANV_FROM_HANDLE(anv_device_memory, memory, pInfo->memory);
 
-   assert(memory->bo->flags & EXEC_OBJECT_PINNED);
+   assert(anv_bo_is_pinned(memory->bo));
    assert(memory->bo->has_client_visible_address);
 
    return intel_48b_address(memory->bo->offset);
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 51dd4921179..896432be024 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -530,6 +530,28 @@ anv_bo_unwrap(struct anv_bo *bo)
    return bo;
 }
 
+static inline bool
+anv_bo_is_pinned(struct anv_bo *bo)
+{
+#if defined(GFX_VERx10) && GFX_VERx10 >= 90
+   /* Sky Lake and later always use softpin */
+   assert(bo->flags & EXEC_OBJECT_PINNED);
+   return true;
+#elif defined(GFX_VERx10) && GFX_VERx10 < 80
+   /* Haswell and earlier never use softpin */
+   assert(!(bo->flags & EXEC_OBJECT_PINNED));
+   assert(!bo->has_fixed_address);
+   return false;
+#else
+   /* If we don't have a GFX_VERx10 #define, we need to look at the BO. Also,
+    * for GFX version 8, we need to look at the BO because Broadwell softpins
+    * but Cherryview doesn't.
+    */
+   assert((bo->flags & EXEC_OBJECT_PINNED) || !bo->has_fixed_address);
+   return (bo->flags & EXEC_OBJECT_PINNED) != 0;
+#endif
+}
+
 /* Represents a lock-free linked list of "free" things. This is used by
  * both the block pool and the state pools. Unfortunately, in order to
  * solve the ABA problem, we can't use a single uint32_t head.
@@ -1653,9 +1675,7 @@ anv_address_is_null(struct anv_address addr)
 static inline uint64_t
 anv_address_physical(struct anv_address addr)
 {
-   if (addr.bo && (ANV_ALWAYS_SOFTPIN ||
-                   (addr.bo->flags & EXEC_OBJECT_PINNED))) {
-      assert(addr.bo->flags & EXEC_OBJECT_PINNED);
+   if (addr.bo && anv_bo_is_pinned(addr.bo)) {
       return intel_canonical_address(addr.bo->offset + addr.offset);
    } else {
       return intel_canonical_address(addr.offset);
@@ -1692,7 +1712,7 @@ _anv_combine_address(struct anv_batch *batch, void *location,
    if (address.bo == NULL) {
       return address.offset + delta;
    } else if (batch == NULL) {
-      assert(address.bo->flags & EXEC_OBJECT_PINNED);
+      assert(anv_bo_is_pinned(address.bo));
       return anv_address_physical(anv_address_add(address, delta));
    } else {
       assert(batch->start <= location && location < batch->end);
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index 42e9951fb4a..23b431eb1ad 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -5631,7 +5631,7 @@ genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer
       return;
    }
 
-   assert(vb_address.bo && (vb_address.bo->flags & EXEC_OBJECT_PINNED));
+   assert(vb_address.bo && anv_bo_is_pinned(vb_address.bo));
    bound->start = intel_48b_address(anv_address_physical(vb_address));
    bound->end = bound->start + vb_size;
    assert(bound->end > bound->start); /* No overflow */
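
Reviewer note: the key property of anv_bo_is_pinned() above is that it folds to a compile-time constant in per-generation translation units (Mesa builds genX_*.c once per hardware generation with GFX_VERx10 defined on the command line) while staying a runtime check in generation-agnostic files such as anv_allocator.c. The stand-alone sketch below illustrates that pattern only; demo_bo, DEMO_PINNED, and demo_bo_is_pinned are hypothetical stand-ins for anv_bo, EXEC_OBJECT_PINNED, and the new helper, not code from this patch.

/* Build gen-agnostic:  cc demo.c
 * Build per-gen:       cc -DGFX_VERx10=90 demo.c   (folds to constant true)
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_PINNED (1ull << 4)   /* hypothetical stand-in for EXEC_OBJECT_PINNED */

struct demo_bo {
   uint64_t flags;                /* execbuf flags, as on struct anv_bo */
};

static inline bool
demo_bo_is_pinned(const struct demo_bo *bo)
{
#if defined(GFX_VERx10) && GFX_VERx10 >= 90
   /* Per-gen unit, Gfx9+: the result is a compile-time constant, so
    * callers' branches on it disappear; the assert only verifies the BO
    * really was allocated softpinned. */
   assert(bo->flags & DEMO_PINNED);
   return true;
#elif defined(GFX_VERx10) && GFX_VERx10 < 80
   /* Per-gen unit, Gfx7.x and earlier: constant false. */
   assert(!(bo->flags & DEMO_PINNED));
   return false;
#else
   /* Gen-agnostic code, or GFX_VERx10 == 80 where Broadwell softpins but
    * Cherryview does not: inspect the BO at runtime. */
   return (bo->flags & DEMO_PINNED) != 0;
#endif
}

int main(void)
{
   struct demo_bo softpinned = { .flags = DEMO_PINNED };
   struct demo_bo relocated  = { .flags = 0 };

   /* In the gen-agnostic build both calls resolve at runtime.  (A per-gen
    * build would assert on the BO whose flags contradict its generation.) */
   return (demo_bo_is_pinned(&softpinned) && !demo_bo_is_pinned(&relocated))
          ? 0 : 1;
}

This is also why the helper can replace the old ANV_ALWAYS_SOFTPIN check in anv_address_physical(): gen-specific code such as genX_cmd_buffer.c gets the folded constant for free, while shared code keeps the exact flag test it had before.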