anv: Delete softpin checks

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18208>
Author: Kenneth Graunke, 2022-08-30 16:09:05 -07:00 (committed by Marge Bot)
Parent: 215b1b69cb
Commit: 4b5c29bad0
9 changed files with 41 additions and 173 deletions

----- changed file (name not shown) -----

@@ -117,7 +117,6 @@ anv_GetAccelerationStructureDeviceAddressKHR(
pInfo->accelerationStructure);
assert(!anv_address_is_null(accel->address));
- assert(anv_bo_is_pinned(accel->address.bo));
return anv_address_physical(accel->address);
}
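
The assert removed above was the last softpin guard on this path: once every BO is softpinned, its GPU virtual address is fixed by the driver's VMA allocator at creation time, and a VkDeviceAddress is plain arithmetic on that address. A minimal sketch of the idea, using a hypothetical helper name rather than anything from the patch:

#include <stdint.h>

/* Illustration only: bo_gpu_va stands in for the softpinned bo->offset.
 * The sum is sign-extended from bit 47 into the "canonical" form the
 * hardware expects, as intel_canonical_address() does.
 */
static inline uint64_t
example_device_address(uint64_t bo_gpu_va, uint64_t offset_in_bo)
{
   uint64_t addr = bo_gpu_va + offset_in_bo;
   return (uint64_t)(((int64_t)addr << 16) >> 16);
}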

----- changed file (name not shown) -----

@@ -1353,7 +1353,8 @@ anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
{
struct anv_physical_device *pdevice = device->physical;
- uint64_t bo_flags = 0;
+ uint64_t bo_flags = EXEC_OBJECT_PINNED;
if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS) &&
pdevice->supports_48bit_addresses)
bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
@@ -1369,16 +1370,13 @@ anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
bo_flags |= EXEC_OBJECT_ASYNC;
- if (pdevice->use_softpin)
- bo_flags |= EXEC_OBJECT_PINNED;
return bo_flags;
}
static void
anv_bo_finish(struct anv_device *device, struct anv_bo *bo)
{
- if (bo->offset != 0 && anv_bo_is_pinned(bo) && !bo->has_fixed_address)
+ if (bo->offset != 0 && !bo->has_fixed_address)
anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
if (bo->map && !bo->from_host_ptr)
@@ -1394,7 +1392,6 @@ anv_bo_vma_alloc_or_close(struct anv_device *device,
enum anv_bo_alloc_flags alloc_flags,
uint64_t explicit_address)
{
- assert(anv_bo_is_pinned(bo));
assert(explicit_address == intel_48b_address(explicit_address));
uint32_t align = 4096;
@@ -1532,15 +1529,11 @@ anv_device_alloc_bo(struct anv_device *device,
}
}
- if (anv_bo_is_pinned(&new_bo)) {
- VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
- alloc_flags,
- explicit_address);
- if (result != VK_SUCCESS)
- return result;
- } else {
- assert(!new_bo.has_client_visible_address);
- }
+ VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
+ alloc_flags,
+ explicit_address);
+ if (result != VK_SUCCESS)
+ return result;
if (new_bo._ccs_size > 0) {
assert(device->info->has_aux_map);
@@ -1665,16 +1658,12 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
(alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
};
- if (anv_bo_is_pinned(&new_bo)) {
- VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
- alloc_flags,
- client_address);
- if (result != VK_SUCCESS) {
- pthread_mutex_unlock(&cache->mutex);
- return result;
- }
- } else {
- assert(!new_bo.has_client_visible_address);
+ VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
+ alloc_flags,
+ client_address);
+ if (result != VK_SUCCESS) {
+ pthread_mutex_unlock(&cache->mutex);
+ return result;
+ }
*bo = new_bo;
@@ -1792,17 +1781,13 @@ anv_device_import_bo(struct anv_device *device,
(alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0,
};
- if (anv_bo_is_pinned(&new_bo)) {
- assert(new_bo._ccs_size == 0);
- VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
- alloc_flags,
- client_address);
- if (result != VK_SUCCESS) {
- pthread_mutex_unlock(&cache->mutex);
- return result;
- }
- } else {
- assert(!new_bo.has_client_visible_address);
+ assert(new_bo._ccs_size == 0);
+ VkResult result = anv_bo_vma_alloc_or_close(device, &new_bo,
+ alloc_flags,
+ client_address);
+ if (result != VK_SUCCESS) {
+ pthread_mutex_unlock(&cache->mutex);
+ return result;
+ }
*bo = new_bo;
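
Every allocator path above now calls anv_bo_vma_alloc_or_close() unconditionally and sets EXEC_OBJECT_PINNED in the BO flags. As background, a minimal sketch of what pinning means at the i915 execbuf level, assuming the stock drm-uapi header; the helper name is invented for illustration:

#include <string.h>
#include "drm-uapi/i915_drm.h"

/* With EXEC_OBJECT_PINNED set, the kernel must place the BO at the offset
 * userspace supplies instead of choosing one itself, so no relocation pass
 * (and no offset write-back after execbuf) is needed.
 */
static void
example_fill_exec_object(struct drm_i915_gem_exec_object2 *obj,
                         uint32_t gem_handle, uint64_t gpu_va,
                         uint64_t extra_flags)
{
   memset(obj, 0, sizeof(*obj));
   obj->handle = gem_handle;
   obj->offset = gpu_va;                           /* VA chosen by the driver's VMA allocator */
   obj->flags = extra_flags | EXEC_OBJECT_PINNED;  /* kernel honours obj->offset */
}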

----- changed file (name not shown) -----

@@ -184,7 +184,6 @@ anv_reloc_list_add_bo(struct anv_reloc_list *list,
struct anv_bo *target_bo)
{
assert(!target_bo->is_wrapper);
- assert(anv_bo_is_pinned(target_bo));
uint32_t idx = target_bo->gem_handle;
VkResult result = anv_reloc_list_grow_deps(list, alloc,
@@ -203,9 +202,6 @@ anv_reloc_list_add(struct anv_reloc_list *list,
uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
uint64_t *address_u64_out)
{
- struct drm_i915_gem_relocation_entry *entry;
- int index;
struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
if (address_u64_out)
@@ -214,26 +210,7 @@ anv_reloc_list_add(struct anv_reloc_list *list,
assert(unwrapped_target_bo->gem_handle > 0);
assert(unwrapped_target_bo->refcount > 0);
- if (anv_bo_is_pinned(unwrapped_target_bo))
- return anv_reloc_list_add_bo(list, alloc, unwrapped_target_bo);
- VkResult result = anv_reloc_list_grow(list, alloc, 1);
- if (result != VK_SUCCESS)
- return result;
- /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
- index = list->num_relocs++;
- list->reloc_bos[index] = target_bo;
- entry = &list->relocs[index];
- entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
- entry->delta = delta;
- entry->offset = offset;
- entry->presumed_offset = target_bo_offset;
- entry->read_domains = 0;
- entry->write_domain = 0;
- VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
- return VK_SUCCESS;
+ return anv_reloc_list_add_bo(list, alloc, unwrapped_target_bo);
}
static void
@@ -454,9 +431,6 @@ anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
assert(((*bb_start >> 29) & 0x07) == 0);
assert(((*bb_start >> 23) & 0x3f) == 49);
- assert(anv_bo_is_pinned(prev_bbo->bo));
- assert(anv_bo_is_pinned(next_bbo->bo));
write_reloc(cmd_buffer->device,
prev_bbo->bo->map + bb_start_offset + 4,
next_bbo->bo->offset + next_bbo_offset, true);
@@ -1861,13 +1835,6 @@ anv_queue_exec_utrace_locked(struct anv_queue *queue,
if (ret)
result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
- struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
- for (uint32_t k = 0; k < execbuf.bo_count; k++) {
- if (anv_bo_is_pinned(execbuf.bos[k]))
- assert(execbuf.bos[k]->offset == objects[k].offset);
- execbuf.bos[k]->offset = objects[k].offset;
- }
error:
anv_execbuf_finish(&execbuf);
@@ -2094,13 +2061,6 @@ anv_queue_exec_locked(struct anv_queue *queue,
result = vk_queue_set_lost(&queue->vk, "sync wait failed");
}
- struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
- for (uint32_t k = 0; k < execbuf.bo_count; k++) {
- if (anv_bo_is_pinned(execbuf.bos[k]))
- assert(execbuf.bos[k]->offset == objects[k].offset);
- execbuf.bos[k]->offset = objects[k].offset;
- }
error:
anv_execbuf_finish(&execbuf);
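
With the relocation entries and the post-execbuf offset write-back loops gone, anv_reloc_list_add() collapses into anv_reloc_list_add_bo(): a "reloc list" is now just a record of which BOs a batch references so they can be made resident at submit time. A self-contained sketch of that bookkeeping with invented names (anv itself keeps a comparable bitset grown by anv_reloc_list_grow_deps(), seen above):

#include <stdbool.h>
#include <stdint.h>

/* One bit per GEM handle; the caller is assumed to have sized `words`
 * so that gem_handle / 32 is in range.
 */
struct example_residency_set {
   uint32_t *words;
   uint32_t num_words;
};

static inline void
example_residency_add(struct example_residency_set *set, uint32_t gem_handle)
{
   set->words[gem_handle / 32] |= 1u << (gem_handle % 32);
}

static inline bool
example_residency_contains(const struct example_residency_set *set,
                           uint32_t gem_handle)
{
   return (set->words[gem_handle / 32] >> (gem_handle % 32)) & 1u;
}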

----- changed file (name not shown) -----

@@ -868,8 +868,6 @@ anv_physical_device_try_create(struct vk_instance *vk_instance,
if (result != VK_SUCCESS)
goto fail_base;
- device->use_softpin = true;
device->has_context_isolation =
anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);
@@ -903,13 +901,9 @@ anv_physical_device_try_create(struct vk_instance *vk_instance,
env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);
device->use_call_secondary =
- device->use_softpin &&
!env_var_as_boolean("ANV_DISABLE_SECONDARY_CMD_BUFFER_CALLS", false);
- /* We first got the A64 messages on broadwell and we can only use them if
- * we can pass addresses directly into the shader which requires softpin.
- */
- device->has_a64_buffer_access = device->use_softpin;
+ device->has_a64_buffer_access = true;
device->has_bindless_images = true;
device->has_bindless_samplers = true;
@@ -2882,8 +2876,7 @@ intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
return NULL;
struct anv_device *device = (struct anv_device*)driver_ctx;
- assert(device->physical->supports_48bit_addresses &&
- device->physical->use_softpin);
+ assert(device->physical->supports_48bit_addresses);
struct anv_state_pool *pool = &device->dynamic_state_pool;
buf->state = anv_state_pool_alloc(pool, size, size);
@@ -4401,7 +4394,6 @@ VkDeviceAddress anv_GetBufferDeviceAddress(
ANV_FROM_HANDLE(anv_buffer, buffer, pInfo->buffer);
assert(!anv_address_is_null(buffer->address));
- assert(anv_bo_is_pinned(buffer->address.bo));
return anv_address_physical(buffer->address);
}
@@ -4419,7 +4411,6 @@ uint64_t anv_GetDeviceMemoryOpaqueCaptureAddress(
{
ANV_FROM_HANDLE(anv_device_memory, memory, pInfo->memory);
- assert(anv_bo_is_pinned(memory->bo));
assert(memory->bo->has_client_visible_address);
return intel_48b_address(memory->bo->offset);
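
Both entry points above now derive their result directly from the BO's fixed GPU VA: anv_GetBufferDeviceAddress() returns it in canonical form, anv_GetDeviceMemoryOpaqueCaptureAddress() truncated to 48 bits. A small sketch of the two conversions, with hypothetical helpers written to mirror the behaviour of intel_canonical_address() and intel_48b_address():

#include <assert.h>
#include <stdint.h>

static inline uint64_t
example_canonical_48b(uint64_t addr)
{
   /* Sign-extend bit 47 into bits 63:48. */
   return (uint64_t)(((int64_t)addr << 16) >> 16);
}

static inline uint64_t
example_truncate_48b(uint64_t addr)
{
   /* Keep only the low 48 bits. */
   return addr & ((1ull << 48) - 1);
}

static inline void
example_round_trip(uint64_t gpu_va)
{
   /* Canonicalization depends only on the low 48 bits, so truncating
    * first loses nothing. */
   assert(example_canonical_48b(example_truncate_48b(gpu_va)) ==
          example_canonical_48b(gpu_va));
}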

----- changed file (name not shown) -----

@@ -86,7 +86,7 @@ anv_shader_stage_to_nir(struct anv_device *device,
.int8 = true,
.int16 = true,
.int64 = true,
- .int64_atomics = pdevice->use_softpin,
+ .int64_atomics = true,
.integer_functions2 = true,
.mesh_shading_nv = pdevice->vk.supported_extensions.NV_mesh_shader,
.min_lod = true,

----- changed file (name not shown) -----

@@ -565,28 +565,6 @@ anv_bo_unwrap(struct anv_bo *bo)
return bo;
}
- static inline bool
- anv_bo_is_pinned(struct anv_bo *bo)
- {
- #if defined(GFX_VERx10) && GFX_VERx10 >= 90
- /* Sky Lake and later always uses softpin */
- assert(bo->flags & EXEC_OBJECT_PINNED);
- return true;
- #elif defined(GFX_VERx10) && GFX_VERx10 < 80
- /* Haswell and earlier never use softpin */
- assert(!(bo->flags & EXEC_OBJECT_PINNED));
- assert(!bo->has_fixed_address);
- return false;
- #else
- /* If we don't have a GFX_VERx10 #define, we need to look at the BO. Also,
- * for GFX version 8, we need to look at the BO because Broadwell softpins
- * but Cherryview doesn't.
- */
- assert((bo->flags & EXEC_OBJECT_PINNED) || !bo->has_fixed_address);
- return (bo->flags & EXEC_OBJECT_PINNED) != 0;
- #endif
- }
struct anv_address {
struct anv_bo *bo;
int64_t offset;
@@ -613,11 +591,8 @@ anv_address_is_null(struct anv_address addr)
static inline uint64_t
anv_address_physical(struct anv_address addr)
{
- if (addr.bo && anv_bo_is_pinned(addr.bo)) {
- return intel_canonical_address(addr.bo->offset + addr.offset);
- } else {
- return intel_canonical_address(addr.offset);
- }
+ uint64_t address = (addr.bo ? addr.bo->offset : 0ull) + addr.offset;
+ return intel_canonical_address(address);
}
static inline struct anv_address
@@ -962,7 +937,6 @@ struct anv_physical_device {
bool has_userptr_probe;
uint64_t gtt_size;
- bool use_softpin;
bool always_use_bindless;
bool use_call_secondary;
@@ -1224,12 +1198,6 @@ struct anv_device {
struct intel_ds_device ds;
};
- #if defined(GFX_VERx10) && GFX_VERx10 >= 90
- #define ANV_ALWAYS_SOFTPIN true
- #else
- #define ANV_ALWAYS_SOFTPIN false
- #endif
static inline struct anv_state
anv_binding_table_pool_alloc(struct anv_device *device)
{
@@ -1504,17 +1472,9 @@ static inline uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
void *location, struct anv_bo *bo, uint32_t delta)
{
- uint64_t address_u64 = 0;
- VkResult result;
+ uint64_t address_u64 = bo->offset + delta;
+ VkResult result = anv_reloc_list_add_bo(batch->relocs, batch->alloc, bo);
- if (ANV_ALWAYS_SOFTPIN) {
- address_u64 = bo->offset + delta;
- result = anv_reloc_list_add_bo(batch->relocs, batch->alloc, bo);
- } else {
- result = anv_reloc_list_add(batch->relocs, batch->alloc,
- location - batch->start, bo, delta,
- &address_u64);
- }
if (unlikely(result != VK_SUCCESS)) {
anv_batch_set_error(batch, result);
return 0;
@@ -1540,7 +1500,6 @@ _anv_combine_address(struct anv_batch *batch, void *location,
if (address.bo == NULL) {
return address.offset + delta;
} else if (batch == NULL) {
- assert(anv_bo_is_pinned(address.bo));
return anv_address_physical(anv_address_add(address, delta));
} else {
assert(batch->start <= location && location < batch->end);
@@ -2505,7 +2464,7 @@ anv_gfx8_9_vb_cache_range_needs_workaround(struct anv_vb_cache_range *bound,
return false;
}
- assert(vb_address.bo && anv_bo_is_pinned(vb_address.bo));
+ assert(vb_address.bo);
bound->start = intel_48b_address(anv_address_physical(vb_address));
bound->end = bound->start + vb_size;
assert(bound->end > bound->start); /* No overflow */
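
The Gfx8/9 vertex-buffer cache workaround above can likewise assume a fixed buffer address and compute the bound range directly. A self-contained sketch of that bookkeeping, with made-up type and function names:

#include <assert.h>
#include <stdint.h>

struct example_vb_range {
   uint64_t start;
   uint64_t end;
};

static inline void
example_track_vb_range(struct example_vb_range *bound,
                       uint64_t vb_gpu_va, uint64_t vb_size)
{
   bound->start = vb_gpu_va & ((1ull << 48) - 1);  /* low 48 bits, as above */
   bound->end = bound->start + vb_size;
   assert(bound->end > bound->start);              /* no overflow */
}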

----- changed file (name not shown) -----

@@ -82,45 +82,23 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
struct blorp_address address, uint32_t delta)
{
struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
- VkResult result;
- if (ANV_ALWAYS_SOFTPIN) {
- result = anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
- &cmd_buffer->vk.pool->alloc,
- address.buffer);
- if (unlikely(result != VK_SUCCESS))
- anv_batch_set_error(&cmd_buffer->batch, result);
- return;
- }
- uint64_t address_u64 = 0;
- result = anv_reloc_list_add(&cmd_buffer->surface_relocs,
- &cmd_buffer->vk.pool->alloc,
- ss_offset, address.buffer,
- address.offset + delta,
- &address_u64);
- if (result != VK_SUCCESS)
+ VkResult result = anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
+ &cmd_buffer->vk.pool->alloc,
+ address.buffer);
+ if (unlikely(result != VK_SUCCESS))
anv_batch_set_error(&cmd_buffer->batch, result);
- void *dest = anv_block_pool_map(
- &cmd_buffer->device->surface_state_pool.block_pool, ss_offset, 8);
- write_reloc(cmd_buffer->device, dest, address_u64, false);
}
static uint64_t
blorp_get_surface_address(struct blorp_batch *blorp_batch,
struct blorp_address address)
{
- if (ANV_ALWAYS_SOFTPIN) {
- struct anv_address anv_addr = {
- .bo = address.buffer,
- .offset = address.offset,
- };
- return anv_address_physical(anv_addr);
- } else {
- /* We'll let blorp_surface_reloc write the address. */
- return 0;
- }
+ struct anv_address anv_addr = {
+ .bo = address.buffer,
+ .offset = address.offset,
+ };
+ return anv_address_physical(anv_addr);
}
#if GFX_VER == 9

----- changed file (name not shown) -----

@@ -26,9 +26,7 @@
int main(void)
{
- struct anv_physical_device physical_device = {
- .use_softpin = true,
- };
+ struct anv_physical_device physical_device = {};
struct anv_device device = {};
struct anv_block_pool pool;

----- changed file (name not shown) -----

@@ -26,9 +26,7 @@
int main(void)
{
- struct anv_physical_device physical_device = {
- .use_softpin = true,
- };
+ struct anv_physical_device physical_device = {};
struct anv_device device = {};
struct anv_state_pool state_pool;