/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <unistd.h>
#include <limits.h>
#include <assert.h>
#include <linux/memfd.h>
#include <sys/mman.h>

#include "anv_private.h"

#include "util/hash_table.h"
#include "util/simple_mtx.h"

#ifdef HAVE_VALGRIND
#define VG_NOACCESS_READ(__ptr) ({                       \
   VALGRIND_MAKE_MEM_DEFINED((__ptr), sizeof(*(__ptr))); \
   __typeof(*(__ptr)) __val = *(__ptr);                  \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));\
   __val;                                                \
})
#define VG_NOACCESS_WRITE(__ptr, __val) ({                 \
   VALGRIND_MAKE_MEM_UNDEFINED((__ptr), sizeof(*(__ptr))); \
   *(__ptr) = (__val);                                     \
   VALGRIND_MAKE_MEM_NOACCESS((__ptr), sizeof(*(__ptr)));  \
})
#else
#define VG_NOACCESS_READ(__ptr) (*(__ptr))
#define VG_NOACCESS_WRITE(__ptr, __val) (*(__ptr) = (__val))
#endif

/* Design goals:
 *
 *  - Lock free (except when resizing underlying bos)
 *
 *  - Constant time allocation with typically only one atomic
 *
 *  - Multiple allocation sizes without fragmentation
 *
 *  - Can grow while keeping addresses and offset of contents stable
 *
 *  - All allocations within one bo so we can point one of the
 *    STATE_BASE_ADDRESS pointers at it.
 *
 * The overall design is a two-level allocator: top level is a fixed size, big
 * block (8k) allocator, which operates out of a bo.  Allocation is done by
 * either pulling a block from the free list or growing the used range of the
 * bo.  Growing the range may run out of space in the bo which we then need to
 * grow.  Growing the bo is tricky in a multi-threaded, lockless environment:
 * we need to keep all pointers and contents in the old map valid.  GEM bos in
 * general can't grow, but we use a trick: we create a memfd and use ftruncate
 * to grow it as necessary.  We mmap the new size and then create a gem bo for
 * it using the new gem userptr ioctl.  Without heavy-handed locking around
 * our allocation fast-path, there isn't really a way to munmap the old mmap,
 * so we just keep it around until garbage collection time.  While the block
 * allocator is lockless for normal operations, we block other threads trying
 * to allocate while we're growing the map.  It shouldn't happen often, and
 * growing is fast anyway.
 *
 * At the next level we can use various sub-allocators.  The state pool is a
 * pool of smaller, fixed size objects, which operates much like the block
 * pool.  It uses a free list for freeing objects, but when it runs out of
 * space it just allocates a new block from the block pool.  This allocator is
 * intended for longer lived state objects such as SURFACE_STATE and most
 * other persistent state objects in the API.  We may need to track more info
 * with these objects and a pointer back to the CPU object (eg VkImage).  In
 * those cases we just allocate a slightly bigger object and put the extra
 * state after the GPU state object.
 *
 * The state stream allocator works similar to how the i965 DRI driver streams
 * all its state.  Even with Vulkan, we need to emit transient state (whether
 * surface state base or dynamic state base), and for that we can just get a
 * block and fill it up.  These cases are local to a command buffer and the
 * sub-allocator need not be thread safe.  The streaming allocator gets a new
 * block when it runs out of space and chains them together so they can be
 * easily freed.
 */
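
/* Rough usage sketch of the two levels described above.  This is
 * illustrative only: the device, start address, sizes and flags below are
 * made-up values, not taken from this file.
 *
 *    struct anv_block_pool block_pool;
 *    anv_block_pool_init(&block_pool, device, 0x180000000ULL,
 *                        16 * 1024, EXEC_OBJECT_PINNED);
 *
 *    uint32_t padding;
 *    int32_t offset = anv_block_pool_alloc(&block_pool, 8192, &padding);
 *    void *map = anv_block_pool_map(&block_pool, offset);
 *
 * The state pool and state stream allocators further down sit on top of the
 * block pool and hand out the smaller, per-object allocations.
 */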

/* Allocations are always at least 64 byte aligned, so 1 is an invalid value.
 * We use it to indicate the free list is empty. */
#define EMPTY UINT32_MAX

#define PAGE_SIZE 4096

struct anv_mmap_cleanup {
   void *map;
   size_t size;
   uint32_t gem_handle;
};

#define ANV_MMAP_CLEANUP_INIT ((struct anv_mmap_cleanup){0})

#ifndef HAVE_MEMFD_CREATE
static inline int
memfd_create(const char *name, unsigned int flags)
{
   return syscall(SYS_memfd_create, name, flags);
}
#endif

static inline uint32_t
ilog2_round_up(uint32_t value)
{
   assert(value != 0);
   return 32 - __builtin_clz(value - 1);
}

static inline uint32_t
round_to_power_of_two(uint32_t value)
{
   return 1 << ilog2_round_up(value);
}
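
/* For example: ilog2_round_up(24) == 5, so round_to_power_of_two(24) == 32,
 * while round_to_power_of_two(32) == 32 (already a power of two).
 */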

struct anv_state_table_cleanup {
   void *map;
   size_t size;
};

#define ANV_STATE_TABLE_CLEANUP_INIT ((struct anv_state_table_cleanup){0})
#define ANV_STATE_ENTRY_SIZE (sizeof(struct anv_free_entry))

static VkResult
anv_state_table_expand_range(struct anv_state_table *table, uint32_t size);

VkResult
anv_state_table_init(struct anv_state_table *table,
                     struct anv_device *device,
                     uint32_t initial_entries)
{
   VkResult result;

   table->device = device;

   table->fd = memfd_create("state table", MFD_CLOEXEC);
   if (table->fd == -1)
      return vk_error(VK_ERROR_INITIALIZATION_FAILED);

   /* Just make it 2GB up-front.  The Linux kernel won't actually back it
    * with pages until we either map and fault on one of them or we use
    * userptr and send a chunk of it off to the GPU.
    */
   if (ftruncate(table->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   if (!u_vector_init(&table->cleanups,
                      round_to_power_of_two(sizeof(struct anv_state_table_cleanup)),
                      128)) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   table->state.next = 0;
   table->state.end = 0;
   table->size = 0;

   uint32_t initial_size = initial_entries * ANV_STATE_ENTRY_SIZE;
   result = anv_state_table_expand_range(table, initial_size);
   if (result != VK_SUCCESS)
      goto fail_cleanups;

   return VK_SUCCESS;

fail_cleanups:
   u_vector_finish(&table->cleanups);
fail_fd:
   close(table->fd);

   return result;
}

static VkResult
anv_state_table_expand_range(struct anv_state_table *table, uint32_t size)
{
   void *map;
   struct anv_state_table_cleanup *cleanup;

   /* Assert that we only ever grow the pool */
   assert(size >= table->state.end);

   /* Make sure that we don't go outside the bounds of the memfd */
   if (size > BLOCK_POOL_MEMFD_SIZE)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cleanup = u_vector_add(&table->cleanups);
   if (!cleanup)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *cleanup = ANV_STATE_TABLE_CLEANUP_INIT;

   /* Just leak the old map until we destroy the pool.  We can't munmap it
    * without races or imposing locking on the block allocate fast path.  On
    * the whole the leaked maps add up to less than the size of the
    * current map.  MAP_POPULATE seems like the right thing to do, but we
    * should try to get some numbers.
    */
   map = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_SHARED | MAP_POPULATE, table->fd, 0);
   if (map == MAP_FAILED) {
      return vk_errorf(table->device->instance, table->device,
                       VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
   }

   cleanup->map = map;
   cleanup->size = size;

   table->map = map;
   table->size = size;

   return VK_SUCCESS;
}

static VkResult
anv_state_table_grow(struct anv_state_table *table)
{
   VkResult result = VK_SUCCESS;

   uint32_t used = align_u32(table->state.next * ANV_STATE_ENTRY_SIZE,
                             PAGE_SIZE);
   uint32_t old_size = table->size;

   /* The block pool is always initialized to a nonzero size and this function
    * is always called after initialization.
    */
   assert(old_size > 0);

   uint32_t required = MAX2(used, old_size);
   if (used * 2 <= required) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
      goto done;
   }

   uint32_t size = old_size * 2;
   while (size < required)
      size *= 2;

   assert(size > table->size);

   result = anv_state_table_expand_range(table, size);

done:
   return result;
}

void
anv_state_table_finish(struct anv_state_table *table)
{
   struct anv_state_table_cleanup *cleanup;

   u_vector_foreach(cleanup, &table->cleanups) {
      if (cleanup->map)
         munmap(cleanup->map, cleanup->size);
   }

   u_vector_finish(&table->cleanups);

   close(table->fd);
}

VkResult
anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
                    uint32_t count)
{
   struct anv_block_state state, old, new;
   VkResult result;

   assert(idx);

   while (1) {
      state.u64 = __sync_fetch_and_add(&table->state.u64, count);
      if (state.next + count <= state.end) {
         assert(table->map);
         struct anv_free_entry *entry = &table->map[state.next];
         for (int i = 0; i < count; i++) {
            entry[i].state.idx = state.next + i;
         }
         *idx = state.next;
         return VK_SUCCESS;
      } else if (state.next <= state.end) {
         /* We allocated the first block outside the pool so we have to grow
          * the pool.  pool_state->next acts as a mutex: threads who try to
          * allocate now will get block indexes above the current limit and
          * hit futex_wait below.
          */
         new.next = state.next + count;
         do {
            result = anv_state_table_grow(table);
            if (result != VK_SUCCESS)
               return result;
            new.end = table->size / ANV_STATE_ENTRY_SIZE;
         } while (new.end < new.next);

         old.u64 = __sync_lock_test_and_set(&table->state.u64, new.u64);
         if (old.next != state.next)
            futex_wake(&table->state.end, INT_MAX);
      } else {
         futex_wait(&table->state.end, state.end, NULL);
         continue;
      }
   }
}
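
/* Illustrative scenario for the grow protocol above (made-up numbers): with
 * state = { .next = 60, .end = 64 } and two threads each adding 8 entries,
 * the first fetch-and-add returns next = 60 <= end, so that thread grows the
 * table and swaps in the new end; the second returns next = 68 > end and
 * sits in futex_wait() until the grower's futex_wake() publishes the larger
 * end and its retry succeeds.
 */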

void
anv_free_list_push(union anv_free_list *list,
                   struct anv_state_table *table,
                   uint32_t first, uint32_t count)
{
   union anv_free_list current, old, new;
   uint32_t last = first;

   for (uint32_t i = 1; i < count; i++, last++)
      table->map[last].next = last + 1;

   old = *list;
   do {
      current = old;
      table->map[last].next = current.offset;
      new.offset = first;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}

struct anv_state *
anv_free_list_pop(union anv_free_list *list,
                  struct anv_state_table *table)
{
   union anv_free_list current, new, old;

   current.u64 = list->u64;
   while (current.offset != EMPTY) {
      __sync_synchronize();
      new.offset = table->map[current.offset].next;
      new.count = current.count + 1;
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
      if (old.u64 == current.u64) {
         struct anv_free_entry *entry = &table->map[current.offset];
         return &entry->state;
      }
      current = old;
   }

   return NULL;
}
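
/* Typical pairing of the state table and free list above (illustrative only;
 * the entry count is a made-up value):
 *
 *    uint32_t idx;
 *    if (anv_state_table_add(&table, &idx, 16) == VK_SUCCESS)
 *       anv_free_list_push(&list, &table, idx, 16);
 *
 *    struct anv_state *state = anv_free_list_pop(&list, &table);
 *
 * The count packed next to the offset in anv_free_list is what guards the
 * compare-and-swap above against the ABA problem.
 */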

/* All pointers in the ptr_free_list are assumed to be page-aligned.  This
 * means that the bottom 12 bits should all be zero.
 */
#define PFL_COUNT(x) ((uintptr_t)(x) & 0xfff)
#define PFL_PTR(x) ((void *)((uintptr_t)(x) & ~(uintptr_t)0xfff))
#define PFL_PACK(ptr, count) ({                                           \
   (void *)(((uintptr_t)(ptr) & ~(uintptr_t)0xfff) | ((count) & 0xfff)); \
})
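
/* For example, with a page-aligned pointer p == 0x7f1234567000 and a count
 * of 5, PFL_PACK(p, 5) yields 0x7f1234567005; PFL_PTR() recovers p and
 * PFL_COUNT() recovers 5 (modulo 4096, since only 12 bits are available).
 */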

static bool
anv_ptr_free_list_pop(void **list, void **elem)
{
   void *current = *list;
   while (PFL_PTR(current) != NULL) {
      void **next_ptr = PFL_PTR(current);
      void *new_ptr = VG_NOACCESS_READ(next_ptr);
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(new_ptr, new_count);
      void *old = __sync_val_compare_and_swap(list, current, new);
      if (old == current) {
         *elem = PFL_PTR(current);
         return true;
      }
      current = old;
   }

   return false;
}

static void
anv_ptr_free_list_push(void **list, void *elem)
{
   void *old, *current;
   void **next_ptr = elem;

   /* The pointer-based free list requires that the pointer be
    * page-aligned.  This is because we use the bottom 12 bits of the
    * pointer to store a counter to solve the ABA concurrency problem.
    */
   assert(((uintptr_t)elem & 0xfff) == 0);

   old = *list;
   do {
      current = old;
      VG_NOACCESS_WRITE(next_ptr, PFL_PTR(current));
      unsigned new_count = PFL_COUNT(current) + 1;
      void *new = PFL_PACK(elem, new_count);
      old = __sync_val_compare_and_swap(list, current, new);
   } while (old != current);
}

static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size);

VkResult
anv_block_pool_init(struct anv_block_pool *pool,
                    struct anv_device *device,
                    uint64_t start_address,
                    uint32_t initial_size,
                    uint64_t bo_flags)
{
   VkResult result;

   pool->device = device;
   pool->bo_flags = bo_flags;
   pool->nbos = 0;
   pool->size = 0;
   pool->center_bo_offset = 0;
   pool->start_address = gen_canonical_address(start_address);
   pool->map = NULL;

   /* This pointer will always point to the first BO in the list */
   pool->bo = &pool->bos[0];

   anv_bo_init(pool->bo, 0, 0);

   if (!(pool->bo_flags & EXEC_OBJECT_PINNED)) {
      pool->fd = memfd_create("block pool", MFD_CLOEXEC);
      if (pool->fd == -1)
         return vk_error(VK_ERROR_INITIALIZATION_FAILED);

      /* Just make it 2GB up-front.  The Linux kernel won't actually back it
       * with pages until we either map and fault on one of them or we use
       * userptr and send a chunk of it off to the GPU.
       */
      if (ftruncate(pool->fd, BLOCK_POOL_MEMFD_SIZE) == -1) {
         result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
         goto fail_fd;
      }
   } else {
      pool->fd = -1;
   }

   if (!u_vector_init(&pool->mmap_cleanups,
                      round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
                      128)) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   pool->state.next = 0;
   pool->state.end = 0;
   pool->back_state.next = 0;
   pool->back_state.end = 0;

   result = anv_block_pool_expand_range(pool, 0, initial_size);
   if (result != VK_SUCCESS)
      goto fail_mmap_cleanups;

   return VK_SUCCESS;

fail_mmap_cleanups:
   u_vector_finish(&pool->mmap_cleanups);
fail_fd:
   if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
      close(pool->fd);

   return result;
}

void
anv_block_pool_finish(struct anv_block_pool *pool)
{
   struct anv_mmap_cleanup *cleanup;
   const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED);

   u_vector_foreach(cleanup, &pool->mmap_cleanups) {
      if (use_softpin)
         anv_gem_munmap(cleanup->map, cleanup->size);
      else
         munmap(cleanup->map, cleanup->size);

      if (cleanup->gem_handle)
         anv_gem_close(pool->device, cleanup->gem_handle);
   }

   u_vector_finish(&pool->mmap_cleanups);
   if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
      close(pool->fd);
}

static VkResult
anv_block_pool_expand_range(struct anv_block_pool *pool,
                            uint32_t center_bo_offset, uint32_t size)
{
   void *map;
   uint32_t gem_handle;
   struct anv_mmap_cleanup *cleanup;
   const bool use_softpin = !!(pool->bo_flags & EXEC_OBJECT_PINNED);

   /* Assert that we only ever grow the pool */
   assert(center_bo_offset >= pool->back_state.end);
   assert(size - center_bo_offset >= pool->state.end);

   /* Assert that we don't go outside the bounds of the memfd */
   assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
   assert(use_softpin ||
          size - center_bo_offset <=
          BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);

   cleanup = u_vector_add(&pool->mmap_cleanups);
   if (!cleanup)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *cleanup = ANV_MMAP_CLEANUP_INIT;

   uint32_t newbo_size = size - pool->size;
   if (use_softpin) {
      gem_handle = anv_gem_create(pool->device, newbo_size);
      map = anv_gem_mmap(pool->device, gem_handle, 0, newbo_size, 0);
      if (map == MAP_FAILED)
         return vk_errorf(pool->device->instance, pool->device,
                          VK_ERROR_MEMORY_MAP_FAILED, "gem mmap failed: %m");
      assert(center_bo_offset == 0);
   } else {
      /* Just leak the old map until we destroy the pool.  We can't munmap it
       * without races or imposing locking on the block allocate fast path.
       * On the whole the leaked maps add up to less than the size of the
       * current map.  MAP_POPULATE seems like the right thing to do, but we
       * should try to get some numbers.
       */
      map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_POPULATE, pool->fd,
                 BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
      if (map == MAP_FAILED)
         return vk_errorf(pool->device->instance, pool->device,
                          VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");

      /* Now that we mapped the new memory, we can write the new
       * center_bo_offset back into pool and update pool->map. */
      pool->center_bo_offset = center_bo_offset;
      pool->map = map + center_bo_offset;
      gem_handle = anv_gem_userptr(pool->device, map, size);
      if (gem_handle == 0) {
         munmap(map, size);
         return vk_errorf(pool->device->instance, pool->device,
                          VK_ERROR_TOO_MANY_OBJECTS, "userptr failed: %m");
      }
   }

   cleanup->map = map;
   cleanup->size = use_softpin ? newbo_size : size;
   cleanup->gem_handle = gem_handle;

   /* Regular objects are created I915_CACHING_CACHED on LLC platforms and
    * I915_CACHING_NONE on non-LLC platforms.  However, userptr objects are
    * always created as I915_CACHING_CACHED, which on non-LLC means
    * snooped.
    *
    * On platforms that support softpin, we are not going to use userptr
    * anymore, but we still want to rely on the snooped states.  So make sure
    * everything is set to I915_CACHING_CACHED.
    */
   if (!pool->device->info.has_llc)
      anv_gem_set_caching(pool->device, gem_handle, I915_CACHING_CACHED);

   /* For block pool BOs we have to be a bit careful about where we place them
    * in the GTT.  There are two documented workarounds for state base address
    * placement: Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
    * which state that those two base addresses do not support 48-bit
    * addresses and need to be placed in the bottom 32-bit range.
    * Unfortunately, this is not quite accurate.
    *
    * The real problem is that we always set the size of our state pools in
    * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
    * likely significantly smaller.  We do this because we do not know at the
    * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
    * the pool during command buffer building so we don't actually have a
    * valid final size.  If the address + size, as seen by STATE_BASE_ADDRESS,
    * overflows 48 bits, the GPU appears to treat all accesses to the buffer
    * as being out of bounds and returns zero.  For dynamic state, this
    * usually just leads to rendering corruptions, but shaders that are all
    * zero hang the GPU immediately.
    *
    * The easiest solution is to do exactly what the bogus workarounds say to
    * do: restrict these buffers to 32-bit addresses.  We could also pin the
    * BO to some particular location of our choosing, but that's significantly
    * more work than just not setting a flag.  So, we explicitly DO NOT set
    * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
    * hard work for us.
    */
   struct anv_bo *bo;
   uint32_t bo_size;
   uint64_t bo_offset;

   assert(pool->nbos < ANV_MAX_BLOCK_POOL_BOS);

   if (use_softpin) {
      /* With softpin, we add a new BO to the pool, and set its offset to right
       * where the previous BO ends (the end of the pool).
       */
      bo = &pool->bos[pool->nbos++];
      bo_size = newbo_size;
      bo_offset = pool->start_address + pool->size;
   } else {
      /* Without softpin, we just need one BO, and we already have a pointer to
       * it.  Simply "allocate" it from our array if we didn't do it before.
       * The offset doesn't matter since we are not pinning the BO anyway.
       */
      if (pool->nbos == 0)
         pool->nbos++;
      bo = pool->bo;
      bo_size = size;
      bo_offset = 0;
   }

   anv_bo_init(bo, gem_handle, bo_size);
   bo->offset = bo_offset;
   bo->flags = pool->bo_flags;
   bo->map = map;
   pool->size = size;

   return VK_SUCCESS;
}
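
/* To illustrate the 48-bit overflow described above (made-up numbers, and
 * assuming the 0xfffff size field counts 4K pages): a pool BO bound above
 * (1ull << 48) - 0xfffffull * 4096 would make base address + size exceed
 * 48 bits, so keeping these BOs in the low 32-bit range sidesteps the
 * problem entirely.
 */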

static struct anv_bo *
anv_block_pool_get_bo(struct anv_block_pool *pool, int32_t *offset)
{
   struct anv_bo *bo, *bo_found = NULL;
   int32_t cur_offset = 0;

   assert(offset);

   if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
      return pool->bo;

   anv_block_pool_foreach_bo(bo, pool) {
      if (*offset < cur_offset + bo->size) {
         bo_found = bo;
         break;
      }
      cur_offset += bo->size;
   }

   assert(bo_found != NULL);
   *offset -= cur_offset;

   return bo_found;
}

/** Returns current memory map of the block pool.
 *
 * The returned pointer points to the map for the memory at the specified
 * offset.  The offset parameter is relative to the "center" of the block pool
 * rather than the start of the block pool BO map.
 */
void*
anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
{
   if (pool->bo_flags & EXEC_OBJECT_PINNED) {
      struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
      return bo->map + offset;
   } else {
      return pool->map + offset;
   }
}
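
/* For example (made-up offsets): a state allocated 64 bytes into the front
 * of the pool is read through anv_block_pool_map(pool, 64), while a block
 * allocated with anv_block_pool_alloc_back() gets a negative offset and maps
 * below the center of the pool's mapping.
 */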

/** Grows and re-centers the block pool.
 *
 * We grow the block pool in one or both directions in such a way that the
 * following conditions are met:
 *
 *  1) The size of the entire pool is always a power of two.
 *
 *  2) The pool only grows on both ends.  Neither end can get
 *     shortened.
 *
 *  3) At the end of the allocation, we have about twice as much space
 *     allocated for each end as we have used.  This way the pool doesn't
 *     grow too far in one direction or the other.
 *
 *  4) If the _alloc_back() has never been called, then the back portion of
 *     the pool retains a size of zero.  (This makes it easier for users of
 *     the block pool that only want a one-sided pool.)
 *
 *  5) We have enough space allocated for at least one more block in
 *     whichever side `state` points to.
 *
 *  6) The center of the pool is always aligned to both the block_size of
 *     the pool and a 4K CPU page.
 */
static uint32_t
anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
   VkResult result = VK_SUCCESS;

   pthread_mutex_lock(&pool->device->mutex);

   assert(state == &pool->state || state == &pool->back_state);

   /* Gather a little usage information on the pool.  Since we may have
    * threads waiting in queue to get some storage while we resize, it's
    * actually possible that total_used will be larger than old_size.  In
    * particular, block_pool_alloc() increments state->next prior to
    * calling block_pool_grow, so this ensures that we get enough space for
    * whichever side tries to grow the pool.
    *
    * We align to a page size because it makes it easier to do our
    * calculations later in such a way that we stay page-aligned.
    */
   uint32_t back_used = align_u32(pool->back_state.next, PAGE_SIZE);
   uint32_t front_used = align_u32(pool->state.next, PAGE_SIZE);
   uint32_t total_used = front_used + back_used;

   assert(state == &pool->state || back_used > 0);

   uint32_t old_size = pool->size;

   /* The block pool is always initialized to a nonzero size and this function
    * is always called after initialization.
    */
   assert(old_size > 0);

   /* The back_used and front_used may actually be smaller than the actual
    * requirement because they are based on the next pointers which are
    * updated prior to calling this function.
    */
   uint32_t back_required = MAX2(back_used, pool->center_bo_offset);
   uint32_t front_required = MAX2(front_used, old_size - pool->center_bo_offset);

   if (back_used * 2 <= back_required && front_used * 2 <= front_required) {
      /* If we're in this case then this isn't the first allocation and we
       * already have enough space on both sides to hold double what we
       * have allocated.  There's nothing for us to do.
       */
      goto done;
   }

   uint32_t size = old_size * 2;
   while (size < back_required + front_required)
      size *= 2;

   assert(size > pool->size);

   /* We compute a new center_bo_offset such that, when we double the size
    * of the pool, we maintain the ratio of how much is used by each side.
    * This way things should remain more-or-less balanced.
    */
   uint32_t center_bo_offset;
   if (back_used == 0) {
      /* If we're in this case then we have never called alloc_back().  In
       * this case, we want to keep the offset at 0 to make things as simple
       * as possible for users that don't care about back allocations.
       */
      center_bo_offset = 0;
   } else {
      /* Try to "center" the allocation based on how much is currently in
       * use on each side of the center line.
       */
      center_bo_offset = ((uint64_t)size * back_used) / total_used;

      /* Align down to a multiple of the page size */
      center_bo_offset &= ~(PAGE_SIZE - 1);

      assert(center_bo_offset >= back_used);

      /* Make sure we don't shrink the back end of the pool */
      if (center_bo_offset < back_required)
         center_bo_offset = back_required;

      /* Make sure that we don't shrink the front end of the pool */
      if (size - center_bo_offset < front_required)
         center_bo_offset = size - front_required;
   }

   assert(center_bo_offset % PAGE_SIZE == 0);

   result = anv_block_pool_expand_range(pool, center_bo_offset, size);

   pool->bo->flags = pool->bo_flags;

done:
   pthread_mutex_unlock(&pool->device->mutex);

   if (result == VK_SUCCESS) {
      /* Return the appropriate new size.  This function never actually
       * updates state->next.  Instead, we let the caller do that because it
       * needs to do so in order to maintain its concurrency model.
       */
      if (state == &pool->state) {
         return pool->size - pool->center_bo_offset;
      } else {
         assert(pool->center_bo_offset > 0);
         return pool->center_bo_offset;
      }
   } else {
      return 0;
   }
}
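
/* Worked example of the re-centering math above (made-up numbers): with
 * old_size = 64k, center_bo_offset = 16k, back_used = 12k and
 * front_used = 44k, the early-out does not apply and the pool doubles to
 * size = 128k.  The new center_bo_offset starts at (128k * 12k) / 56k,
 * is aligned down to a page, and is then clamped so that neither end
 * shrinks, keeping roughly the same back/front ratio after the grow.
 */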

static uint32_t
anv_block_pool_alloc_new(struct anv_block_pool *pool,
                         struct anv_block_state *pool_state,
                         uint32_t block_size, uint32_t *padding)
{
   struct anv_block_state state, old, new;

   /* Most allocations won't generate any padding */
   if (padding)
      *padding = 0;

   while (1) {
      state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
      if (state.next + block_size <= state.end) {
         return state.next;
      } else if (state.next <= state.end) {
         if (pool->bo_flags & EXEC_OBJECT_PINNED && state.next < state.end) {
            /* We need to grow the block pool, but still have some leftover
             * space that can't be used by that particular allocation.  So we
             * add that as a "padding", and return it.
             */
            uint32_t leftover = state.end - state.next;

            /* If there is some leftover space in the pool, the caller must
             * deal with it.
             */
            assert(leftover == 0 || padding);
            if (padding)
               *padding = leftover;
            state.next += leftover;
         }

         /* We allocated the first block outside the pool so we have to grow
          * the pool.  pool_state->next acts as a mutex: threads who try to
          * allocate now will get block indexes above the current limit and
          * hit futex_wait below.
          */
         new.next = state.next + block_size;
         do {
            new.end = anv_block_pool_grow(pool, pool_state);
         } while (new.end < new.next);

         old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
         if (old.next != state.next)
            futex_wake(&pool_state->end, INT_MAX);
         return state.next;
      } else {
         futex_wait(&pool_state->end, state.end, NULL);
         continue;
      }
   }
}
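
/* Worked example of the padding above (made-up numbers): with a softpin pool
 * where state.next = 7168 and state.end = 8192, an 8192-byte allocation
 * cannot fit in the 1024 bytes that remain.  Those 1024 bytes are reported
 * through *padding, the pool grows, and the returned offset is 8192, i.e.
 * the start of the newly added range.
 */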

int32_t
anv_block_pool_alloc(struct anv_block_pool *pool,
                     uint32_t block_size, uint32_t *padding)
{
   uint32_t offset;

   offset = anv_block_pool_alloc_new(pool, &pool->state, block_size, padding);

   return offset;
}

/* Allocates a block out of the back of the block pool.
 *
 * This will allocate a block earlier than the "start" of the block pool.
 * The offsets returned from this function will be negative but will still
 * be correct relative to the block pool's map pointer.
 *
 * If you ever use anv_block_pool_alloc_back, then you will have to do
 * gymnastics with the block pool's BO when doing relocations.
 */
int32_t
anv_block_pool_alloc_back(struct anv_block_pool *pool,
                          uint32_t block_size)
{
   int32_t offset = anv_block_pool_alloc_new(pool, &pool->back_state,
                                             block_size, NULL);

   /* The offset we get out of anv_block_pool_alloc_new() is actually the
    * number of bytes downwards from the middle to the end of the block.
    * We need to turn it into a (negative) offset from the middle to the
    * start of the block.
    */
   assert(offset >= 0);
   return -(offset + block_size);
}
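
/* For example (made-up block size): the first anv_block_pool_alloc_back()
 * call with a 4096-byte block returns -4096, i.e. the block occupying
 * [-4096, 0) relative to the pool's center, and anv_block_pool_map(pool,
 * -4096) points at its start.
 */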

VkResult
anv_state_pool_init(struct anv_state_pool *pool,
                    struct anv_device *device,
                    uint64_t start_address,
                    uint32_t block_size,
                    uint64_t bo_flags)
{
   VkResult result = anv_block_pool_init(&pool->block_pool, device,
                                         start_address,
                                         block_size * 16,
                                         bo_flags);
   if (result != VK_SUCCESS)
      return result;

   result = anv_state_table_init(&pool->table, device, 64);
   if (result != VK_SUCCESS) {
      anv_block_pool_finish(&pool->block_pool);
      return result;
   }

   assert(util_is_power_of_two_or_zero(block_size));
   pool->block_size = block_size;
   pool->back_alloc_free_list = ANV_FREE_LIST_EMPTY;
   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
      pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
      pool->buckets[i].block.next = 0;
      pool->buckets[i].block.end = 0;
   }
   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));

   return VK_SUCCESS;
}

void
anv_state_pool_finish(struct anv_state_pool *pool)
{
   VG(VALGRIND_DESTROY_MEMPOOL(pool));
   anv_state_table_finish(&pool->table);
   anv_block_pool_finish(&pool->block_pool);
}
|
2017-04-23 18:36:33 -07:00
|
|
|
|
2017-04-23 18:48:21 -07:00
|
|
|
static uint32_t
|
|
|
|
|
anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
|
|
|
|
|
struct anv_block_pool *block_pool,
|
2017-04-26 01:27:33 -07:00
|
|
|
uint32_t state_size,
|
anv/allocator: Add padding information.
It's possible that we still have some space left in the block pool, but
we try to allocate a state larger than that state. This means such state
would start somewhere within the range of the old block_pool, and end
after that range, within the range of the new size.
That's fine when we use userptr, since the memory in the block pool is
CPU mapped continuously. However, by the end of this series, we will
have the block_pool split into different BOs, with different CPU
mapping ranges that are not necessarily continuous. So we must avoid
such case of a given state being part of two different BOs in the block
pool.
This commit solves the issue by detecting that we are growing the
block_pool even though we are not at the end of the range. If that
happens, we don't use the space left at the end of the old size, and
consider it as "padding" that can't be used in the allocation. We update
the size requested from the block pool to take the padding into account,
and return the offset after the padding, which happens to be at the
start of the new address range.
Additionally, we return the amount of padding we used, so the caller
knows that this happens and can return that padding back into a list of
free states, that can be reused later. This way we hopefully don't waste
any space, but also avoid having a state split between two different
BOs.
v3:
- Calculate offset + padding at anv_block_pool_alloc_new (Jason).
v4:
- Remove extra "leftover".
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2018-12-04 15:37:33 -08:00
|
|
|
uint32_t block_size,
|
|
|
|
|
uint32_t *padding)
|
2017-04-23 18:48:21 -07:00
|
|
|
{
|
2015-05-08 22:32:37 -07:00
|
|
|
struct anv_block_state block, old, new;
|
2017-04-23 18:48:21 -07:00
|
|
|
uint32_t offset;
|
2015-05-08 22:32:37 -07:00
|
|
|
|
anv/allocator: Add padding information.
It's possible that we still have some space left in the block pool, but
we try to allocate a state larger than that state. This means such state
would start somewhere within the range of the old block_pool, and end
after that range, within the range of the new size.
That's fine when we use userptr, since the memory in the block pool is
CPU mapped continuously. However, by the end of this series, we will
have the block_pool split into different BOs, with different CPU
mapping ranges that are not necessarily continuous. So we must avoid
such case of a given state being part of two different BOs in the block
pool.
This commit solves the issue by detecting that we are growing the
block_pool even though we are not at the end of the range. If that
happens, we don't use the space left at the end of the old size, and
consider it as "padding" that can't be used in the allocation. We update
the size requested from the block pool to take the padding into account,
and return the offset after the padding, which happens to be at the
start of the new address range.
Additionally, we return the amount of padding we used, so the caller
knows that this happens and can return that padding back into a list of
free states, that can be reused later. This way we hopefully don't waste
any space, but also avoid having a state split between two different
BOs.
v3:
- Calculate offset + padding at anv_block_pool_alloc_new (Jason).
v4:
- Remove extra "leftover".
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
2018-12-04 15:37:33 -08:00
|
|
|
/* We don't always use anv_block_pool_alloc(), which would set *padding to
|
|
|
|
|
* zero for us. So if we have a pointer to padding, we must zero it out
|
|
|
|
|
* ourselves here, to make sure we always return some sensible value.
|
|
|
|
|
*/
|
|
|
|
|
if (padding)
|
|
|
|
|
*padding = 0;
|
|
|
|
|
|
2017-04-24 01:51:51 -07:00
|
|
|
/* If our state is large, we don't need any sub-allocation from a block.
|
|
|
|
|
* Instead, we just grab whole (potentially large) blocks.
|
|
|
|
|
*/
|
|
|
|
|
if (state_size >= block_size)
|
2018-12-04 15:37:33 -08:00
|
|
|
return anv_block_pool_alloc(block_pool, state_size, padding);
|
2017-04-24 01:51:51 -07:00
|
|
|
|
2015-05-08 22:32:37 -07:00
|
|
|
restart:
|
2017-04-23 18:36:33 -07:00
|
|
|
block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
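/* The atomic add above bumps "next" by state_size and hands us a snapshot of
 * (next, end). Three cases follow: the allocation fits in the current block
 * (next < end); we are the thread that hit the end exactly and must grab a
 * new block (next == end); or another thread is already growing the block,
 * so we wait on the futex and retry.
 */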
|
2015-05-08 22:32:37 -07:00
|
|
|
|
|
|
|
|
if (block.next < block.end) {
|
|
|
|
|
return block.next;
|
|
|
|
|
} else if (block.next == block.end) {
|
2018-12-04 15:37:33 -08:00
|
|
|
offset = anv_block_pool_alloc(block_pool, block_size, padding);
|
2017-04-23 18:36:33 -07:00
|
|
|
new.next = offset + state_size;
|
2017-04-26 01:27:33 -07:00
|
|
|
new.end = offset + block_size;
|
2015-08-03 00:38:48 -07:00
|
|
|
old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
|
2015-05-08 22:32:37 -07:00
|
|
|
if (old.next != block.next)
|
|
|
|
|
futex_wake(&pool->block.end, INT_MAX);
|
2015-08-03 00:38:48 -07:00
|
|
|
return offset;
|
2015-05-08 22:32:37 -07:00
|
|
|
} else {
|
2017-11-09 14:49:19 +01:00
|
|
|
futex_wait(&pool->block.end, block.end, NULL);
|
2015-05-08 22:32:37 -07:00
|
|
|
goto restart;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-23 19:15:42 -07:00
|
|
|
static uint32_t
|
|
|
|
|
anv_state_pool_get_bucket(uint32_t size)
|
2015-05-08 22:32:37 -07:00
|
|
|
{
|
2017-04-23 19:15:42 -07:00
|
|
|
unsigned size_log2 = ilog2_round_up(size);
|
2015-05-08 22:32:37 -07:00
|
|
|
assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
|
|
|
|
|
if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
|
|
|
|
|
size_log2 = ANV_MIN_STATE_SIZE_LOG2;
|
2017-04-23 19:15:42 -07:00
|
|
|
return size_log2 - ANV_MIN_STATE_SIZE_LOG2;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static uint32_t
|
|
|
|
|
anv_state_pool_get_bucket_size(uint32_t bucket)
|
|
|
|
|
{
|
|
|
|
|
uint32_t size_log2 = bucket + ANV_MIN_STATE_SIZE_LOG2;
|
|
|
|
|
return 1 << size_log2;
|
|
|
|
|
}
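/* Example (assuming ANV_MIN_STATE_SIZE_LOG2 == 6, i.e. a 64-byte minimum):
 * a 48-byte request rounds up to log2 6 and maps to bucket 0 (64 bytes),
 * a 200-byte request rounds up to log2 8 and maps to bucket 2 (256 bytes),
 * and anv_state_pool_get_bucket_size() maps those buckets back to 64 and 256.
 */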
|
|
|
|
|
|
2018-12-11 15:37:15 -08:00
|
|
|
/** Helper to push a chunk into the state table.
|
|
|
|
|
*
|
|
|
|
|
 * It creates 'count' entries in the state table and updates their sizes,
|
|
|
|
|
* offsets and maps, also pushing them as "free" states.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
anv_state_pool_return_blocks(struct anv_state_pool *pool,
|
|
|
|
|
uint32_t chunk_offset, uint32_t count,
|
|
|
|
|
uint32_t block_size)
|
|
|
|
|
{
|
2018-12-06 16:12:40 -08:00
|
|
|
/* Disallow returning 0 chunks */
|
|
|
|
|
assert(count != 0);
|
2018-12-11 15:37:15 -08:00
|
|
|
|
|
|
|
|
/* Make sure we always return chunks aligned to the block_size */
|
|
|
|
|
assert(chunk_offset % block_size == 0);
|
|
|
|
|
|
|
|
|
|
uint32_t st_idx;
|
2019-02-14 14:37:51 -06:00
|
|
|
UNUSED VkResult result = anv_state_table_add(&pool->table, &st_idx, count);
|
2018-12-11 15:37:15 -08:00
|
|
|
assert(result == VK_SUCCESS);
|
|
|
|
|
for (int i = 0; i < count; i++) {
|
|
|
|
|
/* update states that were added back to the state table */
|
|
|
|
|
struct anv_state *state_i = anv_state_table_get(&pool->table,
|
|
|
|
|
st_idx + i);
|
|
|
|
|
state_i->alloc_size = block_size;
|
|
|
|
|
state_i->offset = chunk_offset + block_size * i;
|
|
|
|
|
state_i->map = anv_block_pool_map(&pool->block_pool, state_i->offset);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uint32_t block_bucket = anv_state_pool_get_bucket(block_size);
|
2018-12-20 10:00:41 -08:00
|
|
|
anv_free_list_push(&pool->buckets[block_bucket].free_list,
|
|
|
|
|
&pool->table, st_idx, count);
|
2018-12-11 15:37:15 -08:00
|
|
|
}
|
|
|
|
|
|
2018-12-06 16:12:40 -08:00
|
|
|
/** Returns a chunk of memory back to the state pool.
|
|
|
|
|
*
|
|
|
|
|
* Do a two-level split. If chunk_size is bigger than divisor
|
|
|
|
|
 * (pool->block_size), we return as many divisor-sized blocks as we can, from
|
|
|
|
|
* the end of the chunk.
|
|
|
|
|
*
|
|
|
|
|
 * The remainder is then split into smaller blocks (starting at small_size if
|
|
|
|
|
* it is non-zero), with larger blocks always being taken from the end of the
|
|
|
|
|
* chunk.
|
|
|
|
|
*/
|
|
|
|
|
static void
|
|
|
|
|
anv_state_pool_return_chunk(struct anv_state_pool *pool,
|
|
|
|
|
uint32_t chunk_offset, uint32_t chunk_size,
|
|
|
|
|
uint32_t small_size)
|
|
|
|
|
{
|
|
|
|
|
uint32_t divisor = pool->block_size;
|
|
|
|
|
uint32_t nblocks = chunk_size / divisor;
|
|
|
|
|
uint32_t rest = chunk_size - nblocks * divisor;
|
|
|
|
|
|
|
|
|
|
if (nblocks > 0) {
|
|
|
|
|
/* First return divisor aligned and sized chunks. We start returning
|
|
|
|
|
 * larger blocks from the end of the chunk, since they should already be
|
|
|
|
|
* aligned to divisor. Also anv_state_pool_return_blocks() only accepts
|
|
|
|
|
* aligned chunks.
|
|
|
|
|
*/
|
|
|
|
|
uint32_t offset = chunk_offset + rest;
|
|
|
|
|
anv_state_pool_return_blocks(pool, offset, nblocks, divisor);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
chunk_size = rest;
|
|
|
|
|
divisor /= 2;
|
|
|
|
|
|
|
|
|
|
if (small_size > 0 && small_size < divisor)
|
|
|
|
|
divisor = small_size;
|
|
|
|
|
|
|
|
|
|
uint32_t min_size = 1 << ANV_MIN_STATE_SIZE_LOG2;
|
|
|
|
|
|
|
|
|
|
/* Just as before, return larger divisor aligned blocks from the end of the
|
|
|
|
|
* chunk first.
|
|
|
|
|
*/
|
|
|
|
|
while (chunk_size > 0 && divisor >= min_size) {
|
|
|
|
|
nblocks = chunk_size / divisor;
|
|
|
|
|
rest = chunk_size - nblocks * divisor;
|
|
|
|
|
if (nblocks > 0) {
|
|
|
|
|
anv_state_pool_return_blocks(pool, chunk_offset + rest,
|
|
|
|
|
nblocks, divisor);
|
|
|
|
|
chunk_size = rest;
|
|
|
|
|
}
|
|
|
|
|
divisor /= 2;
|
|
|
|
|
}
|
|
|
|
|
}
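/* Worked example (hypothetical sizes, assuming a 64-byte minimum state):
 * with pool->block_size = 8192, chunk_size = 20480 and small_size = 64, the
 * first pass returns two 8192-byte blocks from the end of the chunk
 * (rest = 4096), and the loop then returns the remaining 4096 bytes as
 * sixty-four 64-byte states, so nothing is left over.
 */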
|
|
|
|
|
|
2017-04-23 19:15:42 -07:00
|
|
|
static struct anv_state
|
|
|
|
|
anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
|
|
|
|
|
uint32_t size, uint32_t align)
|
|
|
|
|
{
|
|
|
|
|
uint32_t bucket = anv_state_pool_get_bucket(MAX2(size, align));
|
2015-05-08 22:32:37 -07:00
|
|
|
|
2018-11-29 10:49:31 -08:00
|
|
|
struct anv_state *state;
|
|
|
|
|
uint32_t alloc_size = anv_state_pool_get_bucket_size(bucket);
|
|
|
|
|
int32_t offset;
|
2017-04-23 18:48:21 -07:00
|
|
|
|
|
|
|
|
/* Try free list first. */
|
2018-12-20 10:00:41 -08:00
|
|
|
state = anv_free_list_pop(&pool->buckets[bucket].free_list,
|
|
|
|
|
&pool->table);
|
2018-11-29 10:49:31 -08:00
|
|
|
if (state) {
|
|
|
|
|
assert(state->offset >= 0);
|
2017-04-23 18:48:21 -07:00
|
|
|
goto done;
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-24 01:51:51 -07:00
|
|
|
/* Try to grab a chunk from some larger bucket and split it up */
|
|
|
|
|
for (unsigned b = bucket + 1; b < ANV_STATE_BUCKETS; b++) {
|
2018-12-20 10:00:41 -08:00
|
|
|
state = anv_free_list_pop(&pool->buckets[b].free_list, &pool->table);
|
2018-11-29 10:49:31 -08:00
|
|
|
if (state) {
|
2017-04-24 01:51:51 -07:00
|
|
|
unsigned chunk_size = anv_state_pool_get_bucket_size(b);
|
2018-11-29 10:49:31 -08:00
|
|
|
int32_t chunk_offset = state->offset;
|
|
|
|
|
|
|
|
|
|
/* First let's update the state we got to its new size. The offset and map
|
|
|
|
|
* remain the same.
|
|
|
|
|
*/
|
|
|
|
|
state->alloc_size = alloc_size;
|
2017-04-24 01:51:51 -07:00
|
|
|
|
2018-12-06 16:12:40 -08:00
|
|
|
/* Now return the unused part of the chunk back to the pool as free
|
|
|
|
|
* blocks
|
|
|
|
|
*
|
2017-04-24 01:51:51 -07:00
|
|
|
* There are a couple of options as to what we do with it:
|
|
|
|
|
*
|
|
|
|
|
* 1) We could fully split the chunk into state.alloc_size sized
|
|
|
|
|
* pieces. However, this would mean that allocating a 16B
|
|
|
|
|
* state could potentially split a 2MB chunk into 512K smaller
|
|
|
|
|
* chunks. This would lead to unnecessary fragmentation.
|
|
|
|
|
*
|
|
|
|
|
* 2) The classic "buddy allocator" method would have us split the
|
|
|
|
|
* chunk in half and return one half. Then we would split the
|
|
|
|
|
* remaining half in half and return one half, and repeat as
|
|
|
|
|
* needed until we get down to the size we want. However, if
|
|
|
|
|
* you are allocating a bunch of the same size state (which is
|
|
|
|
|
* the common case), this means that every other allocation has
|
|
|
|
|
* to go up a level and every fourth goes up two levels, etc.
|
|
|
|
|
* This is not nearly as efficient as it could be if we did a
|
|
|
|
|
* little more work up-front.
|
|
|
|
|
*
|
|
|
|
|
* 3) Split the difference between (1) and (2) by doing a
|
|
|
|
|
* two-level split. If it's bigger than some fixed block_size,
|
|
|
|
|
* we split it into block_size sized chunks and return all but
|
|
|
|
|
* one of them. Then we split what remains into
|
2018-12-06 16:12:40 -08:00
|
|
|
* state.alloc_size sized chunks and return them.
|
2017-04-24 01:51:51 -07:00
|
|
|
*
|
2018-12-06 16:12:40 -08:00
|
|
|
* We choose something close to option (3), which is implemented with
|
|
|
|
|
* anv_state_pool_return_chunk(). That is done by returning the
|
|
|
|
|
* remaining of the chunk, with alloc_size as a hint of the size that
|
|
|
|
|
* we want the smaller chunk split into.
|
2017-04-24 01:51:51 -07:00
|
|
|
*/
|
2018-12-06 16:12:40 -08:00
|
|
|
anv_state_pool_return_chunk(pool, chunk_offset + alloc_size,
|
|
|
|
|
chunk_size - alloc_size, alloc_size);
|
2017-04-24 01:51:51 -07:00
|
|
|
goto done;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-04 15:37:33 -08:00
|
|
|
uint32_t padding;
|
2018-11-29 10:49:31 -08:00
|
|
|
offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
|
|
|
|
|
&pool->block_pool,
|
|
|
|
|
alloc_size,
|
2018-12-04 15:37:33 -08:00
|
|
|
pool->block_size,
|
|
|
|
|
&padding);
|
2018-11-29 10:49:31 -08:00
|
|
|
/* Every time we allocate a new state, add it to the state pool */
|
|
|
|
|
uint32_t idx;
|
2019-02-14 14:37:51 -06:00
|
|
|
UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
|
2018-11-29 10:49:31 -08:00
|
|
|
assert(result == VK_SUCCESS);
|
|
|
|
|
|
|
|
|
|
state = anv_state_table_get(&pool->table, idx);
|
|
|
|
|
state->offset = offset;
|
|
|
|
|
state->alloc_size = alloc_size;
|
|
|
|
|
state->map = anv_block_pool_map(&pool->block_pool, offset);
|
2017-04-23 18:48:21 -07:00
|
|
|
|
2018-12-04 15:37:33 -08:00
|
|
|
if (padding > 0) {
|
|
|
|
|
uint32_t return_offset = offset - padding;
|
|
|
|
|
anv_state_pool_return_chunk(pool, return_offset, padding, 0);
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-23 18:48:21 -07:00
|
|
|
done:
|
2018-11-29 10:49:31 -08:00
|
|
|
return *state;
|
2017-04-26 03:28:52 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct anv_state
|
2017-04-23 18:25:17 -07:00
|
|
|
anv_state_pool_alloc(struct anv_state_pool *pool, uint32_t size, uint32_t align)
|
2017-04-26 03:28:52 -07:00
|
|
|
{
|
2017-04-26 03:32:06 -07:00
|
|
|
if (size == 0)
|
|
|
|
|
return ANV_STATE_NULL;
|
|
|
|
|
|
2017-04-26 03:28:52 -07:00
|
|
|
struct anv_state state = anv_state_pool_alloc_no_vg(pool, size, align);
|
2015-07-31 10:36:51 -07:00
|
|
|
VG(VALGRIND_MEMPOOL_ALLOC(pool, state.map, size));
|
2015-05-08 22:32:37 -07:00
|
|
|
return state;
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-24 02:17:43 -07:00
|
|
|
struct anv_state
|
|
|
|
|
anv_state_pool_alloc_back(struct anv_state_pool *pool)
|
|
|
|
|
{
|
2018-11-29 14:31:15 -08:00
|
|
|
struct anv_state *state;
|
|
|
|
|
uint32_t alloc_size = pool->block_size;
|
2017-04-24 02:17:43 -07:00
|
|
|
|
2018-12-20 10:00:41 -08:00
|
|
|
state = anv_free_list_pop(&pool->back_alloc_free_list, &pool->table);
|
2018-11-29 14:31:15 -08:00
|
|
|
if (state) {
|
|
|
|
|
assert(state->offset < 0);
|
2017-04-24 02:17:43 -07:00
|
|
|
goto done;
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-29 14:31:15 -08:00
|
|
|
int32_t offset;
|
|
|
|
|
offset = anv_block_pool_alloc_back(&pool->block_pool,
|
|
|
|
|
pool->block_size);
|
|
|
|
|
uint32_t idx;
|
2019-02-14 14:37:51 -06:00
|
|
|
UNUSED VkResult result = anv_state_table_add(&pool->table, &idx, 1);
|
2018-11-29 14:31:15 -08:00
|
|
|
assert(result == VK_SUCCESS);
|
|
|
|
|
|
|
|
|
|
state = anv_state_table_get(&pool->table, idx);
|
|
|
|
|
state->offset = offset;
|
|
|
|
|
state->alloc_size = alloc_size;
|
2018-12-19 10:26:44 -08:00
|
|
|
state->map = anv_block_pool_map(&pool->block_pool, state->offset);
|
2017-04-24 02:17:43 -07:00
|
|
|
|
|
|
|
|
done:
|
2018-11-29 14:31:15 -08:00
|
|
|
VG(VALGRIND_MEMPOOL_ALLOC(pool, state->map, state->alloc_size));
|
|
|
|
|
return *state;
|
2017-04-24 02:17:43 -07:00
|
|
|
}
|
|
|
|
|
|
2017-04-26 03:28:52 -07:00
|
|
|
static void
|
|
|
|
|
anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
|
2015-05-08 22:32:37 -07:00
|
|
|
{
|
2017-11-13 11:17:41 -08:00
|
|
|
assert(util_is_power_of_two_or_zero(state.alloc_size));
|
2017-04-23 19:15:42 -07:00
|
|
|
unsigned bucket = anv_state_pool_get_bucket(state.alloc_size);
|
2015-05-08 22:32:37 -07:00
|
|
|
|
2017-04-24 02:17:43 -07:00
|
|
|
if (state.offset < 0) {
|
|
|
|
|
assert(state.alloc_size == pool->block_size);
|
2018-12-20 10:00:41 -08:00
|
|
|
anv_free_list_push(&pool->back_alloc_free_list,
|
|
|
|
|
&pool->table, state.idx, 1);
|
2017-04-24 02:17:43 -07:00
|
|
|
} else {
|
2018-12-20 10:00:41 -08:00
|
|
|
anv_free_list_push(&pool->buckets[bucket].free_list,
|
|
|
|
|
&pool->table, state.idx, 1);
|
2017-04-24 02:17:43 -07:00
|
|
|
}
|
2015-05-08 22:32:37 -07:00
|
|
|
}
|
|
|
|
|
|
2017-04-26 03:28:52 -07:00
|
|
|
void
|
|
|
|
|
anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state)
|
|
|
|
|
{
|
2017-04-26 03:32:06 -07:00
|
|
|
if (state.alloc_size == 0)
|
|
|
|
|
return;
|
|
|
|
|
|
2017-04-26 03:28:52 -07:00
|
|
|
VG(VALGRIND_MEMPOOL_FREE(pool, state.map));
|
|
|
|
|
anv_state_pool_free_no_vg(pool, state);
|
|
|
|
|
}
|
|
|
|
|
|
2015-12-30 11:40:38 -08:00
|
|
|
struct anv_state_stream_block {
|
2017-04-23 17:22:26 -07:00
|
|
|
struct anv_state block;
|
|
|
|
|
|
2015-12-30 11:40:38 -08:00
|
|
|
/* The next block */
|
|
|
|
|
struct anv_state_stream_block *next;
|
2015-05-18 12:06:02 -07:00
|
|
|
|
|
|
|
|
#ifdef HAVE_VALGRIND
|
2015-12-30 11:40:38 -08:00
|
|
|
/* A pointer to the first user-allocated thing in this block. This is
|
|
|
|
|
* what valgrind sees as the start of the block.
|
2015-12-30 10:33:09 -08:00
|
|
|
*/
|
2015-05-18 12:06:02 -07:00
|
|
|
void *_vg_ptr;
|
|
|
|
|
#endif
|
2015-05-08 22:32:37 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/* The state stream allocator is a one-shot, single threaded allocator for
|
|
|
|
|
* variable sized blocks. We use it for allocating dynamic state.
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
anv_state_stream_init(struct anv_state_stream *stream,
|
2017-04-23 17:22:26 -07:00
|
|
|
struct anv_state_pool *state_pool,
|
|
|
|
|
uint32_t block_size)
|
2015-05-08 22:32:37 -07:00
|
|
|
{
|
2017-04-23 17:22:26 -07:00
|
|
|
stream->state_pool = state_pool;
|
|
|
|
|
stream->block_size = block_size;
|
|
|
|
|
|
|
|
|
|
stream->block = ANV_STATE_NULL;
|
|
|
|
|
|
|
|
|
|
stream->block_list = NULL;
|
2015-12-30 11:40:38 -08:00
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
/* Ensure that next + whatever > block_size. This way the first call to
|
2015-12-30 11:40:38 -08:00
|
|
|
* state_stream_alloc fetches a new block.
|
|
|
|
|
*/
|
2017-04-23 17:22:26 -07:00
|
|
|
stream->next = block_size;
|
2015-07-31 10:36:51 -07:00
|
|
|
|
|
|
|
|
VG(VALGRIND_CREATE_MEMPOOL(stream, 0, false));
|
2015-05-08 22:32:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
anv_state_stream_finish(struct anv_state_stream *stream)
|
|
|
|
|
{
|
2017-04-23 17:22:26 -07:00
|
|
|
struct anv_state_stream_block *next = stream->block_list;
|
2015-12-30 11:40:38 -08:00
|
|
|
while (next != NULL) {
|
|
|
|
|
struct anv_state_stream_block sb = VG_NOACCESS_READ(next);
|
|
|
|
|
VG(VALGRIND_MEMPOOL_FREE(stream, sb._vg_ptr));
|
2017-04-23 17:22:26 -07:00
|
|
|
VG(VALGRIND_MAKE_MEM_UNDEFINED(next, stream->block_size));
|
|
|
|
|
anv_state_pool_free_no_vg(stream->state_pool, sb.block);
|
2015-12-30 11:40:38 -08:00
|
|
|
next = sb.next;
|
2015-05-08 22:32:37 -07:00
|
|
|
}
|
2015-07-31 10:36:51 -07:00
|
|
|
|
|
|
|
|
VG(VALGRIND_DESTROY_MEMPOOL(stream));
|
2015-05-08 22:32:37 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct anv_state
|
|
|
|
|
anv_state_stream_alloc(struct anv_state_stream *stream,
|
|
|
|
|
uint32_t size, uint32_t alignment)
|
|
|
|
|
{
|
2017-04-26 03:32:06 -07:00
|
|
|
if (size == 0)
|
|
|
|
|
return ANV_STATE_NULL;
|
|
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
assert(alignment <= PAGE_SIZE);
|
2015-12-30 11:40:38 -08:00
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
uint32_t offset = align_u32(stream->next, alignment);
|
2017-04-26 09:54:41 -07:00
|
|
|
if (offset + size > stream->block.alloc_size) {
|
|
|
|
|
uint32_t block_size = stream->block_size;
|
|
|
|
|
if (block_size < size)
|
|
|
|
|
block_size = round_to_power_of_two(size);
|
|
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
|
2017-04-26 09:54:41 -07:00
|
|
|
block_size, PAGE_SIZE);
|
2015-05-08 22:32:37 -07:00
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
struct anv_state_stream_block *sb = stream->block.map;
|
|
|
|
|
VG_NOACCESS_WRITE(&sb->block, stream->block);
|
|
|
|
|
VG_NOACCESS_WRITE(&sb->next, stream->block_list);
|
|
|
|
|
stream->block_list = sb;
|
2017-05-05 12:11:42 -07:00
|
|
|
VG(VG_NOACCESS_WRITE(&sb->_vg_ptr, NULL));
|
2015-12-30 10:33:09 -08:00
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, stream->block_size));
|
2015-12-30 10:33:09 -08:00
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
/* Reset back to the start plus space for the header */
|
|
|
|
|
stream->next = sizeof(*sb);
|
2015-12-30 11:40:38 -08:00
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
offset = align_u32(stream->next, alignment);
|
2017-04-26 09:54:41 -07:00
|
|
|
assert(offset + size <= stream->block.alloc_size);
|
2015-05-08 22:32:37 -07:00
|
|
|
}
|
|
|
|
|
|
2017-04-23 17:22:26 -07:00
|
|
|
struct anv_state state = stream->block;
|
|
|
|
|
state.offset += offset;
|
2015-05-08 22:32:37 -07:00
|
|
|
state.alloc_size = size;
|
2017-04-23 17:22:26 -07:00
|
|
|
state.map += offset;
|
|
|
|
|
|
|
|
|
|
stream->next = offset + size;
|
2015-05-18 12:06:02 -07:00
|
|
|
|
|
|
|
|
#ifdef HAVE_VALGRIND
|
2017-04-23 17:22:26 -07:00
|
|
|
struct anv_state_stream_block *sb = stream->block_list;
|
2015-05-18 12:06:02 -07:00
|
|
|
void *vg_ptr = VG_NOACCESS_READ(&sb->_vg_ptr);
|
|
|
|
|
if (vg_ptr == NULL) {
|
|
|
|
|
vg_ptr = state.map;
|
|
|
|
|
VG_NOACCESS_WRITE(&sb->_vg_ptr, vg_ptr);
|
2015-07-31 10:36:51 -07:00
|
|
|
VALGRIND_MEMPOOL_ALLOC(stream, vg_ptr, size);
|
2015-05-18 12:06:02 -07:00
|
|
|
} else {
|
2015-12-30 11:40:38 -08:00
|
|
|
void *state_end = state.map + state.alloc_size;
|
2015-12-30 10:33:09 -08:00
|
|
|
/* This only updates the mempool. The newly allocated chunk is still
|
|
|
|
|
* marked as NOACCESS. */
|
2015-12-30 11:40:38 -08:00
|
|
|
VALGRIND_MEMPOOL_CHANGE(stream, vg_ptr, vg_ptr, state_end - vg_ptr);
|
2015-12-30 10:33:09 -08:00
|
|
|
/* Mark the newly allocated chunk as undefined */
|
|
|
|
|
VALGRIND_MAKE_MEM_UNDEFINED(state.map, state.alloc_size);
|
2015-05-18 12:06:02 -07:00
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2015-05-08 22:32:37 -07:00
|
|
|
return state;
|
|
|
|
|
}
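/* Typical lifetime of a stream, sketched from the functions above (the pool
 * and sizes below are placeholders, not required values):
 *
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &device->dynamic_state_pool, 16384);
 *    struct anv_state s = anv_state_stream_alloc(&stream, 64, 32);
 *    // ... fill s.map, point GPU commands at s.offset ...
 *    anv_state_stream_finish(&stream);   // frees every block at once
 */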
|
2015-05-25 15:29:27 -07:00
|
|
|
|
|
|
|
|
struct bo_pool_bo_link {
|
|
|
|
|
struct bo_pool_bo_link *next;
|
|
|
|
|
struct anv_bo bo;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
void
|
2017-11-17 17:26:59 +00:00
|
|
|
anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device,
|
|
|
|
|
uint64_t bo_flags)
|
2015-05-25 15:29:27 -07:00
|
|
|
{
|
|
|
|
|
pool->device = device;
|
2017-11-17 17:26:59 +00:00
|
|
|
pool->bo_flags = bo_flags;
|
2016-03-18 13:06:08 -07:00
|
|
|
memset(pool->free_list, 0, sizeof(pool->free_list));
|
2015-07-31 10:36:51 -07:00
|
|
|
|
|
|
|
|
VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
|
2015-05-25 15:29:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
anv_bo_pool_finish(struct anv_bo_pool *pool)
|
|
|
|
|
{
|
2016-03-18 13:06:08 -07:00
|
|
|
for (unsigned i = 0; i < ARRAY_SIZE(pool->free_list); i++) {
|
|
|
|
|
struct bo_pool_bo_link *link = PFL_PTR(pool->free_list[i]);
|
|
|
|
|
while (link != NULL) {
|
|
|
|
|
struct bo_pool_bo_link link_copy = VG_NOACCESS_READ(link);
|
|
|
|
|
|
|
|
|
|
anv_gem_munmap(link_copy.bo.map, link_copy.bo.size);
|
2018-05-30 20:16:04 -07:00
|
|
|
anv_vma_free(pool->device, &link_copy.bo);
|
2016-03-18 13:06:08 -07:00
|
|
|
anv_gem_close(pool->device, link_copy.bo.gem_handle);
|
|
|
|
|
link = link_copy.next;
|
|
|
|
|
}
|
2015-05-25 15:29:27 -07:00
|
|
|
}
|
2015-07-31 10:36:51 -07:00
|
|
|
|
|
|
|
|
VG(VALGRIND_DESTROY_MEMPOOL(pool));
|
2015-05-25 15:29:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkResult
|
2016-03-18 11:50:53 -07:00
|
|
|
anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo, uint32_t size)
|
2015-05-25 15:29:27 -07:00
|
|
|
{
|
|
|
|
|
VkResult result;
|
|
|
|
|
|
2016-03-18 13:06:08 -07:00
|
|
|
const unsigned size_log2 = size < 4096 ? 12 : ilog2_round_up(size);
|
|
|
|
|
const unsigned pow2_size = 1 << size_log2;
|
|
|
|
|
const unsigned bucket = size_log2 - 12;
|
|
|
|
|
assert(bucket < ARRAY_SIZE(pool->free_list));
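/* Example: a 12000-byte request gives size_log2 = 14, so pow2_size = 16384
 * and bucket = 2; any request under 4096 bytes lands in bucket 0 (4 KiB).
 */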
|
2016-03-18 11:50:53 -07:00
|
|
|
|
2015-05-25 15:29:27 -07:00
|
|
|
void *next_free_void;
|
2016-03-18 13:06:08 -07:00
|
|
|
if (anv_ptr_free_list_pop(&pool->free_list[bucket], &next_free_void)) {
|
2015-05-25 15:29:27 -07:00
|
|
|
struct bo_pool_bo_link *next_free = next_free_void;
|
|
|
|
|
*bo = VG_NOACCESS_READ(&next_free->bo);
|
2016-10-26 22:57:19 -07:00
|
|
|
assert(bo->gem_handle);
|
2015-05-25 15:29:27 -07:00
|
|
|
assert(bo->map == next_free);
|
2016-03-18 13:06:08 -07:00
|
|
|
assert(size <= bo->size);
|
2015-05-25 15:29:27 -07:00
|
|
|
|
2016-03-18 13:06:08 -07:00
|
|
|
VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
|
2015-05-25 15:29:27 -07:00
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct anv_bo new_bo;
|
|
|
|
|
|
2016-03-18 13:06:08 -07:00
|
|
|
result = anv_bo_init_new(&new_bo, pool->device, pow2_size);
|
2015-05-25 15:29:27 -07:00
|
|
|
if (result != VK_SUCCESS)
|
|
|
|
|
return result;
|
|
|
|
|
|
2017-11-17 17:26:59 +00:00
|
|
|
new_bo.flags = pool->bo_flags;
|
2017-05-17 11:31:02 -07:00
|
|
|
|
2018-05-30 20:16:04 -07:00
|
|
|
if (!anv_vma_alloc(pool->device, &new_bo))
|
|
|
|
|
return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
|
|
|
|
|
2016-03-18 13:06:08 -07:00
|
|
|
assert(new_bo.size == pow2_size);
|
2015-05-25 15:29:27 -07:00
|
|
|
|
2016-03-18 13:06:08 -07:00
|
|
|
new_bo.map = anv_gem_mmap(pool->device, new_bo.gem_handle, 0, pow2_size, 0);
|
2017-05-03 08:02:21 +02:00
|
|
|
if (new_bo.map == MAP_FAILED) {
|
2015-05-25 15:29:27 -07:00
|
|
|
anv_gem_close(pool->device, new_bo.gem_handle);
|
2018-05-30 20:16:04 -07:00
|
|
|
anv_vma_free(pool->device, &new_bo);
|
2015-05-25 15:29:27 -07:00
|
|
|
return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-15 13:57:00 -08:00
|
|
|
/* We are removing the state flushes, so let's make sure that these buffers
|
|
|
|
|
* are cached/snooped.
|
|
|
|
|
*/
|
|
|
|
|
if (!pool->device->info.has_llc) {
|
|
|
|
|
anv_gem_set_caching(pool->device, new_bo.gem_handle,
|
|
|
|
|
I915_CACHING_CACHED);
|
|
|
|
|
}
|
|
|
|
|
|
2015-05-25 15:29:27 -07:00
|
|
|
*bo = new_bo;
|
2015-07-31 10:36:51 -07:00
|
|
|
|
2016-03-18 13:06:08 -07:00
|
|
|
VG(VALGRIND_MEMPOOL_ALLOC(pool, bo->map, size));
|
2015-07-31 10:36:51 -07:00
|
|
|
|
2015-05-25 15:29:27 -07:00
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
2016-03-07 14:56:58 -08:00
|
|
|
anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo_in)
|
2015-05-25 15:29:27 -07:00
|
|
|
{
|
2016-03-07 14:56:58 -08:00
|
|
|
/* Make a copy in case the anv_bo happens to be stored in the BO */
|
|
|
|
|
struct anv_bo bo = *bo_in;
|
2016-09-07 21:33:48 -07:00
|
|
|
|
|
|
|
|
VG(VALGRIND_MEMPOOL_FREE(pool, bo.map));
|
|
|
|
|
|
2016-03-07 14:56:58 -08:00
|
|
|
struct bo_pool_bo_link *link = bo.map;
|
2016-09-07 21:33:48 -07:00
|
|
|
VG_NOACCESS_WRITE(&link->bo, bo);
|
2015-05-25 15:29:27 -07:00
|
|
|
|
2017-11-13 11:17:41 -08:00
|
|
|
assert(util_is_power_of_two_or_zero(bo.size));
|
2016-03-18 13:06:08 -07:00
|
|
|
const unsigned size_log2 = ilog2_round_up(bo.size);
|
|
|
|
|
const unsigned bucket = size_log2 - 12;
|
|
|
|
|
assert(bucket < ARRAY_SIZE(pool->free_list));
|
|
|
|
|
|
|
|
|
|
anv_ptr_free_list_push(&pool->free_list[bucket], link);
|
2015-05-25 15:29:27 -07:00
|
|
|
}
|
2016-06-16 14:43:41 -07:00
|
|
|
|
|
|
|
|
// Scratch pool
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
anv_scratch_pool_init(struct anv_device *device, struct anv_scratch_pool *pool)
|
|
|
|
|
{
|
|
|
|
|
memset(pool, 0, sizeof(*pool));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
anv_scratch_pool_finish(struct anv_device *device, struct anv_scratch_pool *pool)
|
|
|
|
|
{
|
|
|
|
|
for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
|
|
|
|
|
for (unsigned i = 0; i < 16; i++) {
|
2016-11-01 13:10:11 -07:00
|
|
|
struct anv_scratch_bo *bo = &pool->bos[i][s];
|
2018-05-30 20:16:30 -07:00
|
|
|
if (bo->exists > 0) {
|
|
|
|
|
anv_vma_free(device, &bo->bo);
|
2016-11-01 13:10:11 -07:00
|
|
|
anv_gem_close(device, bo->bo.gem_handle);
|
2018-05-30 20:16:30 -07:00
|
|
|
}
|
2016-06-16 14:43:41 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct anv_bo *
|
|
|
|
|
anv_scratch_pool_alloc(struct anv_device *device, struct anv_scratch_pool *pool,
|
|
|
|
|
gl_shader_stage stage, unsigned per_thread_scratch)
|
|
|
|
|
{
|
|
|
|
|
if (per_thread_scratch == 0)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
unsigned scratch_size_log2 = ffs(per_thread_scratch / 2048);
|
|
|
|
|
assert(scratch_size_log2 < 16);
|
|
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
struct anv_scratch_bo *bo = &pool->bos[scratch_size_log2][stage];
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
/* We can use "exists" to shortcut and ignore the critical section */
|
|
|
|
|
if (bo->exists)
|
|
|
|
|
return &bo->bo;
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
pthread_mutex_lock(&device->mutex);
|
|
|
|
|
|
|
|
|
|
__sync_synchronize();
|
2018-01-04 11:28:46 +00:00
|
|
|
if (bo->exists) {
|
|
|
|
|
pthread_mutex_unlock(&device->mutex);
|
2016-11-01 13:10:11 -07:00
|
|
|
return &bo->bo;
|
2018-01-04 11:28:46 +00:00
|
|
|
}
|
2016-09-07 17:32:49 +01:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
const struct anv_physical_device *physical_device =
|
|
|
|
|
&device->instance->physicalDevice;
|
|
|
|
|
const struct gen_device_info *devinfo = &physical_device->info;
|
|
|
|
|
|
|
|
|
|
const unsigned subslices = MAX2(physical_device->subslice_total, 1);
|
2018-03-06 23:28:00 -08:00
|
|
|
|
|
|
|
|
unsigned scratch_ids_per_subslice;
|
|
|
|
|
if (devinfo->is_haswell) {
|
|
|
|
|
/* WaCSScratchSize:hsw
|
|
|
|
|
*
|
|
|
|
|
* Haswell's scratch space address calculation appears to be sparse
|
|
|
|
|
* rather than tightly packed. The Thread ID has bits indicating
|
|
|
|
|
* which subslice, EU within a subslice, and thread within an EU it
|
|
|
|
|
* is. There's a maximum of two slices and two subslices, so these
|
|
|
|
|
* can be stored with a single bit. Even though there are only 10 EUs
|
|
|
|
|
* per subslice, this is stored in 4 bits, so there's an effective
|
|
|
|
|
* maximum value of 16 EUs. Similarly, although there are only 7
|
|
|
|
|
* threads per EU, this is stored in a 3 bit number, giving an
|
|
|
|
|
* effective maximum value of 8 threads per EU.
|
|
|
|
|
*
|
|
|
|
|
* This means that we need to use 16 * 8 instead of 10 * 7 for the
|
|
|
|
|
* number of threads per subslice.
|
|
|
|
|
*/
|
|
|
|
|
scratch_ids_per_subslice = 16 * 8;
|
|
|
|
|
} else if (devinfo->is_cherryview) {
|
|
|
|
|
/* Cherryview devices have either 6 or 8 EUs per subslice, and each EU
|
|
|
|
|
* has 7 threads. The 6 EU devices appear to calculate thread IDs as if
|
|
|
|
|
* it had 8 EUs.
|
|
|
|
|
*/
|
|
|
|
|
scratch_ids_per_subslice = 8 * 7;
|
|
|
|
|
} else {
|
|
|
|
|
scratch_ids_per_subslice = devinfo->max_cs_threads;
|
|
|
|
|
}
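/* Example (hypothetical device): on Haswell with 2 subslices and
 * per_thread_scratch = 2048, the compute entry below becomes
 * 16 * 8 * 2 = 256 threads, i.e. a 256 * 2048 = 512 KiB scratch BO.
 */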
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
uint32_t max_threads[] = {
|
|
|
|
|
[MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
|
|
|
|
|
[MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
|
|
|
|
|
[MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
|
|
|
|
|
[MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
|
|
|
|
|
[MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
|
|
|
|
|
[MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
|
|
|
|
|
};
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
uint32_t size = per_thread_scratch * max_threads[stage];
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
anv_bo_init_new(&bo->bo, device, size);
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2017-04-22 15:51:01 -07:00
|
|
|
/* Even though the Scratch base pointers in 3DSTATE_*S are 64 bits, they
|
|
|
|
|
* are still relative to the general state base address. When we emit
|
|
|
|
|
* STATE_BASE_ADDRESS, we set general state base address to 0 and the size
|
|
|
|
|
* to the maximum (1 page under 4GB). This allows us to just place the
|
|
|
|
|
* scratch buffers anywhere we wish in the bottom 32 bits of address space
|
|
|
|
|
* and just set the scratch base pointer in 3DSTATE_*S using a relocation.
|
|
|
|
|
* However, in order to do so, we need to ensure that the kernel does not
|
|
|
|
|
* place the scratch BO above the 32-bit boundary.
|
|
|
|
|
*
|
|
|
|
|
* NOTE: Technically, it can't go "anywhere" because the top page is off
|
|
|
|
|
* limits. However, when EXEC_OBJECT_SUPPORTS_48B_ADDRESS is set, the
|
|
|
|
|
* kernel allocates space using
|
|
|
|
|
*
|
|
|
|
|
* end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
|
|
|
|
|
*
|
|
|
|
|
* so nothing will ever touch the top page.
|
|
|
|
|
*/
|
2017-05-17 11:31:02 -07:00
|
|
|
assert(!(bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS));
|
|
|
|
|
|
|
|
|
|
if (device->instance->physicalDevice.has_exec_async)
|
|
|
|
|
bo->bo.flags |= EXEC_OBJECT_ASYNC;
|
2017-04-22 15:51:01 -07:00
|
|
|
|
2018-05-30 20:16:30 -07:00
|
|
|
if (device->instance->physicalDevice.use_softpin)
|
|
|
|
|
bo->bo.flags |= EXEC_OBJECT_PINNED;
|
|
|
|
|
|
|
|
|
|
anv_vma_alloc(device, &bo->bo);
|
|
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
/* Set the exists last because it may be read by other threads */
|
|
|
|
|
__sync_synchronize();
|
|
|
|
|
bo->exists = true;
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
pthread_mutex_unlock(&device->mutex);
|
2016-06-16 14:43:41 -07:00
|
|
|
|
2016-11-01 13:10:11 -07:00
|
|
|
return &bo->bo;
|
2016-06-16 14:43:41 -07:00
|
|
|
}
|
2017-03-13 16:18:54 -07:00
|
|
|
|
|
|
|
|
struct anv_cached_bo {
|
|
|
|
|
struct anv_bo bo;
|
|
|
|
|
|
|
|
|
|
uint32_t refcount;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
|
anv_bo_cache_init(struct anv_bo_cache *cache)
|
|
|
|
|
{
|
2019-01-11 12:01:30 -08:00
|
|
|
cache->bo_map = _mesa_pointer_hash_table_create(NULL);
|
2017-03-13 16:18:54 -07:00
|
|
|
if (!cache->bo_map)
|
|
|
|
|
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
|
|
if (pthread_mutex_init(&cache->mutex, NULL)) {
|
|
|
|
|
_mesa_hash_table_destroy(cache->bo_map, NULL);
|
2017-08-25 09:55:39 +03:00
|
|
|
return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
|
2017-03-13 16:18:54 -07:00
|
|
|
"pthread_mutex_init failed: %m");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
anv_bo_cache_finish(struct anv_bo_cache *cache)
|
|
|
|
|
{
|
|
|
|
|
_mesa_hash_table_destroy(cache->bo_map, NULL);
|
|
|
|
|
pthread_mutex_destroy(&cache->mutex);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct anv_cached_bo *
|
|
|
|
|
anv_bo_cache_lookup_locked(struct anv_bo_cache *cache, uint32_t gem_handle)
|
|
|
|
|
{
|
|
|
|
|
struct hash_entry *entry =
|
|
|
|
|
_mesa_hash_table_search(cache->bo_map,
|
|
|
|
|
(const void *)(uintptr_t)gem_handle);
|
|
|
|
|
if (!entry)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
|
|
struct anv_cached_bo *bo = (struct anv_cached_bo *)entry->data;
|
|
|
|
|
assert(bo->bo.gem_handle == gem_handle);
|
|
|
|
|
|
|
|
|
|
return bo;
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-25 20:40:01 -07:00
|
|
|
UNUSED static struct anv_bo *
|
2017-03-13 16:18:54 -07:00
|
|
|
anv_bo_cache_lookup(struct anv_bo_cache *cache, uint32_t gem_handle)
|
|
|
|
|
{
|
|
|
|
|
pthread_mutex_lock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
|
|
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
return bo ? &bo->bo : NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-30 15:34:25 -07:00
|
|
|
#define ANV_BO_CACHE_SUPPORTED_FLAGS \
|
|
|
|
|
(EXEC_OBJECT_WRITE | \
|
|
|
|
|
EXEC_OBJECT_ASYNC | \
|
2018-05-30 15:25:04 -07:00
|
|
|
EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
|
2018-07-09 14:21:33 -07:00
|
|
|
EXEC_OBJECT_PINNED | \
|
|
|
|
|
ANV_BO_EXTERNAL)
|
2018-05-30 15:34:25 -07:00
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
VkResult
|
|
|
|
|
anv_bo_cache_alloc(struct anv_device *device,
|
|
|
|
|
struct anv_bo_cache *cache,
|
2018-05-30 15:34:25 -07:00
|
|
|
uint64_t size, uint64_t bo_flags,
|
|
|
|
|
struct anv_bo **bo_out)
|
2017-03-13 16:18:54 -07:00
|
|
|
{
|
2018-05-30 15:34:25 -07:00
|
|
|
assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
|
|
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
struct anv_cached_bo *bo =
|
|
|
|
|
vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
|
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
|
if (!bo)
|
|
|
|
|
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
|
|
|
|
|
bo->refcount = 1;
|
|
|
|
|
|
|
|
|
|
/* The kernel is going to give us whole pages anyway */
|
|
|
|
|
size = align_u64(size, 4096);
|
|
|
|
|
|
|
|
|
|
VkResult result = anv_bo_init_new(&bo->bo, device, size);
|
|
|
|
|
if (result != VK_SUCCESS) {
|
|
|
|
|
vk_free(&device->alloc, bo);
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-30 15:34:25 -07:00
|
|
|
bo->bo.flags = bo_flags;
|
|
|
|
|
|
2018-05-30 15:25:04 -07:00
|
|
|
if (!anv_vma_alloc(device, &bo->bo)) {
|
|
|
|
|
anv_gem_close(device, bo->bo.gem_handle);
|
|
|
|
|
vk_free(&device->alloc, bo);
|
|
|
|
|
return vk_errorf(device->instance, NULL,
|
|
|
|
|
VK_ERROR_OUT_OF_DEVICE_MEMORY,
|
|
|
|
|
"failed to allocate virtual address for BO");
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
assert(bo->bo.gem_handle);
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
_mesa_hash_table_insert(cache->bo_map,
|
|
|
|
|
(void *)(uintptr_t)bo->bo.gem_handle, bo);
|
|
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
*bo_out = &bo->bo;
|
|
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
2019-03-01 13:15:31 -08:00
|
|
|
VkResult
|
|
|
|
|
anv_bo_cache_import_host_ptr(struct anv_device *device,
|
|
|
|
|
struct anv_bo_cache *cache,
|
|
|
|
|
void *host_ptr, uint32_t size,
|
|
|
|
|
uint64_t bo_flags, struct anv_bo **bo_out)
|
|
|
|
|
{
|
|
|
|
|
assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
|
|
|
|
|
assert((bo_flags & ANV_BO_EXTERNAL) == 0);
|
|
|
|
|
|
|
|
|
|
uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
|
|
|
|
|
if (!gem_handle)
|
|
|
|
|
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
|
|
|
|
|
if (bo) {
|
|
|
|
|
/* VK_EXT_external_memory_host doesn't require handling importing the
|
|
|
|
|
* same pointer twice at the same time, but we don't get in the way. If
|
|
|
|
|
 * the kernel gives us the same gem_handle, we only succeed if the flags match.
|
|
|
|
|
*/
|
|
|
|
|
if (bo_flags != bo->bo.flags) {
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
return vk_errorf(device->instance, NULL,
|
|
|
|
|
VK_ERROR_INVALID_EXTERNAL_HANDLE,
|
|
|
|
|
"same host pointer imported two different ways");
|
|
|
|
|
}
|
|
|
|
|
__sync_fetch_and_add(&bo->refcount, 1);
|
|
|
|
|
} else {
|
|
|
|
|
bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
|
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
|
if (!bo) {
|
|
|
|
|
anv_gem_close(device, gem_handle);
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bo->refcount = 1;
|
|
|
|
|
|
|
|
|
|
anv_bo_init(&bo->bo, gem_handle, size);
|
|
|
|
|
bo->bo.flags = bo_flags;
|
|
|
|
|
|
|
|
|
|
if (!anv_vma_alloc(device, &bo->bo)) {
|
|
|
|
|
anv_gem_close(device, bo->bo.gem_handle);
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
vk_free(&device->alloc, bo);
|
|
|
|
|
return vk_errorf(device->instance, NULL,
|
|
|
|
|
VK_ERROR_OUT_OF_DEVICE_MEMORY,
|
|
|
|
|
"failed to allocate virtual address for BO");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
_mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
*bo_out = &bo->bo;
|
|
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
VkResult
|
|
|
|
|
anv_bo_cache_import(struct anv_device *device,
|
|
|
|
|
struct anv_bo_cache *cache,
|
2018-05-30 15:34:25 -07:00
|
|
|
int fd, uint64_t bo_flags,
|
|
|
|
|
struct anv_bo **bo_out)
|
2017-03-13 16:18:54 -07:00
|
|
|
{
|
2018-05-30 15:34:25 -07:00
|
|
|
assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
|
2018-07-09 14:21:33 -07:00
|
|
|
assert(bo_flags & ANV_BO_EXTERNAL);
|
2018-05-30 15:34:25 -07:00
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
pthread_mutex_lock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
|
|
|
|
|
if (!gem_handle) {
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
2019-01-08 18:04:54 +00:00
|
|
|
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
2017-03-13 16:18:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct anv_cached_bo *bo = anv_bo_cache_lookup_locked(cache, gem_handle);
|
|
|
|
|
if (bo) {
|
2018-05-30 15:34:25 -07:00
|
|
|
/* We have to be careful how we combine flags so that it makes sense.
|
|
|
|
|
* Really, though, if we get to this case and it actually matters, the
|
|
|
|
|
* client has imported a BO twice in different ways and they get what
|
|
|
|
|
* they have coming.
|
|
|
|
|
*/
|
2018-07-09 14:21:33 -07:00
|
|
|
uint64_t new_flags = ANV_BO_EXTERNAL;
|
2018-05-30 15:34:25 -07:00
|
|
|
new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_WRITE;
|
|
|
|
|
new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_ASYNC;
|
|
|
|
|
new_flags |= (bo->bo.flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
|
2018-05-30 15:25:04 -07:00
|
|
|
new_flags |= (bo->bo.flags | bo_flags) & EXEC_OBJECT_PINNED;
|
|
|
|
|
|
|
|
|
|
/* It's theoretically possible for a BO to get imported such that it's
|
|
|
|
|
* both pinned and not pinned. The only way this can happen is if it
|
|
|
|
|
* gets imported as both a semaphore and a memory object and that would
|
|
|
|
|
* be an application error. Just fail out in that case.
|
|
|
|
|
*/
|
|
|
|
|
if ((bo->bo.flags & EXEC_OBJECT_PINNED) !=
|
|
|
|
|
(bo_flags & EXEC_OBJECT_PINNED)) {
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
return vk_errorf(device->instance, NULL,
|
|
|
|
|
VK_ERROR_INVALID_EXTERNAL_HANDLE,
|
|
|
|
|
"The same BO was imported two different ways");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* It's also theoretically possible that someone could export a BO from
|
|
|
|
|
* one heap and import it into another or to import the same BO into two
|
|
|
|
|
* different heaps. If this happens, we could potentially end up both
|
|
|
|
|
* allowing and disallowing 48-bit addresses. There's not much we can
|
|
|
|
|
* do about it if we're pinning so we just throw an error and hope no
|
|
|
|
|
* app is actually that stupid.
|
|
|
|
|
*/
|
|
|
|
|
if ((new_flags & EXEC_OBJECT_PINNED) &&
|
|
|
|
|
(bo->bo.flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
|
|
|
|
|
(bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
|
2018-10-05 09:56:19 +10:00
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
2018-05-30 15:25:04 -07:00
|
|
|
return vk_errorf(device->instance, NULL,
|
|
|
|
|
VK_ERROR_INVALID_EXTERNAL_HANDLE,
|
|
|
|
|
"The same BO was imported on two different heaps");
|
|
|
|
|
}
|
2018-05-30 15:34:25 -07:00
|
|
|
|
|
|
|
|
bo->bo.flags = new_flags;
|
|
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
__sync_fetch_and_add(&bo->refcount, 1);
|
|
|
|
|
} else {
|
anv: Move size check from anv_bo_cache_import() to caller (v2)
This change prepares for VK_ANDROID_native_buffer. When the user imports
a gralloc handle into a VkImage using VK_ANDROID_native_buffer, the user
provides no size. The driver must infer the size from the internals of
the gralloc buffer.
The patch is essentially a refactor patch, but it does change behavior
in some edge cases, described below. In what follows, the "nominal size"
of the bo refers to anv_bo::size, which may not match the bo's "actual
size" according to the kernel.
Post-patch, the nominal size of the bo returned from
anv_bo_cache_import() is always the size of imported dma-buf according
to lseek(). Pre-patch, the bo's nominal size was difficult to predict.
If the imported dma-buf's gem handle was not resident in the cache, then
the bo's nominal size was align(VkMemoryAllocateInfo::allocationSize,
4096). If it *was* resident, then the bo's nominal size was whatever
the cache returned. As a consequence, the first cache insert decided the
bo's nominal size, which could be significantly smaller compared to the
dma-buf's actual size, as the nominal size was determined by
VkMemoryAllocationInfo::allocationSize and not lseek().
I believe this patch cleans up that messy behavior. For an imported or
exported VkDeviceMemory, anv_bo::size should now be the true size of the
bo, if I correctly understand the problem (which I possibly don't).
v2:
- Preserve behavior of aligning size to 4096 before checking. [for
jekstrand]
- Check size with < instead of <=, to match behavior of commit c0a4f56
"anv: bo_cache: allow importing a BO larger than needed". [for
chadv]
2017-09-12 14:05:08 -07:00
|
|
|
off_t size = lseek(fd, 0, SEEK_END);
|
|
|
|
|
if (size == (off_t)-1) {
|
2017-03-13 16:18:54 -07:00
|
|
|
anv_gem_close(device, gem_handle);
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
2019-01-08 18:04:54 +00:00
|
|
|
return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
|
2017-03-13 16:18:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bo = vk_alloc(&device->alloc, sizeof(struct anv_cached_bo), 8,
|
|
|
|
|
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
|
|
|
|
|
if (!bo) {
|
|
|
|
|
anv_gem_close(device, gem_handle);
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bo->refcount = 1;
|
|
|
|
|
|
|
|
|
|
anv_bo_init(&bo->bo, gem_handle, size);
|
2018-05-30 15:34:25 -07:00
|
|
|
bo->bo.flags = bo_flags;
|
2017-03-13 16:18:54 -07:00
|
|
|
|
2018-05-30 15:25:04 -07:00
|
|
|
if (!anv_vma_alloc(device, &bo->bo)) {
|
|
|
|
|
anv_gem_close(device, bo->bo.gem_handle);
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
vk_free(&device->alloc, bo);
|
|
|
|
|
return vk_errorf(device->instance, NULL,
|
|
|
|
|
VK_ERROR_OUT_OF_DEVICE_MEMORY,
|
|
|
|
|
"failed to allocate virtual address for BO");
|
|
|
|
|
}
|
|
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
_mesa_hash_table_insert(cache->bo_map, (void *)(uintptr_t)gem_handle, bo);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
*bo_out = &bo->bo;
|
|
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkResult
|
|
|
|
|
anv_bo_cache_export(struct anv_device *device,
|
|
|
|
|
struct anv_bo_cache *cache,
|
|
|
|
|
struct anv_bo *bo_in, int *fd_out)
|
|
|
|
|
{
|
|
|
|
|
assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
|
|
|
|
|
struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
|
|
|
|
|
|
2018-07-09 14:21:33 -07:00
|
|
|
/* This BO must have been flagged external in order for us to be able
|
|
|
|
|
* to export it. This is done based on external options passed into
|
|
|
|
|
* anv_AllocateMemory.
|
|
|
|
|
*/
|
|
|
|
|
assert(bo->bo.flags & ANV_BO_EXTERNAL);
|
|
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
int fd = anv_gem_handle_to_fd(device, bo->bo.gem_handle);
|
|
|
|
|
if (fd < 0)
|
|
|
|
|
return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
|
|
|
|
|
|
|
|
|
|
*fd_out = fd;
|
|
|
|
|
|
|
|
|
|
return VK_SUCCESS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
|
|
|
|
atomic_dec_not_one(uint32_t *counter)
|
|
|
|
|
{
|
|
|
|
|
uint32_t old, val;
|
|
|
|
|
|
|
|
|
|
val = *counter;
|
|
|
|
|
while (1) {
|
|
|
|
|
if (val == 1)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
old = __sync_val_compare_and_swap(counter, val, val - 1);
|
|
|
|
|
if (old == val)
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
val = old;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
anv_bo_cache_release(struct anv_device *device,
|
|
|
|
|
struct anv_bo_cache *cache,
|
|
|
|
|
struct anv_bo *bo_in)
|
|
|
|
|
{
|
|
|
|
|
assert(anv_bo_cache_lookup(cache, bo_in->gem_handle) == bo_in);
|
|
|
|
|
struct anv_cached_bo *bo = (struct anv_cached_bo *)bo_in;
|
|
|
|
|
|
|
|
|
|
/* Try to decrement the counter but don't go below one. If this succeeds
|
|
|
|
|
* then the refcount has been decremented and we are not the last
|
|
|
|
|
* reference.
|
|
|
|
|
*/
|
|
|
|
|
if (atomic_dec_not_one(&bo->refcount))
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
pthread_mutex_lock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
/* We are probably the last reference since our attempt to decrement above
|
|
|
|
|
* failed. However, we can't actually know until we are inside the mutex.
|
|
|
|
|
* Otherwise, someone could import the BO between the decrement and our
|
|
|
|
|
* taking the mutex.
|
|
|
|
|
*/
|
|
|
|
|
if (unlikely(__sync_sub_and_fetch(&bo->refcount, 1) > 0)) {
|
|
|
|
|
/* Turns out we're not the last reference. Unlock and bail. */
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct hash_entry *entry =
|
|
|
|
|
_mesa_hash_table_search(cache->bo_map,
|
|
|
|
|
(const void *)(uintptr_t)bo->bo.gem_handle);
|
|
|
|
|
assert(entry);
|
|
|
|
|
_mesa_hash_table_remove(cache->bo_map, entry);
|
|
|
|
|
|
|
|
|
|
if (bo->bo.map)
|
|
|
|
|
anv_gem_munmap(bo->bo.map, bo->bo.size);
|
|
|
|
|
|
2018-05-30 15:25:04 -07:00
|
|
|
anv_vma_free(device, &bo->bo);
|
|
|
|
|
|
2017-03-13 16:18:54 -07:00
|
|
|
anv_gem_close(device, bo->bo.gem_handle);
|
|
|
|
|
|
|
|
|
|
/* Don't unlock until we've actually closed the BO. The whole point of
|
|
|
|
|
* the BO cache is to ensure that we correctly handle races with creating
|
|
|
|
|
* and releasing GEM handles and we don't want to let someone import the BO
|
|
|
|
|
* again between mutex unlock and closing the GEM handle.
|
|
|
|
|
*/
|
|
|
|
|
pthread_mutex_unlock(&cache->mutex);
|
|
|
|
|
|
|
|
|
|
vk_free(&device->alloc, bo);
|
|
|
|
|
}
|