anv/allocator: Roll fixed_size_state_pool into state_pool

The helper functions aren't really gaining us as much as they claim and
are actually about to be in the way.

Reviewed-by: Juan A. Suarez Romero <jasuarez@igalia.com>
Jason Ekstrand 2017-04-23 18:48:21 -07:00
parent 6d02ef011e
commit c73ce41a48
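For orientation before the diff: the patch keeps struct anv_fixed_size_state_pool as a plain data holder but moves the logic that used to hide behind its init/alloc/free helpers into the anv_state_pool functions, which now touch bucket fields directly. Below is a minimal sketch of the data layout involved, reconstructed from the diff rather than copied from the tree; the stand-in field layouts and the ANV_MAX_STATE_SIZE_LOG2 value are illustrative assumptions, and the real definitions live in anv_private.h.

#include <stdint.h>

/* Sketch only: stand-ins for the anv_private.h types, reconstructed
 * from the diff below; layouts are illustrative, not authoritative. */
union anv_free_list {
   struct {
      int32_t  offset;   /* head of the free list; a sentinel marks empty */
      uint32_t count;    /* change counter, guards against ABA on pop */
   };
   uint64_t u64;         /* lets the list be updated with one 64-bit CAS */
};

struct anv_block_state {
   union {
      struct {
         uint32_t next;  /* offset of the next free byte in the block */
         uint32_t end;   /* offset one past the block's last byte */
      };
      uint64_t u64;      /* for atomic fetch-and-add (see the diff) */
   };
};

#define ANV_MIN_STATE_SIZE_LOG2 6    /* smallest state: 64 bytes */
#define ANV_MAX_STATE_SIZE_LOG2 10   /* illustrative value only */
#define ANV_STATE_BUCKETS \
   (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_block_pool;   /* opaque here; defined in anv_private.h */

/* One bucket per power-of-two size class; after this commit the state
 * pool initializes and frees bucket state in place. */
struct anv_fixed_size_state_pool {
   union anv_free_list    free_list;  /* freed states, lock-free LIFO */
   struct anv_block_state block;      /* bump pointer into current block */
};

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

Packing each free list and each block state into a single 64-bit word is what keeps the pools lock-free: every update is one 64-bit atomic operation, as the __sync_fetch_and_add in the first hunk shows.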

@@ -616,33 +616,33 @@ anv_block_pool_free(struct anv_block_pool *pool, int32_t offset)
    }
 }
 
-static void
-anv_fixed_size_state_pool_init(struct anv_fixed_size_state_pool *pool)
+void
+anv_state_pool_init(struct anv_state_pool *pool,
+                    struct anv_block_pool *block_pool)
 {
-   /* At least a cache line and must divide the block size. */
-
-   pool->free_list = ANV_FREE_LIST_EMPTY;
-   pool->block.next = 0;
-   pool->block.end = 0;
+   pool->block_pool = block_pool;
+   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
+      pool->buckets[i].free_list = ANV_FREE_LIST_EMPTY;
+      pool->buckets[i].block.next = 0;
+      pool->buckets[i].block.end = 0;
+   }
+
+   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
+}
+
+void
+anv_state_pool_finish(struct anv_state_pool *pool)
+{
+   VG(VALGRIND_DESTROY_MEMPOOL(pool));
 }
 
 static uint32_t
-anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
-                                struct anv_block_pool *block_pool,
-                                uint32_t state_size)
+anv_fixed_size_state_pool_alloc_new(struct anv_fixed_size_state_pool *pool,
+                                    struct anv_block_pool *block_pool,
+                                    uint32_t state_size)
 {
-   assert(state_size >= 64 && util_is_power_of_two(state_size));
-
-   int32_t offset;
    struct anv_block_state block, old, new;
+   uint32_t offset;
 
-   /* Try free list first. */
-   if (anv_free_list_pop(&pool->free_list, &block_pool->map, &offset)) {
-      assert(offset >= 0);
-      return offset;
-   }
-
-   /* If free list was empty (or somebody raced us and took the items) we
-    * allocate a new item from the end of the block */
 restart:
    block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
@@ -662,31 +662,6 @@ anv_fixed_size_state_pool_alloc(struct anv_fixed_size_state_pool *pool,
    }
 }
 
-static void
-anv_fixed_size_state_pool_free(struct anv_fixed_size_state_pool *pool,
-                               struct anv_block_pool *block_pool,
-                               uint32_t offset)
-{
-   anv_free_list_push(&pool->free_list, block_pool->map, offset);
-}
-
-void
-anv_state_pool_init(struct anv_state_pool *pool,
-                    struct anv_block_pool *block_pool)
-{
-   pool->block_pool = block_pool;
-   for (unsigned i = 0; i < ANV_STATE_BUCKETS; i++) {
-      anv_fixed_size_state_pool_init(&pool->buckets[i]);
-   }
-
-   VG(VALGRIND_CREATE_MEMPOOL(pool, 0, false));
-}
-
-void
-anv_state_pool_finish(struct anv_state_pool *pool)
-{
-   VG(VALGRIND_DESTROY_MEMPOOL(pool));
-}
-
 static struct anv_state
 anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
                            uint32_t size, uint32_t align)
@@ -699,9 +674,19 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
 
    struct anv_state state;
    state.alloc_size = 1 << size_log2;
-   state.offset = anv_fixed_size_state_pool_alloc(&pool->buckets[bucket],
-                                                  pool->block_pool,
-                                                  state.alloc_size);
+
+   /* Try free list first. */
+   if (anv_free_list_pop(&pool->buckets[bucket].free_list,
+                         &pool->block_pool->map, &state.offset)) {
+      assert(state.offset >= 0);
+      goto done;
+   }
+
+   state.offset = anv_fixed_size_state_pool_alloc_new(&pool->buckets[bucket],
+                                                      pool->block_pool,
+                                                      state.alloc_size);
+
+done:
    state.map = pool->block_pool->map + state.offset;
    return state;
 }
@@ -726,8 +711,8 @@ anv_state_pool_free_no_vg(struct anv_state_pool *pool, struct anv_state state)
           size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
    unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
 
-   anv_fixed_size_state_pool_free(&pool->buckets[bucket],
-                                  pool->block_pool, state.offset);
+   anv_free_list_push(&pool->buckets[bucket].free_list,
+                      pool->block_pool->map, state.offset);
 }
 
 void
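A closing note on the allocation path these hunks rearrange: anv_state_pool_alloc_no_vg now pops from the bucket's free list itself and only calls anv_fixed_size_state_pool_alloc_new when the list comes up empty, and that fallback claims fresh space from the bucket's current block with a single atomic fetch-and-add, as the restart: code in the first hunk shows. Below is a self-contained sketch of that bump step; it is a simplification under stated assumptions (little-endian word order, no block-growth or retry handling, hypothetical names), not the driver's actual helper.

#include <stdint.h>

/* Packed next/end pair, as in anv_block_state: one 64-bit atomic add
 * both claims space and returns a consistent snapshot of the bounds. */
struct block_state {
   union {
      struct {
         uint32_t next;   /* low word on little-endian: the bump pointer */
         uint32_t end;    /* high word: one past the block's last byte */
      };
      uint64_t u64;
   };
};

/* Hypothetical, simplified stand-in for the alloc_new slow path: claim
 * state_size bytes, or return -1 when the block is exhausted (the real
 * code then installs a fresh block and retries at 'restart'). */
static int64_t
bump_alloc(struct block_state *block, uint32_t state_size)
{
   struct block_state old;

   /* Adding to u64 bumps 'next' (the low word) without touching 'end',
    * provided 'next' never overflows 32 bits. */
   old.u64 = __sync_fetch_and_add(&block->u64, state_size);

   if (old.next + state_size <= old.end)
      return old.next;   /* our reservation fits inside the block */

   return -1;            /* block exhausted; caller must grow and retry */
}

This mirrors the structure left behind by the last two hunks: free-list pop and push now live directly in the state-pool entry points, so the only remaining fixed-size helper is this slow-path block allocation.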