anv/allocator: Add a start_offset to anv_state_pool

This allows a pool's allocations to start somewhere other than the base
address.  Our first real use of this will be a negative offset for the
binding table pool, so that the offset is baked into the pool and the
code in anv_batch_chain.c doesn't have to understand pool offsetting.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4897>
Author: Jason Ekstrand, 2020-05-04 17:22:22 -05:00; committed by Marge Bot
parent 772b15ad32
commit d11e4738a8
8 changed files with 35 additions and 25 deletions
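
The idea, roughly: the block pool is placed at base_address + start_offset, and
every offset the state pool returns has start_offset added, so callers can keep
doing base + offset without knowing the pool was shifted.  A minimal standalone
sketch of that arithmetic (the model_pool type, helper, and addresses below are
made up for illustration; they are not the anv API):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for anv_state_pool: it only tracks the two values that
 * matter for the offset math in this commit. */
struct model_pool {
   uint64_t base_address; /* address the returned offsets are relative to */
   int32_t start_offset;  /* baked into every offset the pool hands out */
   uint32_t next;         /* next free byte inside the pool's storage */
};

/* Mimics what the state pool now stores in state->offset. */
static int32_t
model_pool_alloc(struct model_pool *pool, uint32_t size)
{
   uint32_t block_offset = pool->next; /* offset inside the block pool */
   pool->next += size;
   return pool->start_offset + (int32_t)block_offset;
}

int main(void)
{
   /* Hypothetical layout: the binding table pool lives 1 GiB below the
    * surface state pool, so it gets a negative start_offset. */
   struct model_pool bt_pool = {
      .base_address = 0x180000000ull, /* surface state base (made up) */
      .start_offset = -0x40000000,    /* 1 GiB below that base */
   };

   int32_t offset = model_pool_alloc(&bt_pool, 64);
   assert(offset < 0); /* binding table offsets come out negative */

   /* The block pool sits at base_address + start_offset, so the final
    * address is still just base_address + returned offset. */
   printf("state at 0x%" PRIx64 "\n", bt_pool.base_address + offset);
   return 0;
}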


@@ -823,15 +823,21 @@ anv_block_pool_alloc_back(struct anv_block_pool *pool,
 VkResult
 anv_state_pool_init(struct anv_state_pool *pool,
                     struct anv_device *device,
-                    uint64_t start_address,
+                    uint64_t base_address,
+                    int32_t start_offset,
                     uint32_t block_size)
 {
+   /* We don't want to ever see signed overflow */
+   assert(start_offset < INT32_MAX - (int32_t)BLOCK_POOL_MEMFD_SIZE);
+
    VkResult result = anv_block_pool_init(&pool->block_pool, device,
-                                         start_address,
+                                         base_address + start_offset,
                                          block_size * 16);
    if (result != VK_SUCCESS)
      return result;
 
+   pool->start_offset = start_offset;
+
    result = anv_state_table_init(&pool->table, device, 64);
    if (result != VK_SUCCESS) {
       anv_block_pool_finish(&pool->block_pool);
@@ -942,7 +948,7 @@ anv_state_pool_return_blocks(struct anv_state_pool *pool,
       struct anv_state *state_i = anv_state_table_get(&pool->table,
                                                       st_idx + i);
       state_i->alloc_size = block_size;
-      state_i->offset = chunk_offset + block_size * i;
+      state_i->offset = pool->start_offset + chunk_offset + block_size * i;
       state_i->map = anv_block_pool_map(&pool->block_pool,
                                         state_i->offset,
                                         state_i->alloc_size);
@@ -1084,7 +1090,7 @@ anv_state_pool_alloc_no_vg(struct anv_state_pool *pool,
    assert(result == VK_SUCCESS);
 
    state = anv_state_table_get(&pool->table, idx);
-   state->offset = offset;
+   state->offset = pool->start_offset + offset;
    state->alloc_size = alloc_size;
    state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
@@ -1128,7 +1134,7 @@ anv_state_pool_alloc_back(struct anv_state_pool *pool)
    assert(result == VK_SUCCESS);
 
    state = anv_state_table_get(&pool->table, idx);
-   state->offset = offset;
+   state->offset = pool->start_offset + offset;
    state->alloc_size = alloc_size;
    state->map = anv_block_pool_map(&pool->block_pool, offset, alloc_size);
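
The new assert at the top of anv_state_pool_init() bounds start_offset so that
start_offset plus any offset inside the pool's backing storage can never
overflow a signed 32-bit offset.  A quick worst-case check, with a made-up size
standing in for BLOCK_POOL_MEMFD_SIZE:

#include <assert.h>
#include <stdint.h>

int main(void)
{
   /* Hypothetical pool size; the driver uses BLOCK_POOL_MEMFD_SIZE here. */
   const int32_t memfd_size = 256 * 1024 * 1024;

   /* Largest start_offset the new assert in anv_state_pool_init() allows. */
   int32_t start_offset = INT32_MAX - memfd_size - 1;

   /* Even an allocation at the very end of the pool's backing storage
    * then produces an offset that still fits in a signed 32-bit int. */
   int64_t worst_case = (int64_t)start_offset + memfd_size;
   assert(worst_case <= INT32_MAX);
   return 0;
}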


@@ -539,10 +539,11 @@ anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
 struct anv_address
 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
 {
+   struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
    return (struct anv_address) {
-      .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
-      .offset = bt_block->offset,
+      .bo = pool->block_pool.bo,
+      .offset = bt_block->offset - pool->start_offset,
    };
 }
@@ -708,7 +709,6 @@ struct anv_state
 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t entries, uint32_t *state_offset)
 {
-   struct anv_device *device = cmd_buffer->device;
    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
    uint32_t bt_size = align_u32(entries * 4, 32);
@@ -722,14 +722,8 @@ anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
    cmd_buffer->bt_next.map += bt_size;
    cmd_buffer->bt_next.alloc_size -= bt_size;
 
-   if (device->physical->use_softpin) {
-      assert(bt_block->offset >= 0);
-      *state_offset = device->surface_state_pool.block_pool.start_address -
-         device->binding_table_pool.block_pool.start_address - bt_block->offset;
-   } else {
-      assert(bt_block->offset < 0);
-      *state_offset = -bt_block->offset;
-   }
+   assert(bt_block->offset < 0);
+   *state_offset = -bt_block->offset;
 
    return state;
 }
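
The simplification in anv_cmd_buffer_alloc_binding_table() works because, with
the offset baked into the pool, -bt_block->offset is already the same
surface-state-relative value the softpin branch used to compute by hand.  A
small check of that equivalence, using hypothetical pool addresses:

#include <assert.h>
#include <stdint.h>

int main(void)
{
   /* Hypothetical pool placement, for illustration only. */
   const int64_t surface_state_base = 0x180000000;
   const int64_t binding_table_base = 0x140000000;
   const int32_t start_offset =
      (int32_t)(binding_table_base - surface_state_base);

   /* Say the binding table block landed at this offset inside its pool. */
   const int32_t block_offset = 0x2000;

   /* With this commit, bt_block->offset already includes start_offset. */
   int32_t bt_block_offset = start_offset + block_offset;
   assert(bt_block_offset < 0);

   /* Old softpin branch: surface base - binding table base - block offset.
    * New code:           -bt_block->offset.
    * Both yield the same surface-state-relative value. */
   int32_t old_state_offset =
      (int32_t)(surface_state_base - binding_table_base) - block_offset;
   int32_t new_state_offset = -bt_block_offset;
   assert(old_state_offset == new_state_offset);
   return 0;
}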


@@ -2867,23 +2867,27 @@ VkResult anv_CreateDevice(
    anv_bo_pool_init(&device->batch_bo_pool, device);
 
    result = anv_state_pool_init(&device->dynamic_state_pool, device,
-                                DYNAMIC_STATE_POOL_MIN_ADDRESS, 16384);
+                                DYNAMIC_STATE_POOL_MIN_ADDRESS, 0, 16384);
    if (result != VK_SUCCESS)
       goto fail_batch_bo_pool;
 
    result = anv_state_pool_init(&device->instruction_state_pool, device,
-                                INSTRUCTION_STATE_POOL_MIN_ADDRESS, 16384);
+                                INSTRUCTION_STATE_POOL_MIN_ADDRESS, 0, 16384);
    if (result != VK_SUCCESS)
       goto fail_dynamic_state_pool;
 
    result = anv_state_pool_init(&device->surface_state_pool, device,
-                                SURFACE_STATE_POOL_MIN_ADDRESS, 4096);
+                                SURFACE_STATE_POOL_MIN_ADDRESS, 0, 4096);
    if (result != VK_SUCCESS)
       goto fail_instruction_state_pool;
 
    if (physical_device->use_softpin) {
+      int64_t bt_pool_offset = (int64_t)BINDING_TABLE_POOL_MIN_ADDRESS -
+                               (int64_t)SURFACE_STATE_POOL_MIN_ADDRESS;
+      assert(INT32_MIN < bt_pool_offset && bt_pool_offset < 0);
       result = anv_state_pool_init(&device->binding_table_pool, device,
-                                   BINDING_TABLE_POOL_MIN_ADDRESS, 4096);
+                                   SURFACE_STATE_POOL_MIN_ADDRESS,
+                                   bt_pool_offset, 4096);
       if (result != VK_SUCCESS)
          goto fail_surface_state_pool;
    }
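
To see why the new assert holds, plug in representative numbers (placeholders,
not the driver's actual MIN_ADDRESS constants): with the binding table pool
1 GiB below the surface state pool, bt_pool_offset is negative, well above
INT32_MIN, and the resulting block pool lands exactly on the binding table
pool's old base address:

#include <assert.h>
#include <stdint.h>

int main(void)
{
   /* Placeholder values, not the driver's real MIN_ADDRESS constants. */
   const uint64_t binding_table_min = 0x140000000ull;
   const uint64_t surface_state_min = 0x180000000ull;

   int64_t bt_pool_offset = (int64_t)binding_table_min -
                            (int64_t)surface_state_min;

   /* Negative, but comfortably above INT32_MIN, so it survives the assert
    * and fits in anv_state_pool::start_offset. */
   assert(INT32_MIN < bt_pool_offset && bt_pool_offset < 0);

   /* The pool's block pool is created at base_address + start_offset,
    * which lands right back on the binding table pool's own base. */
   assert(surface_state_min + bt_pool_offset == binding_table_min);
   return 0;
}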


@@ -881,6 +881,11 @@ struct anv_state_table {
 struct anv_state_pool {
    struct anv_block_pool block_pool;
 
+   /* Offset into the relevant state base address where the state pool starts
+    * allocating memory.
+    */
+   int32_t start_offset;
+
    struct anv_state_table table;
 
    /* The size of blocks which will be allocated from the block pool */
@@ -925,7 +930,8 @@ size);
 
 VkResult anv_state_pool_init(struct anv_state_pool *pool,
                              struct anv_device *device,
-                             uint64_t start_address,
+                             uint64_t base_address,
+                             int32_t start_offset,
                              uint32_t block_size);
 void anv_state_pool_finish(struct anv_state_pool *pool);
 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,


@@ -46,7 +46,7 @@ int main(int argc, char **argv)
    anv_bo_cache_init(&device.bo_cache);
 
    for (unsigned i = 0; i < NUM_RUNS; i++) {
-      anv_state_pool_init(&state_pool, &device, 4096, 256);
+      anv_state_pool_init(&state_pool, &device, 4096, 0, 256);
 
       /* Grab one so a zero offset is impossible */
       anv_state_pool_alloc(&state_pool, 16, 16);


@@ -43,7 +43,7 @@ int main(int argc, char **argv)
 
    pthread_mutex_init(&device.mutex, NULL);
    anv_bo_cache_init(&device.bo_cache);
-   anv_state_pool_init(&state_pool, &device, 4096, 4096);
+   anv_state_pool_init(&state_pool, &device, 4096, 0, 4096);
 
    /* Grab one so a zero offset is impossible */
    anv_state_pool_alloc(&state_pool, 16, 16);


@@ -64,7 +64,7 @@ static void run_test()
 
    pthread_mutex_init(&device.mutex, NULL);
    anv_bo_cache_init(&device.bo_cache);
-   anv_state_pool_init(&state_pool, &device, 4096, 64);
+   anv_state_pool_init(&state_pool, &device, 4096, 0, 64);
 
    pthread_barrier_init(&barrier, NULL, NUM_THREADS);


@@ -37,7 +37,7 @@ int main(int argc, char **argv)
 
    pthread_mutex_init(&device.mutex, NULL);
    anv_bo_cache_init(&device.bo_cache);
-   anv_state_pool_init(&state_pool, &device, 4096, 4096);
+   anv_state_pool_init(&state_pool, &device, 4096, 0, 4096);
 
    /* Get the size of the underlying block_pool */
    struct anv_block_pool *bp = &state_pool.block_pool;