r600g/compute: Defrag the pool if it's necessary

This patch adds a new member to the pool to track its status.
For now it is used only for the 'fragmented' status, but it
could be extended to cover more statuses if needed.

The pool is considered fragmented if an item that isn't the
last one is freed or demoted.

This strategy has a known imprecision, although it shouldn't
cause any bug: if we have two items, A and B, and free A first,
the pool gains the 'fragmented' status. If we then free B, the
pool keeps the 'fragmented' status even though it is no longer
fragmented.

Reviewed-by: Tom Stellard <thomas.stellard@amd.com>
Author: Bruno Jiménez, 2014-07-16 23:12:45 +02:00 (committed by Tom Stellard)
Parent: d8b6f0dacb
Commit: 90d7b09ed2
2 changed files with 19 additions and 17 deletions

src/gallium/drivers/r600/compute_memory_pool.c

@@ -262,23 +262,10 @@ int compute_memory_finalize_pending(struct compute_memory_pool* pool,
 		unallocated += align(item->size_in_dw, ITEM_ALIGNMENT);
 	}
 
-	/* If we require more space than the size of the pool, then grow the
-	 * pool.
-	 *
-	 * XXX: I'm pretty sure this won't work. Imagine this scenario:
-	 *
-	 * Offset Item Size
-	 *   0     A    50
-	 * 200     B    50
-	 * 400     C    50
-	 *
-	 * Total size = 450
-	 * Allocated size = 150
-	 * Pending Item D Size = 200
-	 *
-	 * In this case, there are 300 units of free space in the pool, but
-	 * they aren't contiguous, so it will be impossible to allocate Item D.
-	 */
+	if (pool->status & POOL_FRAGMENTED) {
+		compute_memory_defrag(pool, pipe);
+	}
+
 	if (pool->size_in_dw < allocated + unallocated) {
 		err = compute_memory_grow_pool(pool, pipe, allocated + unallocated);
 		if (err == -1)
@@ -324,6 +311,8 @@ void compute_memory_defrag(struct compute_memory_pool *pool,
 		last_pos += align(item->size_in_dw, ITEM_ALIGNMENT);
 	}
+
+	pool->status &= ~POOL_FRAGMENTED;
 }
 
 int compute_memory_promote_item(struct compute_memory_pool *pool,
@@ -430,6 +419,10 @@ void compute_memory_demote_item(struct compute_memory_pool *pool,
 	/* Remember to mark the buffer as 'pending' by setting start_in_dw to -1 */
 	item->start_in_dw = -1;
+
+	if (item->link.next != pool->item_list) {
+		pool->status |= POOL_FRAGMENTED;
+	}
 }
 
 /**
@@ -533,6 +526,11 @@ void compute_memory_free(struct compute_memory_pool* pool, int64_t id)
 	LIST_FOR_EACH_ENTRY_SAFE(item, next, pool->item_list, link) {
 		if (item->id == id) {
+
+			if (item->link.next != pool->item_list) {
+				pool->status |= POOL_FRAGMENTED;
+			}
+
 			list_del(&item->link);
 			if (item->real_buffer) {

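For context, the defragmentation triggered above works by compacting every
item toward offset 0, which makes all free space contiguous again.
compute_memory_defrag() already exists in compute_memory_pool.c and also moves
each item's data inside the pool buffer; the standalone sketch below (the
types and the align_up() helper are simplified stand-ins) only recomputes the
offsets, matching the last_pos arithmetic visible in the hunk above:

#include <stddef.h>
#include <stdint.h>

#define POOL_FRAGMENTED  (1 << 0)
#define ITEM_ALIGNMENT   1024	/* assumed alignment, in dwords */

struct item {
	int64_t start_in_dw;	/* offset of the item inside the pool */
	int64_t size_in_dw;
	struct item *next;	/* NULL-terminated for this sketch */
};

static int64_t align_up(int64_t v, int64_t a)
{
	return (v + a - 1) & ~(a - 1);
}

/* Slide every item down to the end of the previous one, so the free
 * space ends up contiguous; then the flag can be cleared, as the
 * second hunk of this commit does at the end of the real pass. */
static void defrag(struct item *items, unsigned *status)
{
	struct item *it;
	int64_t last_pos = 0;

	for (it = items; it; it = it->next) {
		it->start_in_dw = last_pos;	/* the real code also moves the data */
		last_pos += align_up(it->size_in_dw, ITEM_ALIGNMENT);
	}
	*status &= ~POOL_FRAGMENTED;
}

int main(void)
{
	struct item b = { 2048, 100, NULL };
	struct item a = { 1024, 100, &b };	/* hole at [0, 1024) */
	unsigned status = POOL_FRAGMENTED;

	defrag(&a, &status);	/* a moves to 0, b to 1024; flag cleared */
	return (status & POOL_FRAGMENTED) ? 1 : 0;
}
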
src/gallium/drivers/r600/compute_memory_pool.h

@@ -32,6 +32,8 @@
 #define ITEM_FOR_PROMOTING (1<<2)
 #define ITEM_FOR_DEMOTING (1<<3)
 
+#define POOL_FRAGMENTED (1<<0)
+
 struct compute_memory_pool;
 
 struct compute_memory_item
@@ -60,6 +62,8 @@ struct compute_memory_pool
 	uint32_t *shadow;	/// host copy of the pool, used for defragmentation
 
+	uint32_t status;	/**< Status of the pool */
+
 	struct list_head *item_list;	/// Allocated memory chunks in the buffer; they must be ordered by "start_in_dw"
 	struct list_head *unallocated_list;	/// Unallocated memory chunks
 };
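
The commit message leaves room for more statuses in the new field. A minimal
sketch of how a second flag would coexist with POOL_FRAGMENTED, assuming the
header above is included; POOL_GROWING and status_example() are hypothetical,
invented purely for illustration. Note that the POOL_* bits live in their own
namespace, separate from the ITEM_* flags, because they apply to pool->status
rather than to an item's own status field:

/* POOL_GROWING is hypothetical; only POOL_FRAGMENTED exists in this commit. */
#define POOL_GROWING (1 << 1)

static void status_example(struct compute_memory_pool *pool)
{
	pool->status |= POOL_GROWING;			/* set   */
	if (pool->status & POOL_FRAGMENTED)		/* test  */
		pool->status &= ~POOL_FRAGMENTED;	/* clear */
}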