panfrost: Avoid accessing pan_pool fields directly

Having panfrost_batch access the pan_pool fields directly makes it hard
to change pan_pool internals — for instance, replacing the hash table
with a dynamic array. Let's hide pan_pool internals behind helpers that
do what we need.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6494>
This commit is contained in:
Boris Brezillon 2020-08-24 11:48:10 +02:00 committed by Marge Bot
parent 4047c691bf
commit d87ab72ce9
3 changed files with 54 additions and 11 deletions

View file

@ -175,11 +175,8 @@ panfrost_free_batch(struct panfrost_batch *batch)
hash_table_foreach(batch->bos, entry)
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
hash_table_foreach(batch->pool.bos, entry)
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
hash_table_foreach(batch->invisible_pool.bos, entry)
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
panfrost_pool_cleanup(&batch->pool);
panfrost_pool_cleanup(&batch->invisible_pool);
util_dynarray_foreach(&batch->dependencies,
struct panfrost_batch_fence *, dep) {
@ -980,17 +977,19 @@ panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
submit.jc = first_job_desc;
submit.requirements = reqs;
bo_handles = calloc(batch->pool.bos->entries + batch->invisible_pool.bos->entries + batch->bos->entries + 1, sizeof(*bo_handles));
bo_handles = calloc(panfrost_pool_num_bos(&batch->pool) +
panfrost_pool_num_bos(&batch->invisible_pool) +
batch->bos->entries + 1,
sizeof(*bo_handles));
assert(bo_handles);
hash_table_foreach(batch->bos, entry)
panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
hash_table_foreach(batch->pool.bos, entry)
panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
hash_table_foreach(batch->invisible_pool.bos, entry)
panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);
panfrost_pool_get_bo_handles(&batch->pool, bo_handles + submit.bo_handle_count);
submit.bo_handle_count += panfrost_pool_num_bos(&batch->pool);
panfrost_pool_get_bo_handles(&batch->invisible_pool, bo_handles + submit.bo_handle_count);
submit.bo_handle_count += panfrost_pool_num_bos(&batch->invisible_pool);
/* Used by all tiler jobs (XXX: skip for compute-only) */
if (!(reqs & PANFROST_JD_REQ_FS))

View file

@ -71,6 +71,38 @@ panfrost_pool_init(struct pan_pool *pool, void *memctx,
panfrost_pool_alloc_backing(pool, TRANSIENT_SLAB_SIZE);
}
/* Hash-table entry destructor: drops the pool's reference on the BO stored
 * as the entry key. Passed as the delete callback to
 * _mesa_hash_table_destroy() in panfrost_pool_cleanup(). */
static void delete_bo_entry(struct hash_entry *entry)
{
panfrost_bo_unreference((struct panfrost_bo *)entry->key);
}
/* Release all resources held by a pool: destroys the BO hash table and
 * unreferences every BO it tracked (via delete_bo_entry). The pool must
 * not be used after this call. */
void
panfrost_pool_cleanup(struct pan_pool *pool)
{
_mesa_hash_table_destroy(pool->bos, delete_bo_entry);
}
/* Copy the GEM handle of every BO in the pool into the caller-provided
 * array. The caller must size `handles` to at least
 * panfrost_pool_num_bos(pool) entries. Also marks each BO as
 * GPU-accessed (read/write) as a side effect — see comment below. */
void
panfrost_pool_get_bo_handles(struct pan_pool *pool, uint32_t *handles)
{
unsigned idx = 0;
hash_table_foreach(pool->bos, entry) {
struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
/* A valid GEM handle is always non-zero. */
assert(bo->gem_handle > 0);
handles[idx++] = bo->gem_handle;
/* Update the BO access flags so that panfrost_bo_wait() knows
 * about all pending accesses.
 * We only keep the READ/WRITE info since this is all the BO
 * wait logic cares about.
 * We also preserve existing flags as this batch might not
 * be the first one to access the BO.
 */
bo->gpu_access |= PAN_BO_ACCESS_RW;
}
}
struct panfrost_transfer
panfrost_pool_alloc_aligned(struct pan_pool *pool, size_t sz, unsigned alignment)
{

View file

@ -55,6 +55,18 @@ panfrost_pool_init(struct pan_pool *pool, void *memctx,
struct panfrost_device *dev, unsigned create_flags,
bool prealloc);
void
panfrost_pool_cleanup(struct pan_pool *pool);
/* Number of BOs currently tracked by the pool. Accessor that hides the
 * hash-table-backed storage from callers so the internal representation
 * can change without touching them. */
static inline unsigned
panfrost_pool_num_bos(struct pan_pool *pool)
{
return pool->bos->entries;
}
void
panfrost_pool_get_bo_handles(struct pan_pool *pool, uint32_t *handles);
/* Represents a fat pointer for GPU-mapped memory, returned from the transient
* allocator and not used for much else */