gallium/pb_slab: move group_index and entry_size from pb_slab_entry to pb_slab

This removes 8 bytes from every slab entry, and thus from every amdgpu_bo_slab.

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26643>
Marek Olšák, 2023-12-08 23:39:36 -05:00 (committed by Marge Bot)
commit 9431c33899, parent 5a3bacc376
6 changed files with 14 additions and 14 deletions
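
The 8 bytes are the two unsigned fields themselves: group_index and entry_size are 4 bytes each, and a pb_slab_entry is embedded in every suballocated buffer, while a pb_slab is shared by all entries carved from it, so the pair is now stored once per slab instead of once per buffer. A standalone sketch of the layout change (hypothetical names entry_before/entry_after; list_head stands in for mesa's util/list.h type; sizes assume an LP64 target):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };
struct pb_slab;  /* only a pointer to it is needed here */

/* Before: 16 (list_head) + 8 (slab pointer) + 4 + 4 = 32 bytes. */
struct entry_before {
   struct list_head head;
   struct pb_slab *slab;   /* the slab that contains this buffer */
   unsigned group_index;   /* index into pb_slabs::groups */
   unsigned entry_size;
};

/* After: 24 bytes; both fields now live once in the shared pb_slab and
 * are reached as entry->slab->group_index / entry->slab->entry_size. */
struct entry_after {
   struct list_head head;
   struct pb_slab *slab;
};

int main(void)
{
   printf("before: %zu bytes, after: %zu bytes\n",   /* prints 32 and 24 */
          sizeof(struct entry_before), sizeof(struct entry_after));
   return 0;
}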

src/gallium/auxiliary/pipebuffer/pb_slab.c

@@ -61,7 +61,7 @@ pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
    /* Add slab to the group's list if it isn't already linked. */
    if (!list_is_linked(&slab->head)) {
-      struct pb_slab_group *group = &slabs->groups[entry->group_index];
+      struct pb_slab_group *group = &slabs->groups[entry->slab->group_index];
       list_addtail(&slab->head, &group->slabs);
    }

src/gallium/auxiliary/pipebuffer/pb_slab.h

@@ -62,8 +62,6 @@ struct pb_slab_entry
 {
    struct list_head head;
    struct pb_slab *slab; /* the slab that contains this buffer */
-   unsigned group_index; /* index into pb_slabs::groups */
-   unsigned entry_size;
 };
 
 /* Descriptor of a slab from which many entries are carved out.
@@ -78,6 +76,8 @@ struct pb_slab
    struct list_head free; /* list of free pb_slab_entry structures */
    unsigned num_free; /* number of entries in free list */
    unsigned num_entries; /* total number of entries */
+   unsigned group_index; /* index into pb_slabs::groups */
+   unsigned entry_size;
 };
 
 /* Callback function that is called when a new slab needs to be allocated
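
With the fields on the slab, consumers reach them through the back-pointer every entry already carries, trading one extra dereference for the per-entry memory, as the pb_slab_reclaim() hunk above shows. A runnable miniature of that access pattern (hypothetical my_* names, not the mesa API):

#include <stdio.h>

struct my_slab {
   unsigned group_index;   /* stored once per slab */
   unsigned entry_size;
};

struct my_entry {
   struct my_slab *slab;   /* the back-pointer each entry keeps anyway */
};

/* Analogous to pb_slab_reclaim(): the group index is read through
 * entry->slab rather than from a per-entry copy. */
static unsigned entry_group(const struct my_entry *entry)
{
   return entry->slab->group_index;
}

int main(void)
{
   struct my_slab slab = { .group_index = 3, .entry_size = 4096 };
   struct my_entry entry = { .slab = &slab };
   printf("group %u, entry size %u\n",
          entry_group(&entry), entry.slab->entry_size);
   return 0;
}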

src/gallium/drivers/iris/iris_bufmgr.c

@@ -793,6 +793,8 @@ iris_slab_alloc(void *priv,
    slab->base.num_entries = slab_size / entry_size;
    slab->base.num_free = slab->base.num_entries;
+   slab->base.group_index = group_index;
+   slab->base.entry_size = entry_size;
    slab->entry_size = entry_size;
    slab->entries = calloc(slab->base.num_entries, sizeof(*slab->entries));
    if (!slab->entries)
@@ -815,8 +817,6 @@
       bo->zeroed = slab->bo->zeroed;
       bo->slab.entry.slab = &slab->base;
-      bo->slab.entry.group_index = group_index;
-      bo->slab.entry.entry_size = entry_size;
       bo->slab.real = iris_get_backing_bo(slab->bo);

src/gallium/drivers/zink/zink_bo.c

@@ -1242,6 +1242,8 @@ bo_slab_alloc(void *priv, unsigned mem_type_idx, unsigned entry_size, unsigned group_index
    slab->base.num_entries = slab_size / entry_size;
    slab->base.num_free = slab->base.num_entries;
+   slab->base.group_index = group_index;
+   slab->base.entry_size = entry_size;
    slab->entry_size = entry_size;
    slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
    if (!slab->entries)
@@ -1260,8 +1262,6 @@
       bo->offset = slab->buffer->offset + i * entry_size;
       bo->unique_id = base_id + i;
       bo->u.slab.entry.slab = &slab->base;
-      bo->u.slab.entry.group_index = group_index;
-      bo->u.slab.entry.entry_size = entry_size;
 
       if (slab->buffer->mem) {
          /* The slab is not suballocated. */

src/gallium/winsys/amdgpu/drm/amdgpu_bo.c

@@ -638,11 +638,11 @@ static struct pb_slabs *get_slabs(struct amdgpu_winsys *ws, uint64_t size)
 
 static unsigned get_slab_wasted_size(struct amdgpu_winsys *ws, struct amdgpu_bo_slab *bo)
 {
-   assert(bo->b.base.size <= bo->entry.entry_size);
+   assert(bo->b.base.size <= bo->entry.slab->entry_size);
    assert(bo->b.base.size < (1 << bo->b.base.alignment_log2) ||
           bo->b.base.size < 1 << ws->bo_slabs[0].min_order ||
-          bo->b.base.size > bo->entry.entry_size / 2);
-   return bo->entry.entry_size - bo->b.base.size;
+          bo->b.base.size > bo->entry.slab->entry_size / 2);
+   return bo->entry.slab->entry_size - bo->b.base.size;
 }
 
 static void amdgpu_bo_slab_destroy(struct radeon_winsys *rws, struct pb_buffer *_buf)
@@ -744,6 +744,8 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
    slab->base.num_entries = slab_size / entry_size;
    slab->base.num_free = slab->base.num_entries;
+   slab->base.group_index = group_index;
+   slab->base.entry_size = entry_size;
    slab->entry_size = entry_size;
    slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
    if (!slab->entries)
@@ -773,8 +775,6 @@
       }
 
       bo->entry.slab = &slab->base;
-      bo->entry.group_index = group_index;
-      bo->entry.entry_size = entry_size;
       list_addtail(&bo->entry.head, &slab->base.free);
    }

src/gallium/winsys/radeon/drm/radeon_drm_bo.c

@@ -774,6 +774,8 @@ struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
    slab->base.num_entries = slab->buffer->base.size / entry_size;
    slab->base.num_free = slab->base.num_entries;
+   slab->base.group_index = group_index;
+   slab->base.entry_size = entry_size;
    slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
    if (!slab->entries)
       goto fail_buffer;
@@ -794,8 +796,6 @@
       bo->initial_domain = domains;
       bo->hash = base_hash + i;
       bo->u.slab.entry.slab = &slab->base;
-      bo->u.slab.entry.group_index = group_index;
-      bo->u.slab.entry.entry_size = entry_size;
       bo->u.slab.real = slab->buffer;
 
       list_addtail(&bo->u.slab.entry.head, &slab->base.free);