iris: Replace BO_ALLOC_* macros with an enum

This change makes the allocation flags strongly typed and gives more context.
No changes in behavior expected here.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30723>
This commit is contained in:
José Roberto de Souza 2024-10-31 07:05:35 -07:00 committed by Marge Bot
parent a13a6656dd
commit f0f896ef21
6 changed files with 50 additions and 47 deletions

View file

@ -54,7 +54,7 @@ static uint32_t
i915_gem_create(struct iris_bufmgr *bufmgr,
const struct intel_memory_class_instance **regions,
uint16_t regions_count, uint64_t size,
enum iris_heap heap, unsigned alloc_flags)
enum iris_heap heap, enum bo_alloc_flags alloc_flags)
{
const struct intel_device_info *devinfo =
iris_bufmgr_get_device_info(bufmgr);
@ -415,7 +415,7 @@ i915_batch_submit(struct iris_batch *batch)
}
static bool
i915_gem_vm_bind(struct iris_bo *bo, unsigned flags)
i915_gem_vm_bind(struct iris_bo *bo, enum bo_alloc_flags flags)
{
/*
* i915 does not support VM_BIND yet. The binding operation happens at

View file

@ -286,7 +286,7 @@ find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
*/
static struct bo_cache_bucket *
bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size,
enum iris_heap heap, unsigned flags)
enum iris_heap heap, enum bo_alloc_flags flags)
{
if (flags & BO_ALLOC_PROTECTED)
return NULL;
@ -723,7 +723,7 @@ iris_slab_alloc(void *priv,
{
struct iris_bufmgr *bufmgr = priv;
struct iris_slab *slab = calloc(1, sizeof(struct iris_slab));
uint32_t flags = BO_ALLOC_NO_SUBALLOC;
enum bo_alloc_flags flags = BO_ALLOC_NO_SUBALLOC;
unsigned slab_size = 0;
/* We only support slab allocation for IRIS_MEMZONE_OTHER */
enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;
@ -847,7 +847,7 @@ fail:
* This determines the cacheability, coherency, and mmap mode settings.
*/
static enum iris_heap
flags_to_heap(struct iris_bufmgr *bufmgr, unsigned flags)
flags_to_heap(struct iris_bufmgr *bufmgr, enum bo_alloc_flags flags)
{
const struct intel_device_info *devinfo = &bufmgr->devinfo;
@ -895,7 +895,7 @@ flags_to_heap(struct iris_bufmgr *bufmgr, unsigned flags)
static bool
zero_bo(struct iris_bufmgr *bufmgr,
unsigned flags,
enum bo_alloc_flags flags,
struct iris_bo *bo)
{
assert(flags & BO_ALLOC_ZEROED);
@ -925,7 +925,7 @@ alloc_bo_from_slabs(struct iris_bufmgr *bufmgr,
const char *name,
uint64_t size,
uint32_t alignment,
unsigned flags)
enum bo_alloc_flags flags)
{
if (flags & BO_ALLOC_NO_SUBALLOC)
return NULL;
@ -1010,7 +1010,7 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
uint32_t alignment,
enum iris_memory_zone memzone,
enum iris_mmap_mode mmap_mode,
unsigned flags,
enum bo_alloc_flags flags,
bool match_zone)
{
if (!bucket)
@ -1101,7 +1101,7 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
}
static struct iris_bo *
alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size, unsigned flags)
alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size, enum bo_alloc_flags flags)
{
struct iris_bo *bo = bo_calloc();
if (!bo)
@ -1207,7 +1207,7 @@ iris_bo_alloc(struct iris_bufmgr *bufmgr,
uint64_t size,
uint32_t alignment,
enum iris_memory_zone memzone,
unsigned flags)
enum bo_alloc_flags flags)
{
struct iris_bo *bo;
unsigned int page_size = getpagesize();

View file

@ -367,33 +367,35 @@ struct iris_bo {
};
};
/* No special attributes. */
#define BO_ALLOC_PLAIN 0
/* Content is set to 0, only done in cache and slabs code paths. */
#define BO_ALLOC_ZEROED (1<<0)
/* Allocate a cached and coherent BO, this has a performance cost in
* integrated platforms without LLC.
* Should only be used in BOs that will be written and read from CPU often.
*/
#define BO_ALLOC_CACHED_COHERENT (1<<1)
/* Place BO only on smem. */
#define BO_ALLOC_SMEM (1<<2)
/* BO can be sent to display. */
#define BO_ALLOC_SCANOUT (1<<3)
/* No sub-allocation(slabs). */
#define BO_ALLOC_NO_SUBALLOC (1<<4)
/* Place BO only on lmem. */
#define BO_ALLOC_LMEM (1<<5)
/* Content is protected, can't be mapped and needs special handling. */
#define BO_ALLOC_PROTECTED (1<<6)
/* BO can be exported to other applications. */
#define BO_ALLOC_SHARED (1<<7)
/* BO will be captured in the KMD error dump. */
#define BO_ALLOC_CAPTURE (1<<8)
/* Can be mapped. */
#define BO_ALLOC_CPU_VISIBLE (1<<9)
/* BO content is compressed. */
#define BO_ALLOC_COMPRESSED (1<<10)
/* Allocation flags for iris BOs.
 *
 * These are bitmask values and may be OR'd together (e.g. callers do
 * `flags |= BO_ALLOC_COMPRESSED` and test with `flags & BO_ALLOC_*`).
 */
enum bo_alloc_flags {
/* No special attributes. */
BO_ALLOC_PLAIN = 0,
/* Content is set to 0, only done in cache and slabs code paths. */
BO_ALLOC_ZEROED = (1<<0),
/* Allocate a cached and coherent BO, this has a performance cost in
* integrated platforms without LLC.
* Should only be used in BOs that will be written and read from CPU often.
*/
BO_ALLOC_CACHED_COHERENT = (1<<1),
/* Place BO only on smem. */
BO_ALLOC_SMEM = (1<<2),
/* BO can be sent to display. */
BO_ALLOC_SCANOUT = (1<<3),
/* No sub-allocation (slabs). */
BO_ALLOC_NO_SUBALLOC = (1<<4),
/* Place BO only on lmem. */
BO_ALLOC_LMEM = (1<<5),
/* Content is protected, can't be mapped and needs special handling. */
BO_ALLOC_PROTECTED = (1<<6),
/* BO can be exported to other applications. */
BO_ALLOC_SHARED = (1<<7),
/* BO will be captured in the KMD error dump. */
BO_ALLOC_CAPTURE = (1<<8),
/* Can be mapped. */
BO_ALLOC_CPU_VISIBLE = (1<<9),
/* BO content is compressed. */
BO_ALLOC_COMPRESSED = (1<<10),
};
/**
* Allocate a buffer object.
@ -407,7 +409,7 @@ struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
uint64_t size,
uint32_t alignment,
enum iris_memory_zone memzone,
unsigned flags);
enum bo_alloc_flags flags);
struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,

View file

@ -31,6 +31,7 @@
struct iris_batch;
struct iris_bo;
struct iris_bufmgr;
enum bo_alloc_flags;
enum iris_heap;
enum iris_madvice;
@ -38,7 +39,7 @@ struct iris_kmd_backend {
uint32_t (*gem_create)(struct iris_bufmgr *bufmgr,
const struct intel_memory_class_instance **regions,
uint16_t regions_count, uint64_t size,
enum iris_heap heap_flags, unsigned alloc_flags);
enum iris_heap heap_flags, enum bo_alloc_flags alloc_flags);
uint32_t (*gem_create_userptr)(struct iris_bufmgr *bufmgr, void *ptr,
uint64_t size);
int (*gem_close)(struct iris_bufmgr *bufmgr, struct iris_bo *bo);
@ -47,7 +48,7 @@ struct iris_kmd_backend {
void *(*gem_mmap)(struct iris_bufmgr *bufmgr, struct iris_bo *bo);
enum pipe_reset_status (*batch_check_for_reset)(struct iris_batch *batch);
int (*batch_submit)(struct iris_batch *batch);
bool (*gem_vm_bind)(struct iris_bo *bo, unsigned flags);
bool (*gem_vm_bind)(struct iris_bo *bo, enum bo_alloc_flags flags);
bool (*gem_vm_unbind)(struct iris_bo *bo);
};

View file

@ -462,7 +462,7 @@ iris_resource_disable_aux(struct iris_resource *res)
res->aux.state = NULL;
}
static unsigned
static enum bo_alloc_flags
iris_resource_alloc_flags(const struct iris_screen *screen,
const struct pipe_resource *templ,
struct iris_resource *res)
@ -1027,7 +1027,7 @@ static bool
iris_resource_image_is_pat_compressible(const struct iris_screen *screen,
const struct pipe_resource *templ,
struct iris_resource *res,
unsigned flags)
enum bo_alloc_flags flags)
{
assert(templ->target != PIPE_BUFFER);
@ -1126,7 +1126,7 @@ iris_resource_create_for_image(struct pipe_screen *pscreen,
const char *name = "miptree";
enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;
unsigned flags = iris_resource_alloc_flags(screen, templ, res);
enum bo_alloc_flags flags = iris_resource_alloc_flags(screen, templ, res);
if (iris_resource_image_is_pat_compressible(screen, templ, res, flags))
flags |= BO_ALLOC_COMPRESSED;
@ -1999,7 +1999,7 @@ iris_invalidate_buffer(struct iris_context *ice, struct iris_resource *res)
return false;
struct iris_bo *old_bo = res->bo;
unsigned flags = old_bo->real.protected ? BO_ALLOC_PROTECTED : BO_ALLOC_PLAIN;
enum bo_alloc_flags flags = old_bo->real.protected ? BO_ALLOC_PROTECTED : BO_ALLOC_PLAIN;
struct iris_bo *new_bo =
iris_bo_alloc(screen->bufmgr, res->bo->name, res->base.b.width0,
iris_buffer_alignment(res->base.b.width0),

View file

@ -39,7 +39,7 @@ static uint32_t
xe_gem_create(struct iris_bufmgr *bufmgr,
const struct intel_memory_class_instance **regions,
uint16_t regions_count, uint64_t size,
enum iris_heap heap_flags, unsigned alloc_flags)
enum iris_heap heap_flags, enum bo_alloc_flags alloc_flags)
{
struct drm_xe_ext_set_property pxp_ext = {
.base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
@ -109,7 +109,7 @@ xe_gem_mmap(struct iris_bufmgr *bufmgr, struct iris_bo *bo)
}
static inline int
xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op, unsigned iris_flags)
xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op, enum bo_alloc_flags iris_flags)
{
struct iris_bufmgr *bufmgr = bo->bufmgr;
struct intel_bind_timeline *bind_timeline = iris_bufmgr_get_bind_timeline(bufmgr);
@ -169,7 +169,7 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op, unsigned iris_flags)
}
static bool
xe_gem_vm_bind(struct iris_bo *bo, unsigned flags)
xe_gem_vm_bind(struct iris_bo *bo, enum bo_alloc_flags flags)
{
return xe_gem_vm_bind_op(bo, DRM_XE_VM_BIND_OP_MAP, flags) == 0;
}