gallium/pb: pass an optional winsys pointer to the buffer destroy function
This will allow removing the winsys pointer from buffers.

Reviewed-by: Zoltán Böszörményi <zboszor@gmail.com>
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9809>
parent 33b2daab1a
commit 6c6a39682e
17 changed files with 61 additions and 46 deletions
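The patch threads an opaque void *winsys argument through the buffer destroy and reclaim callbacks: pb_vtbl::destroy, pb_destroy(), the pb_cache destroy_buffer/can_reclaim hooks, and the winsys-level implementations (amdgpu, radeon, svga, d3d12). Callers that have no winsys at hand pass NULL for now. Below is a minimal sketch of the new callback shape, assuming the pb_buffer.h declarations from this patch; the my_* names and include paths are illustrative, not part of Mesa.

#include "pipebuffer/pb_buffer.h"   /* struct pb_buffer, struct pb_vtbl */
#include "util/u_memory.h"          /* FREE */

struct my_buffer {
   struct pb_buffer base;           /* reference count, size, vtbl, ... */
};

/* The destroy hook now receives the winsys pointer handed to pb_destroy()
 * or pb_reference_with_winsys(), so the buffer no longer has to store it. */
static void
my_buffer_destroy(void *winsys, struct pb_buffer *buf)
{
   struct my_buffer *mybuf = (struct my_buffer *)buf;

   (void)winsys;                    /* may be NULL; would release winsys-side storage */
   FREE(mybuf);
}

static const struct pb_vtbl my_buffer_vtbl = {
   .destroy = my_buffer_destroy,
   /* .map, .unmap, .validate, .fence, .get_base_buffer, ... */
};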
@@ -142,7 +142,7 @@ struct pb_buffer
  */
 struct pb_vtbl
 {
-   void (*destroy)(struct pb_buffer *buf);
+   void (*destroy)(void *winsys, struct pb_buffer *buf);
 
    /**
     * Map the entire data store of a buffer object into the client's address.
@@ -245,13 +245,13 @@ pb_fence(struct pb_buffer *buf, struct pipe_fence_handle *fence)
 
 
 static inline void
-pb_destroy(struct pb_buffer *buf)
+pb_destroy(void *winsys, struct pb_buffer *buf)
 {
    assert(buf);
    if (!buf)
       return;
    assert(!pipe_is_referenced(&buf->reference));
-   buf->vtbl->destroy(buf);
+   buf->vtbl->destroy(winsys, buf);
 }
 
 
@@ -262,10 +262,21 @@ pb_reference(struct pb_buffer **dst,
    struct pb_buffer *old = *dst;
 
    if (pipe_reference(&(*dst)->reference, &src->reference))
-      pb_destroy(old);
+      pb_destroy(NULL, old);
    *dst = src;
 }
 
+static inline void
+pb_reference_with_winsys(void *winsys,
+                         struct pb_buffer **dst,
+                         struct pb_buffer *src)
+{
+   struct pb_buffer *old = *dst;
+
+   if (pipe_reference(&(*dst)->reference, &src->reference))
+      pb_destroy(winsys, old);
+   *dst = src;
+}
+
 /**
  * Utility function to check whether the provided alignment is consistent with
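The new pb_reference_with_winsys() helper lets callers that do have a winsys pointer forward it to the destroy callback when the last reference is dropped; plain pb_reference() keeps working but passes NULL. A small, hypothetical usage sketch (the replace_buffer/ws names are illustrative only):

/* Assumes the pb_buffer.h declarations from this patch. */
static void
replace_buffer(void *ws, struct pb_buffer **slot, struct pb_buffer *new_buf)
{
   /* Drops the old reference in *slot; if it was the last one, the buffer's
    * vtbl->destroy(ws, buf) is invoked with the winsys passed here. */
   pb_reference_with_winsys(ws, slot, new_buf);
}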
@@ -645,7 +645,7 @@ fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
 
 
 static void
-fenced_buffer_destroy(struct pb_buffer *buf)
+fenced_buffer_destroy(void *winsys, struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -94,7 +94,7 @@ pb_cache_manager_remove_buffer(struct pb_buffer *pb_buf)
  * Actually destroy the buffer.
  */
 static void
-_pb_cache_buffer_destroy(struct pb_buffer *pb_buf)
+_pb_cache_buffer_destroy(void *winsys, struct pb_buffer *pb_buf)
 {
    struct pb_cache_buffer *buf = pb_cache_buffer(pb_buf);
 
@@ -105,7 +105,7 @@ _pb_cache_buffer_destroy(struct pb_buffer *pb_buf)
 
 
 static void
-pb_cache_buffer_destroy(struct pb_buffer *_buf)
+pb_cache_buffer_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
    struct pb_cache_manager *mgr = buf->mgr;
@@ -178,7 +178,7 @@ pb_cache_buffer_vtbl = {
 
 
 static bool
-pb_cache_can_reclaim_buffer(struct pb_buffer *_buf)
+pb_cache_can_reclaim_buffer(void *winsys, struct pb_buffer *_buf)
 {
    struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
 
@@ -305,7 +305,7 @@ pb_cache_manager_create(struct pb_manager *provider,
    mgr->base.flush = pb_cache_manager_flush;
    mgr->provider = provider;
    pb_cache_init(&mgr->cache, 1, usecs, size_factor, bypass_usage,
-                 maximum_cache_size,
+                 maximum_cache_size, NULL,
                  _pb_cache_buffer_destroy,
                  pb_cache_can_reclaim_buffer);
    return &mgr->base;
@@ -226,7 +226,7 @@ pb_debug_buffer_check(struct pb_debug_buffer *buf)
 
 
 static void
-pb_debug_buffer_destroy(struct pb_buffer *_buf)
+pb_debug_buffer_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
    struct pb_debug_manager *mgr = buf->mgr;
@@ -92,7 +92,7 @@ mm_buffer(struct pb_buffer *buf)
 
 
 static void
-mm_buffer_destroy(struct pb_buffer *buf)
+mm_buffer_destroy(void *winsys, struct pb_buffer *buf)
 {
    struct mm_buffer *mm_buf = mm_buffer(buf);
    struct mm_pb_manager *mm = mm_buf->mgr;
@@ -187,7 +187,7 @@ pb_slab_range_manager(struct pb_manager *mgr)
  * it on the slab FREE list.
  */
 static void
-pb_slab_buffer_destroy(struct pb_buffer *_buf)
+pb_slab_buffer_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
    struct pb_slab *slab = buf->slab;
@@ -47,7 +47,7 @@ destroy_buffer_locked(struct pb_cache_entry *entry)
       --mgr->num_buffers;
       mgr->cache_size -= buf->size;
    }
-   mgr->destroy_buffer(buf);
+   mgr->destroy_buffer(mgr->winsys, buf);
 }
 
 /**
@@ -97,7 +97,7 @@ pb_cache_add_buffer(struct pb_cache_entry *entry)
 
    /* Directly release any buffer that exceeds the limit. */
    if (mgr->cache_size + buf->size > mgr->max_cache_size) {
-      mgr->destroy_buffer(buf);
+      mgr->destroy_buffer(mgr->winsys, buf);
       mtx_unlock(&mgr->mutex);
       return;
    }
@@ -136,7 +136,7 @@ pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
    if (!pb_check_alignment(alignment, buf->alignment))
       return 0;
 
-   return mgr->can_reclaim(buf) ? 1 : -1;
+   return mgr->can_reclaim(mgr->winsys, buf) ? 1 : -1;
 }
 
 /**
@@ -280,8 +280,9 @@ void
 pb_cache_init(struct pb_cache *mgr, uint num_heaps,
               uint usecs, float size_factor,
               unsigned bypass_usage, uint64_t maximum_cache_size,
-              void (*destroy_buffer)(struct pb_buffer *buf),
-              bool (*can_reclaim)(struct pb_buffer *buf))
+              void *winsys,
+              void (*destroy_buffer)(void *winsys, struct pb_buffer *buf),
+              bool (*can_reclaim)(void *winsys, struct pb_buffer *buf))
 {
    unsigned i;
 
@@ -293,6 +294,7 @@ pb_cache_init(struct pb_cache *mgr, uint num_heaps,
       list_inithead(&mgr->buckets[i]);
 
    (void) mtx_init(&mgr->mutex, mtx_plain);
+   mgr->winsys = winsys;
    mgr->cache_size = 0;
    mgr->max_cache_size = maximum_cache_size;
    mgr->num_heaps = num_heaps;
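The pb_cache hunks above store the winsys pointer at init time (mgr->winsys) and forward it whenever a cached buffer is destroyed or tested for reclaim. A hypothetical manager-side wiring sketch, assuming the pb_cache_init() signature introduced by this patch; the my_* names and constants are illustrative:

static void
my_cached_buffer_destroy(void *winsys, struct pb_buffer *buf)
{
   /* winsys is whatever pointer was given to pb_cache_init(); the cache
    * calls mgr->destroy_buffer(mgr->winsys, buf) on eviction. */
   (void)winsys;
   (void)buf;
}

static bool
my_cached_buffer_can_reclaim(void *winsys, struct pb_buffer *buf)
{
   (void)winsys;
   (void)buf;
   return true;   /* e.g. check that the GPU has finished with buf */
}

static void
my_manager_init(struct pb_cache *cache, void *my_winsys)
{
   pb_cache_init(cache, 1 /* num_heaps */,
                 1000000 /* usecs */, 2.0f /* size_factor */,
                 0 /* bypass_usage */, 256 * 1024 * 1024 /* max cache size */,
                 my_winsys,
                 my_cached_buffer_destroy,
                 my_cached_buffer_can_reclaim);
}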
@@ -53,6 +53,7 @@ struct pb_cache
    struct list_head *buckets;
 
    mtx_t mutex;
+   void *winsys;
    uint64_t cache_size;
    uint64_t max_cache_size;
    unsigned num_heaps;
@@ -61,8 +62,8 @@ struct pb_cache
    unsigned bypass_usage;
    float size_factor;
 
-   void (*destroy_buffer)(struct pb_buffer *buf);
-   bool (*can_reclaim)(struct pb_buffer *buf);
+   void (*destroy_buffer)(void *winsys, struct pb_buffer *buf);
+   bool (*can_reclaim)(void *winsys, struct pb_buffer *buf);
 };
 
 void pb_cache_add_buffer(struct pb_cache_entry *entry);
@@ -75,8 +76,9 @@ void pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
 void pb_cache_init(struct pb_cache *mgr, uint num_heaps,
                    uint usecs, float size_factor,
                    unsigned bypass_usage, uint64_t maximum_cache_size,
-                   void (*destroy_buffer)(struct pb_buffer *buf),
-                   bool (*can_reclaim)(struct pb_buffer *buf));
+                   void *winsys,
+                   void (*destroy_buffer)(void *winsys, struct pb_buffer *buf),
+                   bool (*can_reclaim)(void *winsys, struct pb_buffer *buf));
 void pb_cache_deinit(struct pb_cache *mgr);
 
 #endif
@@ -214,7 +214,7 @@ d3d12_bo_unmap(struct d3d12_bo *bo, D3D12_RANGE *range)
 }
 
 static void
-d3d12_buffer_destroy(struct pb_buffer *pbuf)
+d3d12_buffer_destroy(void *winsys, struct pb_buffer *pbuf)
 {
    struct d3d12_buffer *buf = d3d12_buffer(pbuf);
 
@@ -169,7 +169,7 @@ static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
    bo->max_fences = 0;
 }
 
-void amdgpu_bo_destroy(struct pb_buffer *_buf)
+void amdgpu_bo_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
    struct amdgpu_screen_winsys *sws_iter;
@@ -231,7 +231,7 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
    FREE(bo);
 }
 
-static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
+static void amdgpu_bo_destroy_or_cache(void *winsys, struct pb_buffer *_buf)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
 
@@ -240,7 +240,7 @@ static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
    if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(bo->cache_entry);
    else
-     amdgpu_bo_destroy(_buf);
+     amdgpu_bo_destroy(winsys, _buf);
 }
 
 static void amdgpu_clean_up_buffer_managers(struct amdgpu_winsys *ws)
@@ -620,7 +620,7 @@ error_bo_alloc:
    return NULL;
 }
 
-bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
+bool amdgpu_bo_can_reclaim(void *winsys, struct pb_buffer *_buf)
 {
    return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
 }
@@ -629,7 +629,7 @@ bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
 {
    struct amdgpu_winsys_bo *bo = container_of(entry, struct amdgpu_winsys_bo, u.slab.entry);
 
-   return amdgpu_bo_can_reclaim(&bo->base);
+   return amdgpu_bo_can_reclaim(NULL, &bo->base);
 }
 
 static struct pb_slabs *get_slabs(struct amdgpu_winsys *ws, uint64_t size,
@@ -658,7 +658,7 @@ static unsigned get_slab_wasted_size(struct amdgpu_winsys_bo *bo)
    return bo->u.slab.entry.entry_size - bo->base.size;
 }
 
-static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
+static void amdgpu_bo_slab_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
 
@@ -1090,7 +1090,7 @@ sparse_backing_free(struct amdgpu_winsys_bo *bo,
    return true;
 }
 
-static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
+static void amdgpu_bo_sparse_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
    int r;
@@ -119,13 +119,13 @@ struct amdgpu_slab {
    struct amdgpu_winsys_bo *entries;
 };
 
-bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf);
+bool amdgpu_bo_can_reclaim(void *winsys, struct pb_buffer *_buf);
 struct pb_buffer *amdgpu_bo_create(struct amdgpu_winsys *ws,
                                    uint64_t size,
                                    unsigned alignment,
                                    enum radeon_bo_domain domain,
                                    enum radeon_bo_flag flags);
-void amdgpu_bo_destroy(struct pb_buffer *_buf);
+void amdgpu_bo_destroy(void *winsys, struct pb_buffer *_buf);
 void *amdgpu_bo_map(struct pb_buffer *buf,
                     struct radeon_cmdbuf *rcs,
                     enum pipe_map_flags usage);
@@ -442,7 +442,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
       /* Create managers. */
       pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                     500000, aws->check_vm ? 1.0f : 2.0f, 0,
-                    (aws->info.vram_size + aws->info.gart_size) / 8,
+                    (aws->info.vram_size + aws->info.gart_size) / 8, NULL,
                     amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
 
       unsigned min_slab_order = 8; /* 256 bytes */
@@ -350,7 +350,7 @@ out:
    mtx_unlock(&heap->mutex);
 }
 
-void radeon_bo_destroy(struct pb_buffer *_buf)
+void radeon_bo_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_winsys *rws = bo->rws;
@@ -419,7 +419,7 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
    FREE(bo);
 }
 
-static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
+static void radeon_bo_destroy_or_cache(void *winsys, struct pb_buffer *_buf)
 {
    struct radeon_bo *bo = radeon_bo(_buf);
 
@@ -428,7 +428,7 @@ static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
    if (bo->u.real.use_reusable_pool)
       pb_cache_add_buffer(&bo->u.real.cache_entry);
    else
-      radeon_bo_destroy(_buf);
+      radeon_bo_destroy(NULL, _buf);
 }
 
 void *radeon_bo_do_map(struct radeon_bo *bo)
@@ -711,7 +711,7 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
       fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
       fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
       fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
-      radeon_bo_destroy(&bo->base);
+      radeon_bo_destroy(NULL, &bo->base);
       return NULL;
    }
    mtx_lock(&rws->bo_handles_mutex);
@@ -737,7 +737,7 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
    return bo;
 }
 
-bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
+bool radeon_bo_can_reclaim(void *winsys, struct pb_buffer *_buf)
 {
    struct radeon_bo *bo = radeon_bo(_buf);
 
@@ -751,10 +751,10 @@ bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
 {
    struct radeon_bo *bo = container_of(entry, struct radeon_bo, u.slab.entry);
 
-   return radeon_bo_can_reclaim(&bo->base);
+   return radeon_bo_can_reclaim(NULL, &bo->base);
 }
 
-static void radeon_bo_slab_destroy(struct pb_buffer *_buf)
+static void radeon_bo_slab_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct radeon_bo *bo = radeon_bo(_buf);
 
@@ -1156,7 +1156,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
    r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
    if (r && va.operation == RADEON_VA_RESULT_ERROR) {
       fprintf(stderr, "radeon: Failed to assign virtual address space\n");
-      radeon_bo_destroy(&bo->base);
+      radeon_bo_destroy(NULL, &bo->base);
       return NULL;
    }
    mtx_lock(&ws->bo_handles_mutex);
@@ -1287,7 +1287,7 @@ done:
    r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
    if (r && va.operation == RADEON_VA_RESULT_ERROR) {
       fprintf(stderr, "radeon: Failed to assign virtual address space\n");
-      radeon_bo_destroy(&bo->base);
+      radeon_bo_destroy(NULL, &bo->base);
       return NULL;
    }
    mtx_lock(&ws->bo_handles_mutex);
@@ -76,8 +76,8 @@ struct radeon_slab {
    struct radeon_bo *entries;
 };
 
-void radeon_bo_destroy(struct pb_buffer *_buf);
-bool radeon_bo_can_reclaim(struct pb_buffer *_buf);
+void radeon_bo_destroy(void *winsys, struct pb_buffer *_buf);
+bool radeon_bo_can_reclaim(void *winsys, struct pb_buffer *_buf);
 void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws);
 
 bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
@@ -851,7 +851,7 @@ radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
 
    pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                  500000, ws->check_vm ? 1.0f : 2.0f, 0,
-                 MIN2(ws->info.vram_size, ws->info.gart_size),
+                 MIN2(ws->info.vram_size, ws->info.gart_size), NULL,
                  radeon_bo_destroy,
                  radeon_bo_can_reclaim);
 
@@ -501,7 +501,7 @@ fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
 
 
 static void
-fenced_buffer_destroy(struct pb_buffer *buf)
+fenced_buffer_destroy(void *winsys, struct pb_buffer *buf)
 {
    struct fenced_buffer *fenced_buf = fenced_buffer(buf);
    struct fenced_manager *fenced_mgr = fenced_buf->mgr;
@@ -101,7 +101,7 @@ vmw_gmr_bufmgr(struct pb_manager *mgr)
 
 
 static void
-vmw_gmr_buffer_destroy(struct pb_buffer *_buf)
+vmw_gmr_buffer_destroy(void *winsys, struct pb_buffer *_buf)
 {
    struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
 