venus: convert bo and shmem to use vn_refcount

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13090>
This commit is contained in:
Chia-I Wu 2021-09-28 10:04:47 -07:00 committed by Marge Bot
parent 5cdbf75615
commit d7560a71a2
3 changed files with 18 additions and 35 deletions

View file

@@ -9,14 +9,16 @@
#include "vn_common.h"
struct vn_renderer_shmem {
atomic_int refcount;
struct vn_refcount refcount;
uint32_t res_id;
size_t mmap_size; /* for internal use only (i.e., munmap) */
void *mmap_ptr;
};
struct vn_renderer_bo {
atomic_int refcount;
struct vn_refcount refcount;
uint32_t res_id;
/* for internal use only */
size_t mmap_size;
@@ -290,7 +292,7 @@ vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
struct vn_renderer_shmem *shmem =
renderer->shmem_ops.create(renderer, size);
if (shmem) {
assert(atomic_load(&shmem->refcount) == 1);
assert(vn_refcount_is_valid(&shmem->refcount));
assert(shmem->res_id);
assert(shmem->mmap_size >= size);
assert(shmem->mmap_ptr);
@@ -303,10 +305,7 @@ static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
struct vn_renderer_shmem *shmem)
{
ASSERTED const int old =
atomic_fetch_add_explicit(&shmem->refcount, 1, memory_order_relaxed);
assert(old >= 1);
vn_refcount_inc(&shmem->refcount);
return shmem;
}
@@ -314,14 +313,8 @@ static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
struct vn_renderer_shmem *shmem)
{
const int old =
atomic_fetch_sub_explicit(&shmem->refcount, 1, memory_order_release);
assert(old >= 1);
if (old == 1) {
atomic_thread_fence(memory_order_acquire);
if (vn_refcount_dec(&shmem->refcount))
renderer->shmem_ops.destroy(renderer, shmem);
}
}
static inline VkResult
@@ -339,7 +332,7 @@ vn_renderer_bo_create_from_device_memory(
if (result != VK_SUCCESS)
return result;
assert(atomic_load(&bo->refcount) == 1);
assert(vn_refcount_is_valid(&bo->refcount));
assert(bo->res_id);
assert(!bo->mmap_size || bo->mmap_size >= size);
@@ -360,7 +353,7 @@ vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
if (result != VK_SUCCESS)
return result;
assert(atomic_load(&bo->refcount) >= 1);
assert(vn_refcount_is_valid(&bo->refcount));
assert(bo->res_id);
assert(!bo->mmap_size || bo->mmap_size >= size);
@@ -371,25 +364,15 @@ vn_renderer_bo_create_from_dma_buf(struct vn_renderer *renderer,
static inline struct vn_renderer_bo *
vn_renderer_bo_ref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
ASSERTED const int old =
atomic_fetch_add_explicit(&bo->refcount, 1, memory_order_relaxed);
assert(old >= 1);
vn_refcount_inc(&bo->refcount);
return bo;
}
static inline bool
vn_renderer_bo_unref(struct vn_renderer *renderer, struct vn_renderer_bo *bo)
{
const int old =
atomic_fetch_sub_explicit(&bo->refcount, 1, memory_order_release);
assert(old >= 1);
if (old == 1) {
atomic_thread_fence(memory_order_acquire);
if (vn_refcount_dec(&bo->refcount))
return renderer->bo_ops.destroy(renderer, bo);
}
return false;
}

View file

@@ -1112,7 +1112,7 @@ virtgpu_bo_destroy(struct vn_renderer *renderer, struct vn_renderer_bo *_bo)
/* Check the refcount again after the import lock is grabbed. Yes, we use
* the double-checked locking anti-pattern.
*/
if (atomic_load_explicit(&bo->base.refcount, memory_order_relaxed) > 0) {
if (vn_refcount_is_valid(&bo->base.refcount)) {
mtx_unlock(&gpu->dma_buf_import_mutex);
return false;
}
@@ -1208,11 +1208,11 @@ virtgpu_bo_create_from_dma_buf(struct vn_renderer *renderer,
/* we can't use vn_renderer_bo_ref as the refcount may drop to 0
* temporarily before virtgpu_bo_destroy grabs the lock
*/
atomic_fetch_add_explicit(&bo->base.refcount, 1, memory_order_relaxed);
vn_refcount_fetch_add_relaxed(&bo->base.refcount, 1);
} else {
*bo = (struct virtgpu_bo){
.base = {
.refcount = 1,
.refcount = VN_REFCOUNT_INIT(1),
.res_id = info.res_handle,
.mmap_size = mmap_size,
},
@@ -1255,7 +1255,7 @@ virtgpu_bo_create_from_device_memory(
struct virtgpu_bo *bo = util_sparse_array_get(&gpu->bo_array, gem_handle);
*bo = (struct virtgpu_bo){
.base = {
.refcount = 1,
.refcount = VN_REFCOUNT_INIT(1),
.res_id = res_id,
.mmap_size = size,
},
@@ -1301,7 +1301,7 @@ virtgpu_shmem_create(struct vn_renderer *renderer, size_t size)
util_sparse_array_get(&gpu->shmem_array, gem_handle);
*shmem = (struct virtgpu_shmem){
.base = {
.refcount = 1,
.refcount = VN_REFCOUNT_INIT(1),
.res_id = res_id,
.mmap_size = size,
.mmap_ptr = ptr,

View file

@@ -761,7 +761,7 @@ vtest_bo_create_from_device_memory(
struct vtest_bo *bo = util_sparse_array_get(&vtest->bo_array, res_id);
*bo = (struct vtest_bo){
.base = {
.refcount = 1,
.refcount = VN_REFCOUNT_INIT(1),
.res_id = res_id,
.mmap_size = size,
},
@@ -814,7 +814,7 @@ vtest_shmem_create(struct vn_renderer *renderer, size_t size)
util_sparse_array_get(&vtest->shmem_array, res_id);
*shmem = (struct vtest_shmem){
.base = {
.refcount = 1,
.refcount = VN_REFCOUNT_INIT(1),
.res_id = res_id,
.mmap_size = size,
.mmap_ptr = ptr,