Mirror of https://gitlab.freedesktop.org/mesa/mesa.git (synced 2025-12-28 14:40:10 +01:00)
amdgpu: use simple mtx
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
parent f0857fe87b
commit 87f02ddfd1
5 changed files with 45 additions and 44 deletions
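For context: simple_mtx_t comes from Mesa's util/simple_mtx.h and is a lighter-weight mutex (futex-based where available) whose init/lock/unlock/destroy entry points mirror the C11 mtx_* calls replaced below, so the conversion is a one-to-one rename plus one new include. A minimal sketch of the mapping; the function and variable names here are illustrative, not part of the commit:

    #include "util/simple_mtx.h"

    /* Static initialization, as done for dev_tab_mutex below. */
    static simple_mtx_t example_static_lock = _SIMPLE_MTX_INITIALIZER_NP;

    static void example_locking(void)
    {
       simple_mtx_t lock;

       simple_mtx_init(&lock, mtx_plain);   /* was: mtx_init(&lock, mtx_plain) */
       simple_mtx_lock(&lock);              /* was: mtx_lock(&lock)            */
       /* ...critical section... */
       simple_mtx_unlock(&lock);            /* was: mtx_unlock(&lock)          */
       simple_mtx_destroy(&lock);           /* was: mtx_destroy(&lock)         */
    }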
src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -90,7 +90,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
       unsigned idle_fences;
       bool buffer_idle;
 
-      mtx_lock(&ws->bo_fence_lock);
+      simple_mtx_lock(&ws->bo_fence_lock);
 
       for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
          if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
@@ -106,13 +106,13 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
       bo->num_fences -= idle_fences;
 
       buffer_idle = !bo->num_fences;
-      mtx_unlock(&ws->bo_fence_lock);
+      simple_mtx_unlock(&ws->bo_fence_lock);
 
       return buffer_idle;
    } else {
       bool buffer_idle = true;
 
-      mtx_lock(&ws->bo_fence_lock);
+      simple_mtx_lock(&ws->bo_fence_lock);
       while (bo->num_fences && buffer_idle) {
          struct pipe_fence_handle *fence = NULL;
          bool fence_idle = false;
@@ -120,12 +120,12 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
          amdgpu_fence_reference(&fence, bo->fences[0]);
 
          /* Wait for the fence. */
-         mtx_unlock(&ws->bo_fence_lock);
+         simple_mtx_unlock(&ws->bo_fence_lock);
          if (amdgpu_fence_wait(fence, abs_timeout, true))
             fence_idle = true;
          else
            buffer_idle = false;
-         mtx_lock(&ws->bo_fence_lock);
+         simple_mtx_lock(&ws->bo_fence_lock);
 
          /* Release an idle fence to avoid checking it again later, keeping in
           * mind that the fence array may have been modified by other threads.
@@ -139,7 +139,7 @@ static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
 
          amdgpu_fence_reference(&fence, NULL);
       }
-      mtx_unlock(&ws->bo_fence_lock);
+      simple_mtx_unlock(&ws->bo_fence_lock);
 
       return buffer_idle;
    }
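The amdgpu_bo_wait hunks above keep the pre-existing pattern of dropping ws->bo_fence_lock around the blocking fence wait and re-taking it to re-examine the (possibly modified) fence array; only the lock type changes. A condensed sketch of that pattern with the renamed calls (the fence-array shuffling is elided):

    simple_mtx_lock(&ws->bo_fence_lock);
    while (bo->num_fences && buffer_idle) {
       struct pipe_fence_handle *fence = NULL;
       amdgpu_fence_reference(&fence, bo->fences[0]);   /* hold a reference across the unlock */

       simple_mtx_unlock(&ws->bo_fence_lock);           /* never block while holding the lock */
       bool fence_idle = amdgpu_fence_wait(fence, abs_timeout, true);
       if (!fence_idle)
          buffer_idle = false;
       simple_mtx_lock(&ws->bo_fence_lock);             /* re-lock; the array may have changed */

       /* Pop the head only if it is still the fence just waited on,
        * since other threads may have touched bo->fences meanwhile. */
       if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
          /* ...shift bo->fences down by one entry... */
       }
       amdgpu_fence_reference(&fence, NULL);
    }
    simple_mtx_unlock(&ws->bo_fence_lock);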
@@ -168,10 +168,10 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)
    assert(bo->bo && "must not be called for slab entries");
 
    if (bo->ws->debug_all_bos) {
-      mtx_lock(&bo->ws->global_bo_list_lock);
+      simple_mtx_lock(&bo->ws->global_bo_list_lock);
       LIST_DEL(&bo->u.real.global_list_item);
       bo->ws->num_buffers--;
-      mtx_unlock(&bo->ws->global_bo_list_lock);
+      simple_mtx_unlock(&bo->ws->global_bo_list_lock);
    }
 
    amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
@@ -363,10 +363,10 @@ static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
    assert(bo->bo);
 
    if (ws->debug_all_bos) {
-      mtx_lock(&ws->global_bo_list_lock);
+      simple_mtx_lock(&ws->global_bo_list_lock);
       LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
       ws->num_buffers++;
-      mtx_unlock(&ws->global_bo_list_lock);
+      simple_mtx_unlock(&ws->global_bo_list_lock);
    }
 }
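These two hunks cover the optional bookkeeping enabled by ws->debug_all_bos (the RADEON_ALL_BOS option seen later in amdgpu_winsys.c): every real buffer is kept on ws->global_bo_list so submission can hand the whole set to the kernel. The consumer side appears in the amdgpu_cs_submit_ib hunks further down; a condensed sketch of producer and consumer under the renamed lock, with the list walk elided:

    /* Producer: run when a real buffer is created. */
    if (ws->debug_all_bos) {
       simple_mtx_lock(&ws->global_bo_list_lock);
       LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
       ws->num_buffers++;
       simple_mtx_unlock(&ws->global_bo_list_lock);
    }

    /* Consumer: amdgpu_cs_submit_ib builds a kernel BO list from it. */
    simple_mtx_lock(&ws->global_bo_list_lock);
    handles = malloc(sizeof(handles[0]) * ws->num_buffers);
    if (handles) {
       /* ...walk ws->global_bo_list and fill handles[]... */
       r = amdgpu_bo_list_create(ws->dev, ws->num_buffers, handles, NULL, &bo_list);
       free(handles);
    }
    simple_mtx_unlock(&ws->global_bo_list_lock);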
@@ -722,9 +722,9 @@ sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
 
    bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
 
-   mtx_lock(&ws->bo_fence_lock);
+   simple_mtx_lock(&ws->bo_fence_lock);
    amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
-   mtx_unlock(&ws->bo_fence_lock);
+   simple_mtx_unlock(&ws->bo_fence_lock);
 
    list_del(&backing->list);
    amdgpu_winsys_bo_reference(&backing->bo, NULL);
@@ -819,7 +819,7 @@ static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
    }
 
    amdgpu_va_range_free(bo->u.sparse.va_handle);
-   mtx_destroy(&bo->u.sparse.commit_lock);
+   simple_mtx_destroy(&bo->u.sparse.commit_lock);
    FREE(bo->u.sparse.commitments);
    FREE(bo);
 }
@@ -866,7 +866,7 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
    if (!bo->u.sparse.commitments)
       goto error_alloc_commitments;
 
-   mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
+   simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
    LIST_INITHEAD(&bo->u.sparse.backing);
 
    /* For simplicity, we always map a multiple of the page size. */
@@ -888,7 +888,7 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
 error_va_map:
    amdgpu_va_range_free(bo->u.sparse.va_handle);
 error_va_alloc:
-   mtx_destroy(&bo->u.sparse.commit_lock);
+   simple_mtx_destroy(&bo->u.sparse.commit_lock);
    FREE(bo->u.sparse.commitments);
 error_alloc_commitments:
    FREE(bo);
@@ -915,7 +915,7 @@ amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
    va_page = offset / RADEON_SPARSE_PAGE_SIZE;
    end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
 
-   mtx_lock(&bo->u.sparse.commit_lock);
+   simple_mtx_lock(&bo->u.sparse.commit_lock);
 
 #if DEBUG_SPARSE_COMMITS
    sparse_dump(bo, __func__);
@@ -1019,7 +1019,7 @@ amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
    }
 out:
 
-   mtx_unlock(&bo->u.sparse.commit_lock);
+   simple_mtx_unlock(&bo->u.sparse.commit_lock);
 
    return ok;
 }
src/gallium/winsys/amdgpu/drm/amdgpu_bo.h
@@ -72,7 +72,7 @@ struct amdgpu_winsys_bo {
          struct amdgpu_winsys_bo *real;
       } slab;
       struct {
-         mtx_t commit_lock;
+         simple_mtx_t commit_lock;
          amdgpu_va_handle va_handle;
          enum radeon_bo_flag flags;
 
src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -538,7 +538,7 @@ static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
    /* We delay adding the backing buffers until we really have to. However,
     * we cannot delay accounting for memory use.
     */
-   mtx_lock(&bo->u.sparse.commit_lock);
+   simple_mtx_lock(&bo->u.sparse.commit_lock);
 
    list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
       if (bo->initial_domain & RADEON_DOMAIN_VRAM)
@@ -547,7 +547,7 @@ static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
          acs->main.base.used_gart += backing->bo->base.size;
    }
 
-   mtx_unlock(&bo->u.sparse.commit_lock);
+   simple_mtx_unlock(&bo->u.sparse.commit_lock);
 
    return idx;
 }
@@ -1146,7 +1146,7 @@ static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
       struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
       struct amdgpu_winsys_bo *bo = buffer->bo;
 
-      mtx_lock(&bo->u.sparse.commit_lock);
+      simple_mtx_lock(&bo->u.sparse.commit_lock);
 
       list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
          /* We can directly add the buffer here, because we know that each
@@ -1155,7 +1155,7 @@ static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
          int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
          if (idx < 0) {
             fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
-            mtx_unlock(&bo->u.sparse.commit_lock);
+            simple_mtx_unlock(&bo->u.sparse.commit_lock);
             return false;
          }
 
@@ -1164,7 +1164,7 @@ static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
          p_atomic_inc(&backing->bo->num_active_ioctls);
       }
 
-      mtx_unlock(&bo->u.sparse.commit_lock);
+      simple_mtx_unlock(&bo->u.sparse.commit_lock);
    }
 
    return true;
@@ -1188,11 +1188,11 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
       amdgpu_bo_handle *handles;
       unsigned num = 0;
 
-      mtx_lock(&ws->global_bo_list_lock);
+      simple_mtx_lock(&ws->global_bo_list_lock);
 
       handles = malloc(sizeof(handles[0]) * ws->num_buffers);
       if (!handles) {
-         mtx_unlock(&ws->global_bo_list_lock);
+         simple_mtx_unlock(&ws->global_bo_list_lock);
          amdgpu_cs_context_cleanup(cs);
          cs->error_code = -ENOMEM;
          return;
@@ -1206,7 +1206,7 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
       r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                 handles, NULL, &bo_list);
       free(handles);
-      mtx_unlock(&ws->global_bo_list_lock);
+      simple_mtx_unlock(&ws->global_bo_list_lock);
    } else {
       unsigned num_handles;
 
@@ -1469,7 +1469,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
     * that the order of fence dependency updates matches the order of
     * submissions.
     */
-   mtx_lock(&ws->bo_fence_lock);
+   simple_mtx_lock(&ws->bo_fence_lock);
    amdgpu_add_fence_dependencies_bo_lists(cs);
 
    /* Swap command streams. "cst" is going to be submitted. */
@@ -1480,7 +1480,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
    util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                       amdgpu_cs_submit_ib, NULL);
    /* The submission has been queued, unlock the fence now. */
-   mtx_unlock(&ws->bo_fence_lock);
+   simple_mtx_unlock(&ws->bo_fence_lock);
 
    if (!(flags & RADEON_FLUSH_ASYNC)) {
       amdgpu_cs_sync_flush(rcs);
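The amdgpu_cs_flush hunks keep ws->bo_fence_lock held from the fence-dependency update through util_queue_add_job, so the order of fence updates matches the order in which jobs reach the submission queue; only the lock type changes. A condensed sketch of that sequence, with error handling and the command-stream swap elided:

    simple_mtx_lock(&ws->bo_fence_lock);
    amdgpu_add_fence_dependencies_bo_lists(cs);      /* record dependencies for this submission */
    /* ...swap command streams and update fence bookkeeping... */
    util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                       amdgpu_cs_submit_ib, NULL);   /* queue the real submission on a worker */
    simple_mtx_unlock(&ws->bo_fence_lock);           /* safe to unlock once the job is queued */

    if (!(flags & RADEON_FLUSH_ASYNC))
       amdgpu_cs_sync_flush(rcs);                    /* optionally wait for the worker to finish */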
src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -43,7 +43,7 @@
 #endif
 
 static struct util_hash_table *dev_tab = NULL;
-static mtx_t dev_tab_mutex = _MTX_INITIALIZER_NP;
+static simple_mtx_t dev_tab_mutex = _SIMPLE_MTX_INITIALIZER_NP;
 
 DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
 
@@ -94,10 +94,10 @@ static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
    if (util_queue_is_initialized(&ws->cs_queue))
       util_queue_destroy(&ws->cs_queue);
 
-   mtx_destroy(&ws->bo_fence_lock);
+   simple_mtx_destroy(&ws->bo_fence_lock);
    pb_slabs_deinit(&ws->bo_slabs);
    pb_cache_deinit(&ws->bo_cache);
-   mtx_destroy(&ws->global_bo_list_lock);
+   simple_mtx_destroy(&ws->global_bo_list_lock);
    do_winsys_deinit(ws);
    FREE(rws);
 }
@@ -217,13 +217,13 @@ static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
     * This must happen while the mutex is locked, so that
     * amdgpu_winsys_create in another thread doesn't get the winsys
     * from the table when the counter drops to 0. */
-   mtx_lock(&dev_tab_mutex);
+   simple_mtx_lock(&dev_tab_mutex);
 
    destroy = pipe_reference(&ws->reference, NULL);
    if (destroy && dev_tab)
       util_hash_table_remove(dev_tab, ws->dev);
 
-   mtx_unlock(&dev_tab_mutex);
+   simple_mtx_unlock(&dev_tab_mutex);
    return destroy;
 }
 
@@ -251,7 +251,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    drmFreeVersion(version);
 
    /* Look up the winsys from the dev table. */
-   mtx_lock(&dev_tab_mutex);
+   simple_mtx_lock(&dev_tab_mutex);
    if (!dev_tab)
       dev_tab = util_hash_table_create(hash_dev, compare_dev);
 
@@ -259,7 +259,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
     * for the same fd. */
    r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
    if (r) {
-      mtx_unlock(&dev_tab_mutex);
+      simple_mtx_unlock(&dev_tab_mutex);
       fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
       return NULL;
    }
@@ -268,7 +268,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    ws = util_hash_table_get(dev_tab, dev);
    if (ws) {
       pipe_reference(NULL, &ws->reference);
-      mtx_unlock(&dev_tab_mutex);
+      simple_mtx_unlock(&dev_tab_mutex);
       return &ws->base;
    }
 
@@ -317,13 +317,13 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    amdgpu_surface_init_functions(ws);
 
    LIST_INITHEAD(&ws->global_bo_list);
-   (void) mtx_init(&ws->global_bo_list_lock, mtx_plain);
-   (void) mtx_init(&ws->bo_fence_lock, mtx_plain);
+   (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
+   (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);
 
    if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1,
                         UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
       amdgpu_winsys_destroy(&ws->base);
-      mtx_unlock(&dev_tab_mutex);
+      simple_mtx_unlock(&dev_tab_mutex);
       return NULL;
    }
 
@@ -335,7 +335,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    ws->base.screen = screen_create(&ws->base, config);
    if (!ws->base.screen) {
       amdgpu_winsys_destroy(&ws->base);
-      mtx_unlock(&dev_tab_mutex);
+      simple_mtx_unlock(&dev_tab_mutex);
       return NULL;
    }
 
@@ -352,7 +352,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
-   mtx_unlock(&dev_tab_mutex);
+   simple_mtx_unlock(&dev_tab_mutex);
 
    return &ws->base;
 
@@ -362,6 +362,6 @@ fail_cache:
 fail_alloc:
    FREE(ws);
 fail:
-   mtx_unlock(&dev_tab_mutex);
+   simple_mtx_unlock(&dev_tab_mutex);
    return NULL;
 }
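amdgpu_winsys_create holds dev_tab_mutex from the dev-table lookup until the winsys is fully initialized, so a second thread opening the same device either finds the finished winsys in dev_tab or waits for it; every error path unlocks before returning, and the renaming above preserves that structure. A condensed sketch of the control flow, with allocation and screen setup elided:

    simple_mtx_lock(&dev_tab_mutex);
    if (!dev_tab)
       dev_tab = util_hash_table_create(hash_dev, compare_dev);

    ws = util_hash_table_get(dev_tab, dev);
    if (ws) {
       pipe_reference(NULL, &ws->reference);    /* reuse the existing winsys */
       simple_mtx_unlock(&dev_tab_mutex);
       return &ws->base;
    }

    /* ...allocate and initialize a new winsys, insert it into dev_tab... */

    simple_mtx_unlock(&dev_tab_mutex);           /* only after full initialization */
    return &ws->base;

 fail:
    simple_mtx_unlock(&dev_tab_mutex);           /* every failure path also unlocks */
    return NULL;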
src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
@@ -32,6 +32,7 @@
 #include "pipebuffer/pb_slab.h"
 #include "gallium/drivers/radeon/radeon_winsys.h"
 #include "addrlib/addrinterface.h"
+#include "util/simple_mtx.h"
 #include "util/u_queue.h"
 #include <amdgpu.h>
 
@@ -49,7 +50,7 @@ struct amdgpu_winsys {
 
    amdgpu_device_handle dev;
 
-   mtx_t bo_fence_lock;
+   simple_mtx_t bo_fence_lock;
 
    int num_cs; /* The number of command streams created. */
    unsigned num_total_rejected_cs;
@@ -80,7 +81,7 @@ struct amdgpu_winsys {
    bool reserve_vmid;
 
    /* List of all allocated buffers */
-   mtx_t global_bo_list_lock;
+   simple_mtx_t global_bo_list_lock;
    struct list_head global_bo_list;
    unsigned num_buffers;
 };