Mirror of https://gitlab.freedesktop.org/mesa/mesa.git
winsys/amdgpu: Add amdgpu_screen_winsys
It extends pipe_screen / radeon_winsys and references amdgpu_winsys. Multiple amdgpu_screen_winsys instances may reference the same amdgpu_winsys instance, which corresponds to an amdgpu_device_handle. The purpose of amdgpu_screen_winsys is to keep a duplicate of the DRM file descriptor passed to amdgpu_winsys_create, which will be needed in the next change.

v2:
* Add comment in amdgpu_winsys_unref explaining why it always returns true
  (Marek Olšák)

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
parent 6fce296400
commit cb446dc0fa
7 changed files with 182 additions and 141 deletions
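For orientation, here is a minimal sketch of the ownership model this commit introduces, abridged from the struct definitions and helpers added in the diff below (field lists shortened, explanatory comments added):

/* One per amdgpu_device_handle; shared by every screen that opens the same
 * GPU and reference-counted by the amdgpu_screen_winsys instances below. */
struct amdgpu_winsys {
   struct pipe_reference reference;
   amdgpu_device_handle dev;
   /* ... buffer cache, slab allocators, CS queue, locks ... */
};

/* One per pipe_screen / radeon_winsys; keeps its own duplicate of the DRM fd. */
struct amdgpu_screen_winsys {
   struct radeon_winsys base;   /* vtable handed to the driver */
   struct amdgpu_winsys *aws;   /* shared per-device state */
   int fd;                      /* dup of the fd passed to amdgpu_winsys_create */
};

static inline struct amdgpu_winsys *
amdgpu_winsys(struct radeon_winsys *base)
{
   /* a radeon_winsys pointer now resolves to the shared device state */
   return ((struct amdgpu_screen_winsys *)base)->aws;
}

The destroy path mirrors this split: amdgpu_winsys_unref now always returns true, so amdgpu_winsys_destroy runs for every screen winsys, closes the duplicated fd, and drops one reference on the shared amdgpu_winsys; the device-level state is torn down only when the last reference goes away.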
@@ -50,12 +50,6 @@ struct amdgpu_sparse_backing_chunk {
    uint32_t begin, end;
 };
 
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
-                 uint64_t size,
-                 unsigned alignment,
-                 enum radeon_bo_domain domain,
-                 enum radeon_bo_flag flags);
 static void amdgpu_bo_unmap(struct pb_buffer *buf);
 
 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
@@ -251,9 +245,9 @@ static bool amdgpu_bo_do_map(struct amdgpu_winsys_bo *bo, void **cpu)
    return true;
 }
 
-static void *amdgpu_bo_map(struct pb_buffer *buf,
-                           struct radeon_cmdbuf *rcs,
-                           enum pipe_transfer_usage usage)
+void *amdgpu_bo_map(struct pb_buffer *buf,
+                    struct radeon_cmdbuf *rcs,
+                    enum pipe_transfer_usage usage)
 {
    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
    struct amdgpu_winsys_bo *real;
@@ -658,7 +652,7 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
    }
    assert(slab_size != 0);
 
-   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
+   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws,
                                                     slab_size, slab_size,
                                                     domains, flags));
    if (!slab->buffer)
@@ -833,7 +827,7 @@ sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_
                bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
    size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
 
-   buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
+   buf = amdgpu_bo_create(bo->ws, size, RADEON_SPARSE_PAGE_SIZE,
                           bo->initial_domain,
                           bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
    if (!buf) {
@@ -1298,14 +1292,13 @@ static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
    amdgpu_bo_set_metadata(bo->bo, &metadata);
 }
 
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
+struct pb_buffer *
+amdgpu_bo_create(struct amdgpu_winsys *ws,
                  uint64_t size,
                  unsigned alignment,
                  enum radeon_bo_domain domain,
                  enum radeon_bo_flag flags)
 {
-   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo;
    int heap = -1;
 
@@ -1402,6 +1395,17 @@ no_slab:
    return &bo->base;
 }
 
+static struct pb_buffer *
+amdgpu_buffer_create(struct radeon_winsys *ws,
+                     uint64_t size,
+                     unsigned alignment,
+                     enum radeon_bo_domain domain,
+                     enum radeon_bo_flag flags)
+{
+   return amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
+                           flags);
+}
+
 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                                struct winsys_handle *whandle,
                                                unsigned vm_alignment,
@@ -1645,14 +1649,14 @@ static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
    return ((struct amdgpu_winsys_bo*)buf)->va;
 }
 
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
 {
    ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
    ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
    ws->base.buffer_map = amdgpu_bo_map;
    ws->base.buffer_unmap = amdgpu_bo_unmap;
    ws->base.buffer_wait = amdgpu_bo_wait;
-   ws->base.buffer_create = amdgpu_bo_create;
+   ws->base.buffer_create = amdgpu_buffer_create;
    ws->base.buffer_from_handle = amdgpu_bo_from_handle;
    ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
    ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
@@ -125,8 +125,16 @@ struct amdgpu_slab {
 };
 
 bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf);
+struct pb_buffer *amdgpu_bo_create(struct amdgpu_winsys *ws,
+                                   uint64_t size,
+                                   unsigned alignment,
+                                   enum radeon_bo_domain domain,
+                                   enum radeon_bo_flag flags);
 void amdgpu_bo_destroy(struct pb_buffer *_buf);
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws);
+void *amdgpu_bo_map(struct pb_buffer *buf,
+                    struct radeon_cmdbuf *rcs,
+                    enum pipe_transfer_usage usage);
+void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws);
 
 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
@@ -672,7 +672,8 @@ static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
    return index;
 }
 
-static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
+static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
+                                 struct amdgpu_ib *ib,
                                  enum ring_type ring_type)
 {
    struct pb_buffer *pb;
@@ -696,18 +697,18 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
    buffer_size = MIN2(buffer_size, max_size);
    buffer_size = MAX2(buffer_size, min_size); /* min_size is more important */
 
-   pb = ws->base.buffer_create(&ws->base, buffer_size,
-                               ws->info.gart_page_size,
-                               RADEON_DOMAIN_GTT,
-                               RADEON_FLAG_NO_INTERPROCESS_SHARING |
-                               (ring_type == RING_GFX ||
-                                ring_type == RING_COMPUTE ||
-                                ring_type == RING_DMA ?
-                                   RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
+   pb = amdgpu_bo_create(ws, buffer_size,
+                         ws->info.gart_page_size,
+                         RADEON_DOMAIN_GTT,
+                         RADEON_FLAG_NO_INTERPROCESS_SHARING |
+                         (ring_type == RING_GFX ||
+                          ring_type == RING_COMPUTE ||
+                          ring_type == RING_DMA ?
+                             RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
    if (!pb)
       return false;
 
-   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
+   mapped = amdgpu_bo_map(pb, NULL, PIPE_TRANSFER_WRITE);
    if (!mapped) {
       pb_reference(&pb, NULL);
       return false;
@@ -740,10 +741,9 @@ static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
    }
 }
 
-static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
+static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws, struct amdgpu_cs *cs,
                               enum ib_type ib_type)
 {
-   struct amdgpu_winsys *aws = amdgpu_winsys(ws);
    /* Small IBs are better than big IBs, because the GPU goes idle quicker
     * and there is less waiting for buffers and fences. Proof:
     * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
@@ -785,7 +785,7 @@ static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
    /* Allocate a new buffer for IBs if the current buffer is all used. */
    if (!ib->big_ib_buffer ||
        ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
-      if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))
+      if (!amdgpu_ib_new_buffer(ws, ib, cs->ring_type))
          return false;
    }
 
@@ -987,7 +987,7 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
 
-   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
+   if (!amdgpu_get_new_ib(ctx->ws, cs, IB_MAIN)) {
       amdgpu_destroy_cs_context(&cs->csc2);
       amdgpu_destroy_cs_context(&cs->csc1);
       FREE(cs);
@@ -1013,7 +1013,7 @@ amdgpu_cs_add_parallel_compute_ib(struct radeon_cmdbuf *ib,
       return NULL;
 
    /* Allocate the compute IB. */
-   if (!amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE))
+   if (!amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE))
       return NULL;
 
    if (uses_gds_ordered_append) {
@@ -1768,9 +1768,9 @@ static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
       amdgpu_cs_context_cleanup(cs->csc);
    }
 
-   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
+   amdgpu_get_new_ib(ws, cs, IB_MAIN);
    if (cs->compute_ib.ib_mapped)
-      amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE);
+      amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE);
 
    cs->main.base.used_gart = 0;
    cs->main.base.used_vram = 0;
@@ -1810,7 +1810,7 @@ static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
    return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
 }
 
-void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws)
 {
    ws->base.ctx_create = amdgpu_ctx_create;
    ws->base.ctx_destroy = amdgpu_ctx_destroy;
@@ -276,7 +276,7 @@ void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                        unsigned num_fences,
                        struct pipe_fence_handle **fences);
 void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
-void amdgpu_cs_init_functions(struct amdgpu_winsys *ws);
+void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);
 void amdgpu_cs_submit_ib(void *job, int thread_index);
 
 #endif
@@ -104,7 +104,7 @@ static int amdgpu_surface_init(struct radeon_winsys *rws,
    return ac_compute_surface(ws->addrlib, &ws->info, &config, mode, surf);
 }
 
-void amdgpu_surface_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *ws)
 {
    ws->base.surface_init = amdgpu_surface_init;
 }
@@ -38,6 +38,7 @@
 #include <xf86drm.h>
 #include <stdio.h>
 #include <sys/stat.h>
+#include <fcntl.h>
 #include "amd/common/ac_llvm_util.h"
 #include "amd/common/sid.h"
 
@@ -119,14 +120,6 @@ fail:
 
 static void do_winsys_deinit(struct amdgpu_winsys *ws)
 {
-   AddrDestroy(ws->addrlib);
-   amdgpu_device_deinitialize(ws->dev);
-}
-
-static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
-{
-   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
-
    if (ws->reserve_vmid)
       amdgpu_vm_unreserve_vmid(ws->dev, 0);
 
@@ -142,7 +135,41 @@ static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
    util_hash_table_destroy(ws->bo_export_table);
    simple_mtx_destroy(&ws->global_bo_list_lock);
    simple_mtx_destroy(&ws->bo_export_table_lock);
-   do_winsys_deinit(ws);
+
+   AddrDestroy(ws->addrlib);
+   amdgpu_device_deinitialize(ws->dev);
    FREE(ws);
 }
 
+static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
+{
+   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
+   struct amdgpu_winsys *ws = sws->aws;
+   bool destroy;
+
+   /* When the reference counter drops to zero, remove the device pointer
+    * from the table.
+    * This must happen while the mutex is locked, so that
+    * amdgpu_winsys_create in another thread doesn't get the winsys
+    * from the table when the counter drops to 0.
+    */
+   simple_mtx_lock(&dev_tab_mutex);
+
+   destroy = pipe_reference(&ws->reference, NULL);
+   if (destroy && dev_tab) {
+      util_hash_table_remove(dev_tab, ws->dev);
+      if (util_hash_table_count(dev_tab) == 0) {
+         util_hash_table_destroy(dev_tab);
+         dev_tab = NULL;
+      }
+   }
+
+   simple_mtx_unlock(&dev_tab_mutex);
+
+   if (destroy)
+      do_winsys_deinit(ws);
+
+   close(sws->fd);
+   FREE(rws);
+}
+
@@ -246,27 +273,11 @@ static int compare_pointers(void *key1, void *key2)
 
 static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
 {
-   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
-   bool destroy;
-
-   /* When the reference counter drops to zero, remove the device pointer
-    * from the table.
-    * This must happen while the mutex is locked, so that
-    * amdgpu_winsys_create in another thread doesn't get the winsys
-    * from the table when the counter drops to 0. */
-   simple_mtx_lock(&dev_tab_mutex);
-
-   destroy = pipe_reference(&ws->reference, NULL);
-   if (destroy && dev_tab) {
-      util_hash_table_remove(dev_tab, ws->dev);
-      if (util_hash_table_count(dev_tab) == 0) {
-         util_hash_table_destroy(dev_tab);
-         dev_tab = NULL;
-      }
-   }
-
-   simple_mtx_unlock(&dev_tab_mutex);
-   return destroy;
+   /* radeon_winsys corresponds to amdgpu_screen_winsys, which is never
+    * referenced multiple times, so amdgpu_winsys_destroy always needs to be
+    * called. It handles reference counting for amdgpu_winsys.
+    */
+   return true;
 }
 
 static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
@@ -282,10 +293,17 @@ PUBLIC struct radeon_winsys *
 amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                      radeon_screen_create_t screen_create)
 {
-   struct amdgpu_winsys *ws;
+   struct amdgpu_screen_winsys *ws;
+   struct amdgpu_winsys *aws;
    amdgpu_device_handle dev;
    uint32_t drm_major, drm_minor, r;
 
+   ws = CALLOC_STRUCT(amdgpu_screen_winsys);
+   if (!ws)
+      return NULL;
+
+   ws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
+
    /* Look up the winsys from the dev table. */
    simple_mtx_lock(&dev_tab_mutex);
    if (!dev_tab)
@@ -295,15 +313,14 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
     * for the same fd. */
    r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
    if (r) {
-      simple_mtx_unlock(&dev_tab_mutex);
       fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
-      return NULL;
+      goto fail;
    }
 
    /* Lookup a winsys if we have already created one for this device. */
-   ws = util_hash_table_get(dev_tab, dev);
-   if (ws) {
-      pipe_reference(NULL, &ws->reference);
+   aws = util_hash_table_get(dev_tab, dev);
+   if (aws) {
+      pipe_reference(NULL, &aws->reference);
       simple_mtx_unlock(&dev_tab_mutex);
 
       /* Release the device handle, because we don't need it anymore.
@@ -311,57 +328,83 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
        * has its own device handle.
        */
       amdgpu_device_deinitialize(dev);
-      return &ws->base;
-   }
+   } else {
+      /* Create a new winsys. */
+      aws = CALLOC_STRUCT(amdgpu_winsys);
+      if (!aws)
+         goto fail;
 
-   /* Create a new winsys. */
-   ws = CALLOC_STRUCT(amdgpu_winsys);
-   if (!ws)
-      goto fail;
+      aws->dev = dev;
+      aws->info.drm_major = drm_major;
+      aws->info.drm_minor = drm_minor;
 
-   ws->dev = dev;
-   ws->info.drm_major = drm_major;
-   ws->info.drm_minor = drm_minor;
+      if (!do_winsys_init(aws, config, fd))
+         goto fail_alloc;
 
-   if (!do_winsys_init(ws, config, fd))
-      goto fail_alloc;
+      /* Create managers. */
+      pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
+                    500000, aws->check_vm ? 1.0f : 2.0f, 0,
+                    (aws->info.vram_size + aws->info.gart_size) / 8,
+                    amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
 
-   /* Create managers. */
-   pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
-                 500000, ws->check_vm ? 1.0f : 2.0f, 0,
-                 (ws->info.vram_size + ws->info.gart_size) / 8,
-                 amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
+      unsigned min_slab_order = 9; /* 512 bytes */
+      unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
+      unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
+                                               NUM_SLAB_ALLOCATORS;
 
-   unsigned min_slab_order = 9; /* 512 bytes */
-   unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
-   unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
-                                            NUM_SLAB_ALLOCATORS;
+      /* Divide the size order range among slab managers. */
+      for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
+         unsigned min_order = min_slab_order;
+         unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
+                                   max_slab_order);
 
-   /* Divide the size order range among slab managers. */
-   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
-      unsigned min_order = min_slab_order;
-      unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
-                                max_slab_order);
+         if (!pb_slabs_init(&aws->bo_slabs[i],
+                            min_order, max_order,
+                            RADEON_MAX_SLAB_HEAPS,
+                            aws,
+                            amdgpu_bo_can_reclaim_slab,
+                            amdgpu_bo_slab_alloc,
+                            amdgpu_bo_slab_free)) {
+            amdgpu_winsys_destroy(&ws->base);
+            simple_mtx_unlock(&dev_tab_mutex);
+            return NULL;
+         }
 
-      if (!pb_slabs_init(&ws->bo_slabs[i],
-                         min_order, max_order,
-                         RADEON_MAX_SLAB_HEAPS,
-                         ws,
-                         amdgpu_bo_can_reclaim_slab,
-                         amdgpu_bo_slab_alloc,
-                         amdgpu_bo_slab_free)) {
+         min_slab_order = max_order + 1;
+      }
+
+      aws->info.min_alloc_size = 1 << aws->bo_slabs[0].min_order;
+
+      /* init reference */
+      pipe_reference_init(&aws->reference, 1);
+
+      LIST_INITHEAD(&aws->global_bo_list);
+      aws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
+
+      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
+      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
+      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);
+
+      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
+                           UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
          amdgpu_winsys_destroy(&ws->base);
          simple_mtx_unlock(&dev_tab_mutex);
          return NULL;
       }
 
-      min_slab_order = max_order + 1;
+      util_hash_table_set(dev_tab, dev, aws);
+
+      if (aws->reserve_vmid) {
+         r = amdgpu_vm_reserve_vmid(dev, 0);
+         if (r) {
+            amdgpu_winsys_destroy(&ws->base);
+            simple_mtx_unlock(&dev_tab_mutex);
+            return NULL;
+         }
+      }
    }
 
-   ws->info.min_alloc_size = 1 << ws->bo_slabs[0].min_order;
-
-   /* init reference */
-   pipe_reference_init(&ws->reference, 1);
+   ws->aws = aws;
 
    /* Set functions. */
    ws->base.unref = amdgpu_winsys_unref;
@@ -376,20 +419,6 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    amdgpu_cs_init_functions(ws);
    amdgpu_surface_init_functions(ws);
 
-   LIST_INITHEAD(&ws->global_bo_list);
-   ws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
-
-   (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
-   (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);
-   (void) simple_mtx_init(&ws->bo_export_table_lock, mtx_plain);
-
-   if (!util_queue_init(&ws->cs_queue, "cs", 8, 1,
-                        UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
-      amdgpu_winsys_destroy(&ws->base);
-      simple_mtx_unlock(&dev_tab_mutex);
-      return NULL;
-   }
-
    /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
@@ -402,16 +431,6 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
       return NULL;
    }
 
-   util_hash_table_set(dev_tab, dev, ws);
-
-   if (ws->reserve_vmid) {
-      r = amdgpu_vm_reserve_vmid(dev, 0);
-      if (r) {
-         fprintf(stderr, "amdgpu: amdgpu_vm_reserve_vmid failed. (%i)\n", r);
-         goto fail_cache;
-      }
-   }
-
    /* We must unlock the mutex once the winsys is fully initialized, so that
     * other threads attempting to create the winsys from the same fd will
     * get a fully initialized winsys and not just half-way initialized. */
@@ -419,12 +438,11 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
 
    return &ws->base;
 
-fail_cache:
-   pb_cache_deinit(&ws->bo_cache);
-   do_winsys_deinit(ws);
 fail_alloc:
-   FREE(ws);
+   FREE(aws);
 fail:
+   close(ws->fd);
+   FREE(ws);
    simple_mtx_unlock(&dev_tab_mutex);
    return NULL;
 }
@@ -41,7 +41,6 @@ struct amdgpu_cs;
 #define NUM_SLAB_ALLOCATORS 3
 
 struct amdgpu_winsys {
-   struct radeon_winsys base;
    struct pipe_reference reference;
    struct pb_cache bo_cache;
 
@@ -94,12 +93,24 @@ struct amdgpu_winsys {
    simple_mtx_t bo_export_table_lock;
 };
 
+struct amdgpu_screen_winsys {
+   struct radeon_winsys base;
+   struct amdgpu_winsys *aws;
+   int fd;
+};
+
+static inline struct amdgpu_screen_winsys *
+amdgpu_screen_winsys(struct radeon_winsys *base)
+{
+   return (struct amdgpu_screen_winsys*)base;
+}
+
 static inline struct amdgpu_winsys *
 amdgpu_winsys(struct radeon_winsys *base)
 {
-   return (struct amdgpu_winsys*)base;
+   return amdgpu_screen_winsys(base)->aws;
 }
 
-void amdgpu_surface_init_functions(struct amdgpu_winsys *ws);
+void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *ws);
 
 #endif