winsys/amdgpu: sws instead of ws for amdgpu_screen_winsys

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27968>
This commit is contained in:
Yogesh Mohan Marimuthu 2024-03-04 09:07:04 +05:30 committed by Marge Bot
parent 92879e9da8
commit f2275eed44
7 changed files with 111 additions and 111 deletions

View file

@@ -189,7 +189,7 @@ void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer_lean *_buf)
if (!bo->is_user_ptr && bo->cpu_ptr) {
bo->cpu_ptr = NULL;
amdgpu_bo_unmap(&ws->dummy_ws.base, &bo->b.base);
amdgpu_bo_unmap(&ws->dummy_sws.base, &bo->b.base);
}
assert(bo->is_user_ptr || bo->map_count == 0);
@@ -628,7 +628,7 @@ error_bo_alloc:
bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer_lean *_buf)
{
return amdgpu_bo_wait(&ws->dummy_ws.base, _buf, 0, RADEON_USAGE_READWRITE);
return amdgpu_bo_wait(&ws->dummy_sws.base, _buf, 0, RADEON_USAGE_READWRITE);
}
bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
@@ -1795,23 +1795,23 @@ static void amdgpu_buffer_destroy(struct radeon_winsys *ws, struct pb_buffer_lea
amdgpu_bo_destroy_or_cache(ws, buf);
}
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *sws)
{
ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
ws->base.buffer_map = amdgpu_bo_map;
ws->base.buffer_unmap = amdgpu_bo_unmap;
ws->base.buffer_wait = amdgpu_bo_wait;
ws->base.buffer_create = amdgpu_buffer_create;
ws->base.buffer_destroy = amdgpu_buffer_destroy;
ws->base.buffer_from_handle = amdgpu_bo_from_handle;
ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
ws->base.buffer_get_handle = amdgpu_bo_get_handle;
ws->base.buffer_commit = amdgpu_bo_sparse_commit;
ws->base.buffer_find_next_committed_memory = amdgpu_bo_find_next_committed_memory;
ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
ws->base.buffer_get_flags = amdgpu_bo_get_flags;
sws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
sws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
sws->base.buffer_map = amdgpu_bo_map;
sws->base.buffer_unmap = amdgpu_bo_unmap;
sws->base.buffer_wait = amdgpu_bo_wait;
sws->base.buffer_create = amdgpu_buffer_create;
sws->base.buffer_destroy = amdgpu_buffer_destroy;
sws->base.buffer_from_handle = amdgpu_bo_from_handle;
sws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
sws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
sws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
sws->base.buffer_get_handle = amdgpu_bo_get_handle;
sws->base.buffer_commit = amdgpu_bo_sparse_commit;
sws->base.buffer_find_next_committed_memory = amdgpu_bo_find_next_committed_memory;
sws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
sws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
sws->base.buffer_get_flags = amdgpu_bo_get_flags;
}

View file

@@ -268,7 +268,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
struct radeon_cmdbuf *rcs,
enum pipe_map_flags usage);
void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer_lean *buf);
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws);
void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *sws);
bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
@@ -286,7 +286,7 @@ static inline void
amdgpu_winsys_bo_reference(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo **dst,
struct amdgpu_winsys_bo *src)
{
radeon_bo_reference(&ws->dummy_ws.base,
radeon_bo_reference(&ws->dummy_sws.base,
(struct pb_buffer_lean**)dst, (struct pb_buffer_lean*)src);
}
@@ -301,7 +301,7 @@ amdgpu_winsys_bo_set_reference(struct amdgpu_winsys_bo **dst, struct amdgpu_wins
static inline void
amdgpu_winsys_bo_drop_reference(struct amdgpu_winsys *ws, struct amdgpu_winsys_bo *dst)
{
radeon_bo_drop_reference(&ws->dummy_ws.base, &dst->base);
radeon_bo_drop_reference(&ws->dummy_sws.base, &dst->base);
}
#ifdef __cplusplus

View file

@@ -707,14 +707,14 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
if (!pb)
return false;
mapped = (uint8_t*)amdgpu_bo_map(&ws->dummy_ws.base, pb, NULL, PIPE_MAP_WRITE);
mapped = (uint8_t*)amdgpu_bo_map(&ws->dummy_sws.base, pb, NULL, PIPE_MAP_WRITE);
if (!mapped) {
radeon_bo_reference(&ws->dummy_ws.base, &pb, NULL);
radeon_bo_reference(&ws->dummy_sws.base, &pb, NULL);
return false;
}
radeon_bo_reference(&ws->dummy_ws.base, &main_ib->big_buffer, pb);
radeon_bo_reference(&ws->dummy_ws.base, &pb, NULL);
radeon_bo_reference(&ws->dummy_sws.base, &main_ib->big_buffer, pb);
radeon_bo_reference(&ws->dummy_sws.base, &pb, NULL);
main_ib->gpu_address = amdgpu_bo_get_va(main_ib->big_buffer);
main_ib->big_buffer_cpu_ptr = mapped;
@@ -992,10 +992,10 @@ amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_i
if (!preamble_bo)
return false;
map = (uint32_t*)amdgpu_bo_map(&ws->dummy_ws.base, preamble_bo, NULL,
map = (uint32_t*)amdgpu_bo_map(&ws->dummy_sws.base, preamble_bo, NULL,
(pipe_map_flags)(PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY));
if (!map) {
radeon_bo_reference(&ws->dummy_ws.base, &preamble_bo, NULL);
radeon_bo_reference(&ws->dummy_sws.base, &preamble_bo, NULL);
return false;
}
@@ -1004,7 +1004,7 @@ amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_i
/* Pad the IB. */
amdgpu_pad_gfx_compute_ib(ws, cs->ip_type, map, &preamble_num_dw, 0);
amdgpu_bo_unmap(&ws->dummy_ws.base, preamble_bo);
amdgpu_bo_unmap(&ws->dummy_sws.base, preamble_bo);
for (unsigned i = 0; i < 2; i++) {
csc[i]->chunk_ib[IB_PREAMBLE].va_start = amdgpu_bo_get_va(preamble_bo);
@@ -1857,8 +1857,8 @@ static void amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
amdgpu_cs_sync_flush(rcs);
util_queue_fence_destroy(&cs->flush_completed);
p_atomic_dec(&cs->ws->num_cs);
radeon_bo_reference(&cs->ws->dummy_ws.base, &cs->preamble_ib_bo, NULL);
radeon_bo_reference(&cs->ws->dummy_ws.base, &cs->main_ib.big_buffer, NULL);
radeon_bo_reference(&cs->ws->dummy_sws.base, &cs->preamble_ib_bo, NULL);
radeon_bo_reference(&cs->ws->dummy_sws.base, &cs->main_ib.big_buffer, NULL);
FREE(rcs->prev);
amdgpu_destroy_cs_context(cs->ws, &cs->csc1);
amdgpu_destroy_cs_context(cs->ws, &cs->csc2);
@@ -1893,33 +1893,33 @@ static void amdgpu_winsys_fence_reference(struct radeon_winsys *rws,
amdgpu_fence_reference(dst, src);
}
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws)
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *sws)
{
ws->base.ctx_create = amdgpu_ctx_create;
ws->base.ctx_destroy = amdgpu_ctx_destroy;
ws->base.ctx_set_sw_reset_status = amdgpu_ctx_set_sw_reset_status;
ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
ws->base.cs_create = amdgpu_cs_create;
ws->base.cs_setup_preemption = amdgpu_cs_setup_preemption;
ws->base.cs_destroy = amdgpu_cs_destroy;
ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
ws->base.cs_validate = amdgpu_cs_validate;
ws->base.cs_check_space = amdgpu_cs_check_space;
ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
ws->base.cs_flush = amdgpu_cs_flush;
ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
ws->base.cs_add_syncobj_signal = amdgpu_cs_add_syncobj_signal;
ws->base.cs_get_ip_type = amdgpu_cs_get_ip_type;
ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
ws->base.fence_reference = amdgpu_winsys_fence_reference;
ws->base.fence_import_syncobj = amdgpu_fence_import_syncobj;
ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
ws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
sws->base.ctx_create = amdgpu_ctx_create;
sws->base.ctx_destroy = amdgpu_ctx_destroy;
sws->base.ctx_set_sw_reset_status = amdgpu_ctx_set_sw_reset_status;
sws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
sws->base.cs_create = amdgpu_cs_create;
sws->base.cs_setup_preemption = amdgpu_cs_setup_preemption;
sws->base.cs_destroy = amdgpu_cs_destroy;
sws->base.cs_add_buffer = amdgpu_cs_add_buffer;
sws->base.cs_validate = amdgpu_cs_validate;
sws->base.cs_check_space = amdgpu_cs_check_space;
sws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
sws->base.cs_flush = amdgpu_cs_flush;
sws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
sws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
sws->base.cs_sync_flush = amdgpu_cs_sync_flush;
sws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
sws->base.cs_add_syncobj_signal = amdgpu_cs_add_syncobj_signal;
sws->base.cs_get_ip_type = amdgpu_cs_get_ip_type;
sws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
sws->base.fence_reference = amdgpu_winsys_fence_reference;
sws->base.fence_import_syncobj = amdgpu_fence_import_syncobj;
sws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
sws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
sws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
if (ws->aws->info.has_fw_based_shadowing)
ws->base.cs_set_mcbp_reg_shadowing_va = amdgpu_cs_set_mcbp_reg_shadowing_va;
if (sws->aws->info.has_fw_based_shadowing)
sws->base.cs_set_mcbp_reg_shadowing_va = amdgpu_cs_set_mcbp_reg_shadowing_va;
}

View file

@@ -269,7 +269,7 @@ amdgpu_bo_is_referenced_by_cs_with_usage(struct amdgpu_cs *cs,
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
bool absolute);
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs);
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws);
void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *sws);
#ifdef __cplusplus
}

View file

@@ -91,7 +91,7 @@ static int amdgpu_surface_init(struct radeon_winsys *rws,
return ac_compute_surface(ws->addrlib, info, &config, mode, surf);
}
void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *ws)
void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *sws)
{
ws->base.surface_init = amdgpu_surface_init;
sws->base.surface_init = amdgpu_surface_init;
}

View file

@@ -359,18 +359,18 @@ PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
radeon_screen_create_t screen_create)
{
struct amdgpu_screen_winsys *ws;
struct amdgpu_screen_winsys *sws;
struct amdgpu_winsys *aws;
amdgpu_device_handle dev;
uint32_t drm_major, drm_minor;
int r;
ws = CALLOC_STRUCT(amdgpu_screen_winsys);
if (!ws)
sws = CALLOC_STRUCT(amdgpu_screen_winsys);
if (!sws)
return NULL;
pipe_reference_init(&ws->reference, 1);
ws->fd = os_dupfd_cloexec(fd);
pipe_reference_init(&sws->reference, 1);
sws->fd = os_dupfd_cloexec(fd);
/* Look up the winsys from the dev table. */
simple_mtx_lock(&dev_tab_mutex);
@@ -379,7 +379,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
/* Initialize the amdgpu device. This should always return the same pointer
* for the same fd. */
r = amdgpu_device_initialize(ws->fd, &drm_major, &drm_minor, &dev);
r = amdgpu_device_initialize(sws->fd, &drm_major, &drm_minor, &dev);
if (r) {
fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
goto fail;
@@ -398,20 +398,20 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
simple_mtx_lock(&aws->sws_list_lock);
for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
if (are_file_descriptions_equal(sws_iter->fd, ws->fd)) {
close(ws->fd);
FREE(ws);
ws = sws_iter;
pipe_reference(NULL, &ws->reference);
if (are_file_descriptions_equal(sws_iter->fd, sws->fd)) {
close(sws->fd);
FREE(sws);
sws = sws_iter;
pipe_reference(NULL, &sws->reference);
simple_mtx_unlock(&aws->sws_list_lock);
goto unlock;
}
}
simple_mtx_unlock(&aws->sws_list_lock);
ws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
kms_handle_equals);
if (!ws->kms_handles)
if (!sws->kms_handles)
goto fail;
pipe_reference(NULL, &aws->reference);
@@ -429,9 +429,9 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
*/
int device_fd = amdgpu_device_get_fd(dev);
if (!are_file_descriptions_equal(device_fd, fd)) {
ws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
kms_handle_equals);
if (!ws->kms_handles)
sws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
kms_handle_equals);
if (!sws->kms_handles)
goto fail;
/* We could avoid storing the fd and use amdgpu_device_get_fd() where
* we need it but we'd have to use os_same_file_description() to
@@ -439,14 +439,14 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
*/
aws->fd = device_fd;
} else {
aws->fd = ws->fd;
aws->fd = sws->fd;
}
aws->info.drm_major = drm_major;
aws->info.drm_minor = drm_minor;
/* Only aws and buffer functions are used. */
aws->dummy_ws.aws = aws;
amdgpu_bo_init_functions(&aws->dummy_ws);
aws->dummy_sws.aws = aws;
amdgpu_bo_init_functions(&aws->dummy_sws);
if (!do_winsys_init(aws, config, fd))
goto fail_alloc;
@@ -470,7 +470,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
/* Cast to void* because one of the function parameters
* is a struct pointer instead of void*. */
(void*)amdgpu_bo_slab_free)) {
amdgpu_winsys_destroy(&ws->base);
amdgpu_winsys_destroy(&sws->base);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
}
@@ -493,7 +493,7 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL)) {
amdgpu_winsys_destroy(&ws->base);
amdgpu_winsys_destroy(&sws->base);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
}
@@ -503,34 +503,34 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
if (aws->reserve_vmid) {
r = amdgpu_vm_reserve_vmid(dev, 0);
if (r) {
amdgpu_winsys_destroy(&ws->base);
amdgpu_winsys_destroy(&sws->base);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
}
}
}
ws->aws = aws;
sws->aws = aws;
/* Set functions. */
ws->base.unref = amdgpu_winsys_unref;
ws->base.destroy = amdgpu_winsys_destroy;
ws->base.get_fd = amdgpu_drm_winsys_get_fd;
ws->base.query_info = amdgpu_winsys_query_info;
ws->base.cs_request_feature = amdgpu_cs_request_feature;
ws->base.query_value = amdgpu_query_value;
ws->base.read_registers = amdgpu_read_registers;
ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
ws->base.cs_is_secure = amdgpu_cs_is_secure;
ws->base.cs_set_pstate = amdgpu_cs_set_pstate;
sws->base.unref = amdgpu_winsys_unref;
sws->base.destroy = amdgpu_winsys_destroy;
sws->base.get_fd = amdgpu_drm_winsys_get_fd;
sws->base.query_info = amdgpu_winsys_query_info;
sws->base.cs_request_feature = amdgpu_cs_request_feature;
sws->base.query_value = amdgpu_query_value;
sws->base.read_registers = amdgpu_read_registers;
sws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
sws->base.cs_is_secure = amdgpu_cs_is_secure;
sws->base.cs_set_pstate = amdgpu_cs_set_pstate;
amdgpu_bo_init_functions(ws);
amdgpu_cs_init_functions(ws);
amdgpu_surface_init_functions(ws);
amdgpu_bo_init_functions(sws);
amdgpu_cs_init_functions(sws);
amdgpu_surface_init_functions(sws);
simple_mtx_lock(&aws->sws_list_lock);
ws->next = aws->sws_list;
aws->sws_list = ws;
sws->next = aws->sws_list;
aws->sws_list = sws;
simple_mtx_unlock(&aws->sws_list_lock);
/* Create the screen at the end. The winsys must be initialized
@@ -538,9 +538,9 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
*
* Alternatively, we could create the screen based on "ws->gen"
* and link all drivers into one binary blob. */
ws->base.screen = screen_create(&ws->base, config);
if (!ws->base.screen) {
amdgpu_winsys_destroy_locked(&ws->base, true);
sws->base.screen = screen_create(&sws->base, config);
if (!sws->base.screen) {
amdgpu_winsys_destroy_locked(&sws->base, true);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
}
@@ -551,15 +551,15 @@ unlock:
* get a fully initialized winsys and not just half-way initialized. */
simple_mtx_unlock(&dev_tab_mutex);
return &ws->base;
return &sws->base;
fail_alloc:
FREE(aws);
fail:
if (ws->kms_handles)
_mesa_hash_table_destroy(ws->kms_handles, NULL);
close(ws->fd);
FREE(ws);
if (sws->kms_handles)
_mesa_hash_table_destroy(sws->kms_handles, NULL);
close(sws->fd);
FREE(sws);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
}

View file

@@ -202,10 +202,10 @@ struct amdgpu_winsys {
struct hash_table *bo_export_table;
simple_mtx_t bo_export_table_lock;
/* Since most winsys functions require struct radeon_winsys *, dummy_ws.base is used
/* Since most winsys functions require struct radeon_winsys *, dummy_sws.base is used
* for invoking them because sws_list can be NULL.
*/
struct amdgpu_screen_winsys dummy_ws;
struct amdgpu_screen_winsys dummy_sws;
};
static inline struct amdgpu_screen_winsys *
@@ -220,6 +220,6 @@ amdgpu_winsys(struct radeon_winsys *base)
return amdgpu_screen_winsys(base)->aws;
}
void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *ws);
void amdgpu_surface_init_functions(struct amdgpu_screen_winsys *sws);
#endif