diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c
index 160263b5e03..3185d129991 100644
--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c
@@ -2386,7 +2386,7 @@ radv_queue_finish(struct radv_queue *queue)
 static void
 radv_bo_list_init(struct radv_bo_list *bo_list)
 {
-	pthread_rwlock_init(&bo_list->rwlock, NULL);
+	u_rwlock_init(&bo_list->rwlock);
 	bo_list->list.count = bo_list->capacity = 0;
 	bo_list->list.bos = NULL;
 }
@@ -2395,7 +2395,7 @@ static void
 radv_bo_list_finish(struct radv_bo_list *bo_list)
 {
 	free(bo_list->list.bos);
-	pthread_rwlock_destroy(&bo_list->rwlock);
+	u_rwlock_destroy(&bo_list->rwlock);
 }
 
 VkResult radv_bo_list_add(struct radv_device *device,
@@ -2409,13 +2409,13 @@ VkResult radv_bo_list_add(struct radv_device *device,
 	if (unlikely(!device->use_global_bo_list))
 		return VK_SUCCESS;
 
-	pthread_rwlock_wrlock(&bo_list->rwlock);
+	u_rwlock_wrlock(&bo_list->rwlock);
 	if (bo_list->list.count == bo_list->capacity) {
 		unsigned capacity = MAX2(4, bo_list->capacity * 2);
 		void *data = realloc(bo_list->list.bos,
 				     capacity * sizeof(struct radeon_winsys_bo*));
 
 		if (!data) {
-			pthread_rwlock_unlock(&bo_list->rwlock);
+			u_rwlock_wrunlock(&bo_list->rwlock);
 			return VK_ERROR_OUT_OF_HOST_MEMORY;
 		}
@@ -2424,7 +2424,7 @@ VkResult radv_bo_list_add(struct radv_device *device,
 	}
 
 	bo_list->list.bos[bo_list->list.count++] = bo;
-	pthread_rwlock_unlock(&bo_list->rwlock);
+	u_rwlock_wrunlock(&bo_list->rwlock);
 	return VK_SUCCESS;
 }
 
@@ -2439,7 +2439,7 @@ void radv_bo_list_remove(struct radv_device *device,
 	if (unlikely(!device->use_global_bo_list))
 		return;
 
-	pthread_rwlock_wrlock(&bo_list->rwlock);
+	u_rwlock_wrlock(&bo_list->rwlock);
 	/* Loop the list backwards so we find the most recently added
 	 * memory first. */
 	for(unsigned i = bo_list->list.count; i-- > 0;) {
@@ -2449,7 +2449,7 @@ void radv_bo_list_remove(struct radv_device *device,
 			break;
 		}
 	}
-	pthread_rwlock_unlock(&bo_list->rwlock);
+	u_rwlock_wrunlock(&bo_list->rwlock);
 }
 
 static void
@@ -4574,7 +4574,7 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
 		sem_info.cs_emit_signal = j + advance == submission->cmd_buffer_count;
 
 		if (unlikely(queue->device->use_global_bo_list)) {
-			pthread_rwlock_rdlock(&queue->device->bo_list.rwlock);
+			u_rwlock_rdlock(&queue->device->bo_list.rwlock);
 			bo_list = &queue->device->bo_list.list;
 		}
 
@@ -4584,7 +4584,7 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
 						      can_patch, base_fence);
 
 		if (unlikely(queue->device->use_global_bo_list))
-			pthread_rwlock_unlock(&queue->device->bo_list.rwlock);
+			u_rwlock_rdunlock(&queue->device->bo_list.rwlock);
 
 		if (result != VK_SUCCESS)
 			goto fail;
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 5bb6bbfae8a..eb651b547fa 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -48,6 +48,7 @@
 #include "compiler/shader_enums.h"
 #include "util/macros.h"
 #include "util/list.h"
+#include "util/rwlock.h"
 #include "util/xmlconfig.h"
 #include "vk_alloc.h"
 #include "vk_debug_report.h"
@@ -741,7 +742,7 @@ struct radv_queue {
 struct radv_bo_list {
 	struct radv_winsys_bo_list list;
 	unsigned capacity;
-	pthread_rwlock_t rwlock;
+	struct u_rwlock rwlock;
 };
 
 VkResult radv_bo_list_add(struct radv_device *device,
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
index a9fbb929e92..ee274552355 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
@@ -297,10 +297,10 @@ static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo)
 		free(bo->ranges);
 	} else {
 		if (bo->ws->debug_all_bos) {
-			pthread_rwlock_wrlock(&bo->ws->global_bo_list_lock);
+			u_rwlock_wrlock(&bo->ws->global_bo_list_lock);
 			list_del(&bo->global_list_item);
 			bo->ws->num_buffers--;
-			pthread_rwlock_unlock(&bo->ws->global_bo_list_lock);
+			u_rwlock_wrunlock(&bo->ws->global_bo_list_lock);
 		}
 		radv_amdgpu_bo_va_op(bo->ws, bo->bo, 0, bo->size, bo->base.va,
 				     0, 0, AMDGPU_VA_OP_UNMAP);
@@ -330,10 +330,10 @@ static void radv_amdgpu_add_buffer_to_global_list(struct radv_amdgpu_winsys_bo *
 	struct radv_amdgpu_winsys *ws = bo->ws;
 
 	if (bo->ws->debug_all_bos) {
-		pthread_rwlock_wrlock(&ws->global_bo_list_lock);
+		u_rwlock_wrlock(&ws->global_bo_list_lock);
 		list_addtail(&bo->global_list_item, &ws->global_bo_list);
 		ws->num_buffers++;
-		pthread_rwlock_unlock(&ws->global_bo_list_lock);
+		u_rwlock_wrunlock(&ws->global_bo_list_lock);
 	}
 }
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index c66bf3ec1c9..76218dccfbd 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -920,7 +920,7 @@ radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
 	}
 
 	if (aws->debug_all_bos)
-		pthread_rwlock_rdlock(&aws->global_bo_list_lock);
+		u_rwlock_rdlock(&aws->global_bo_list_lock);
 
 	/* Get the BO list. */
 	result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
@@ -961,7 +961,7 @@ radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
 
 fail:
 	if (aws->debug_all_bos)
-		pthread_rwlock_unlock(&aws->global_bo_list_lock);
+		u_rwlock_rdunlock(&aws->global_bo_list_lock);
 	return result;
 }
@@ -995,7 +995,7 @@ radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
 	number_of_ibs = cs_count + !!initial_preamble_cs;
 
 	if (aws->debug_all_bos)
-		pthread_rwlock_rdlock(&aws->global_bo_list_lock);
+		u_rwlock_rdlock(&aws->global_bo_list_lock);
 
 	/* Get the BO list. */
 	result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
@@ -1051,7 +1051,7 @@ radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
 
 fail:
 	if (aws->debug_all_bos)
-		pthread_rwlock_unlock(&aws->global_bo_list_lock);
+		u_rwlock_rdunlock(&aws->global_bo_list_lock);
 	return result;
 }
@@ -1209,7 +1209,7 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
 		}
 
 		if (aws->debug_all_bos)
-			pthread_rwlock_rdlock(&aws->global_bo_list_lock);
+			u_rwlock_rdlock(&aws->global_bo_list_lock);
 
 		result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
 						 (struct radv_amdgpu_winsys_bo **)bos,
@@ -1220,7 +1220,7 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
 			free(ibs);
 			free(bos);
 			if (aws->debug_all_bos)
-				pthread_rwlock_unlock(&aws->global_bo_list_lock);
+				u_rwlock_rdunlock(&aws->global_bo_list_lock);
 			return result;
 		}
@@ -1239,7 +1239,7 @@ radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
 		free(request.handles);
 
 		if (aws->debug_all_bos)
-			pthread_rwlock_unlock(&aws->global_bo_list_lock);
+			u_rwlock_rdunlock(&aws->global_bo_list_lock);
 
 		for (unsigned j = 0; j < number_of_ibs; j++) {
 			ws->buffer_destroy(bos[j]);
@@ -1310,17 +1310,17 @@ static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
 		}
 	}
 	if(cs->ws->debug_all_bos) {
-		pthread_rwlock_rdlock(&cs->ws->global_bo_list_lock);
+		u_rwlock_rdlock(&cs->ws->global_bo_list_lock);
 		list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
 				    &cs->ws->global_bo_list, global_list_item) {
 			if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
 				if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
-					pthread_rwlock_unlock(&cs->ws->global_bo_list_lock);
+					u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
 					return (char *)ret + (addr - bo->base.va);
 				}
 			}
 		}
-		pthread_rwlock_unlock(&cs->ws->global_bo_list_lock);
+		u_rwlock_rdunlock(&cs->ws->global_bo_list_lock);
 	}
 	return ret;
 }
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.c
index ebcd39efd53..c1a738a2b7f 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.c
@@ -166,7 +166,7 @@ static void radv_amdgpu_winsys_destroy(struct radeon_winsys *rws)
 		amdgpu_cs_destroy_syncobj(ws->dev, ws->syncobj[i]);
 	free(ws->syncobj);
 
-	pthread_rwlock_destroy(&ws->global_bo_list_lock);
+	u_rwlock_destroy(&ws->global_bo_list_lock);
 	ac_addrlib_destroy(ws->addrlib);
 	amdgpu_device_deinitialize(ws->dev);
 	FREE(rws);
@@ -201,7 +201,7 @@ radv_amdgpu_winsys_create(int fd, uint64_t debug_flags, uint64_t perftest_flags)
 	ws->zero_all_vram_allocs = debug_flags & RADV_DEBUG_ZERO_VRAM;
 	ws->use_llvm = debug_flags & RADV_DEBUG_LLVM;
 	list_inithead(&ws->global_bo_list);
-	pthread_rwlock_init(&ws->global_bo_list_lock, NULL);
+	u_rwlock_init(&ws->global_bo_list_lock);
 	pthread_mutex_init(&ws->syncobj_lock, NULL);
 	ws->base.query_info = radv_amdgpu_winsys_query_info;
 	ws->base.query_value = radv_amdgpu_winsys_query_value;
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.h b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.h
index b2c5006cf10..7807c4d947f 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.h
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_winsys.h
@@ -32,6 +32,7 @@
 #include "ac_gpu_info.h"
 #include <amdgpu.h>
 #include "util/list.h"
+#include "util/rwlock.h"
 #include <pthread.h>
 
 struct radv_amdgpu_winsys {
@@ -49,7 +50,7 @@ struct radv_amdgpu_winsys {
 	bool use_llvm;
 
 	unsigned num_buffers;
-	pthread_rwlock_t global_bo_list_lock;
+	struct u_rwlock global_bo_list_lock;
 	struct list_head global_bo_list;
 
 	uint64_t allocated_vram;
diff --git a/src/util/meson.build b/src/util/meson.build
index a14c0137383..7c0e37cf1ed 100644
--- a/src/util/meson.build
+++ b/src/util/meson.build
@@ -72,6 +72,7 @@ files_mesa_util = files(
   'os_socket.h',
   'u_process.c',
   'u_process.h',
+  'rwlock.h',
   'sha1/sha1.c',
   'sha1/sha1.h',
   'ralloc.c',
diff --git a/src/util/rwlock.h b/src/util/rwlock.h
new file mode 100644
index 00000000000..695241179b8
--- /dev/null
+++ b/src/util/rwlock.h
@@ -0,0 +1,113 @@
+/**************************************************************************
+ *
+ * Copyright 2020 Lag Free Games, LLC
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef RWLOCK_H
+#define RWLOCK_H
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <pthread.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct u_rwlock
+{
+#ifdef _WIN32
+   SRWLOCK rwlock;
+#else
+   pthread_rwlock_t rwlock;
+#endif
+};
+
+static inline int u_rwlock_init(struct u_rwlock *rwlock)
+{
+#ifdef _WIN32
+   InitializeSRWLock(&rwlock->rwlock);
+   return 0;
+#else
+   return pthread_rwlock_init(&rwlock->rwlock, NULL);
+#endif
+}
+
+static inline int u_rwlock_destroy(struct u_rwlock *rwlock)
+{
+#ifdef _WIN32
+   return 0;
+#else
+   return pthread_rwlock_destroy(&rwlock->rwlock);
+#endif
+}
+
+static inline int u_rwlock_rdlock(struct u_rwlock *rwlock)
+{
+#ifdef _WIN32
+   AcquireSRWLockShared(&rwlock->rwlock);
+   return 0;
+#else
+   return pthread_rwlock_rdlock(&rwlock->rwlock);
+#endif
+}
+
+static inline int u_rwlock_rdunlock(struct u_rwlock *rwlock)
+{
+#ifdef _WIN32
+   ReleaseSRWLockShared(&rwlock->rwlock);
+   return 0;
+#else
+   return pthread_rwlock_unlock(&rwlock->rwlock);
+#endif
+}
+
+static inline int u_rwlock_wrlock(struct u_rwlock *rwlock)
+{
+#ifdef _WIN32
+   AcquireSRWLockExclusive(&rwlock->rwlock);
+   return 0;
+#else
+   return pthread_rwlock_wrlock(&rwlock->rwlock);
+#endif
+}
+
+static inline int u_rwlock_wrunlock(struct u_rwlock *rwlock)
+{
+#ifdef _WIN32
+   ReleaseSRWLockExclusive(&rwlock->rwlock);
+   return 0;
+#else
+   return pthread_rwlock_unlock(&rwlock->rwlock);
+#endif
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
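
Note (illustrative, not part of the patch): u_rwlock keeps the pthread_rwlock call pattern but splits the unlock into read and write variants, because a Windows SRWLOCK has to be released with ReleaseSRWLockShared() or ReleaseSRWLockExclusive() depending on how it was acquired, so a single generic unlock cannot be mapped onto it. A minimal sketch of the intended call pattern, using a hypothetical shared counter (not code from this patch):

    #include <stdint.h>
    #include "util/rwlock.h"

    struct counter {
       struct u_rwlock lock;
       uint64_t value;
    };

    static void counter_init(struct counter *c)
    {
       u_rwlock_init(&c->lock);     /* pthread_rwlock_init() or InitializeSRWLock() */
       c->value = 0;
    }

    static uint64_t counter_read(struct counter *c)
    {
       uint64_t v;
       u_rwlock_rdlock(&c->lock);   /* shared lock: concurrent readers allowed */
       v = c->value;
       u_rwlock_rdunlock(&c->lock); /* must pair with rdlock, not wrunlock */
       return v;
    }

    static void counter_bump(struct counter *c)
    {
       u_rwlock_wrlock(&c->lock);   /* exclusive lock: single writer */
       c->value++;
       u_rwlock_wrunlock(&c->lock);
    }

This mirrors how the callers above were converted: pthread_rwlock_unlock() after a rdlock becomes u_rwlock_rdunlock(), and after a wrlock it becomes u_rwlock_wrunlock().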