mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-06 20:18:12 +02:00
winsys/amdgpu: call userq wait ioctl only once
Allocate space to hold 256 wait fences. Since there is only one queue per ip per process, the idea is that there won't be an app or windowing system that has a large number of job dependencies / wait fences. If an app does have more than 256 wait fences, there won't be corruption issues, since the kernel will still wait for the extra fences. Reviewed-by: Marek Olšák <marek.olsak@amd.com> Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/40698>
This commit is contained in:
parent
5df6fc5481
commit
21047e61a5
1 changed files with 5 additions and 11 deletions
|
|
@ -1657,7 +1657,9 @@ static int amdgpu_cs_submit_ib_userq(struct amdgpu_userq *userq,
|
|||
}
|
||||
syncobj_signal_list[num_syncobj_to_signal - 1] = ((struct amdgpu_fence*)csc->fence)->syncobj;
|
||||
|
||||
struct drm_amdgpu_userq_fence_info *fence_info;
|
||||
uint16_t num_wait_fences = 256;
|
||||
struct drm_amdgpu_userq_fence_info *fence_info = (struct drm_amdgpu_userq_fence_info*)
|
||||
alloca(num_wait_fences * sizeof(struct drm_amdgpu_userq_fence_info));
|
||||
struct drm_amdgpu_userq_wait userq_wait_data = {
|
||||
.waitq_id = userq->userq_handle,
|
||||
.syncobj_handles = (uintptr_t)syncobj_dependencies_list,
|
||||
|
|
@ -1668,11 +1670,11 @@ static int amdgpu_cs_submit_ib_userq(struct amdgpu_userq *userq,
|
|||
/* Wait for previous writes to complete before reading from these BOs. */
|
||||
.bo_write_handles = num_shared_buf_read ? (uintptr_t)shared_buf_kms_handles_read : 0,
|
||||
.num_syncobj_timeline_handles = num_syncobj_timeline_dependencies,
|
||||
.num_fences = 0,
|
||||
.num_fences = num_wait_fences,
|
||||
.num_syncobj_handles = num_syncobj_dependencies,
|
||||
.num_bo_read_handles = num_shared_buf_write,
|
||||
.num_bo_write_handles = num_shared_buf_read,
|
||||
.out_fences = (uintptr_t)NULL,
|
||||
.out_fences = (uintptr_t)fence_info,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
@ -1682,14 +1684,6 @@ static int amdgpu_cs_submit_ib_userq(struct amdgpu_userq *userq,
|
|||
* To implement this strategy, we use amdgpu_userq_wait() before submitting
|
||||
* a job, and amdgpu_userq_signal() after to indicate completion.
|
||||
*/
|
||||
r = ac_drm_userq_wait(aws->dev, &userq_wait_data);
|
||||
if (r)
|
||||
mesa_loge("amdgpu: getting wait num_fences failed\n");
|
||||
|
||||
fence_info = (struct drm_amdgpu_userq_fence_info*)
|
||||
alloca(userq_wait_data.num_fences * sizeof(struct drm_amdgpu_userq_fence_info));
|
||||
userq_wait_data.out_fences = (uintptr_t)fence_info;
|
||||
|
||||
r = ac_drm_userq_wait(aws->dev, &userq_wait_data);
|
||||
if (r)
|
||||
mesa_loge("amdgpu: getting wait fences failed\n");
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue