radv/amdgpu: Remove command submission with scheduled dependencies.

Scheduled dependencies are no longer used by any part of RADV,
so we can safely delete this code path.

Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20010>
Timur Kristóf 2022-09-22 12:38:12 -07:00 (committed by Marge Bot)
parent 7cb128ffb1
commit 97d3377b56
3 changed files with 7 additions and 48 deletions
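The interface change itself is mechanical: cs_submit loses its submit_count parameter and now takes exactly one submission per call, because the only code that ever passed more than one (the scheduled-dependency fallback removed below) is gone. A minimal before/after sketch of a call site (a sketch only; setup of `submit` is omitted and the wait/signal argument names are condensed relative to the real call sites in the hunks below):

   /* Before: an array of submissions plus a count. */
   result = queue->device->ws->cs_submit(ctx, 1, &submit, wait_count, waits,
                                         signal_count, signals, false);

   /* After: exactly one submission per call. */
   result = queue->device->ws->cs_submit(ctx, &submit, wait_count, waits,
                                         signal_count, signals, false);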

--- a/src/amd/vulkan/radv_device.c
+++ b/src/amd/vulkan/radv_device.c

@@ -5579,7 +5579,7 @@ radv_queue_submit_empty(struct radv_queue *queue, struct vk_queue_submit *submission)
       .queue_index = queue->vk.index_in_family,
    };
 
-   return queue->device->ws->cs_submit(ctx, 1, &submit, submission->wait_count, submission->waits,
+   return queue->device->ws->cs_submit(ctx, &submit, submission->wait_count, submission->waits,
                                        submission->signal_count, submission->signals, false);
 }
@@ -5711,7 +5711,7 @@ radv_queue_submit_normal(struct radv_queue *queue, struct vk_queue_submit *submission)
       submit.preamble_count = submit_ace ? 4 : 1;
 
       result = queue->device->ws->cs_submit(
-         ctx, 1, &submit, j == 0 ? submission->wait_count : 0, submission->waits,
+         ctx, &submit, j == 0 ? submission->wait_count : 0, submission->waits,
          last_submit ? submission->signal_count : 0, submission->signals, can_patch);
 
       if (result != VK_SUCCESS)
@@ -5781,7 +5781,7 @@ radv_queue_internal_submit(struct radv_queue *queue, struct radeon_cmdbuf *cs)
       .cs_count = 1,
    };
 
-   VkResult result = queue->device->ws->cs_submit(ctx, 1, &submit, 0, NULL, 0, NULL, false);
+   VkResult result = queue->device->ws->cs_submit(ctx, &submit, 0, NULL, 0, NULL, false);
 
    if (result != VK_SUCCESS)
       return false;

--- a/src/amd/vulkan/radv_radeon_winsys.h
+++ b/src/amd/vulkan/radv_radeon_winsys.h

@@ -283,7 +283,7 @@ struct radeon_winsys {
    void (*cs_grow)(struct radeon_cmdbuf *cs, size_t min_size);
 
-   VkResult (*cs_submit)(struct radeon_winsys_ctx *ctx, uint32_t submit_count,
+   VkResult (*cs_submit)(struct radeon_winsys_ctx *ctx,
                          const struct radv_winsys_submit_info *submits, uint32_t wait_count,
                          const struct vk_sync_wait *waits, uint32_t signal_count,
                          const struct vk_sync_signal *signals, bool can_patch);
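Any backend implementing struct radeon_winsys has to match the narrowed callback. A hypothetical stub (the function name and empty body are illustrative, not from the patch) showing the shape an implementation now takes:

   static VkResult
   example_cs_submit(struct radeon_winsys_ctx *ctx,
                     const struct radv_winsys_submit_info *submit, uint32_t wait_count,
                     const struct vk_sync_wait *waits, uint32_t signal_count,
                     const struct vk_sync_signal *signals, bool can_patch)
   {
      /* Exactly one submission to process; there is no submit_count to loop over. */
      return VK_SUCCESS;
   }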

--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c

@@ -104,11 +104,6 @@ struct radv_winsys_sem_info {
    bool cs_emit_wait;
    struct radv_winsys_sem_counts wait;
    struct radv_winsys_sem_counts signal;
-
-   /* Expresses a scheduled dependency, meaning that the submission of the
-    * referenced fence must be scheduled before the current submission.
-    */
-   struct radv_amdgpu_fence *scheduled_dependency;
 };
 
 static uint32_t radv_amdgpu_ctx_queue_syncobj(struct radv_amdgpu_ctx *ctx, unsigned ip,
@@ -1375,7 +1370,7 @@ radv_amdgpu_winsys_cs_submit_internal(struct radv_amdgpu_ctx *ctx,
 }
 
 static VkResult
-radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx, uint32_t submit_count,
+radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
                              const struct radv_winsys_submit_info *submits, uint32_t wait_count,
                              const struct vk_sync_wait *waits, uint32_t signal_count,
                              const struct vk_sync_signal *signals, bool can_patch)
@@ -1442,33 +1437,7 @@ radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx, uint32_t submit_count,
       .cs_emit_signal = true,
    };
 
-   /* Should submit to at least 1 queue. */
-   assert(submit_count);
-
-   if (submit_count == 1) {
-      result = radv_amdgpu_winsys_cs_submit_internal(ctx, &submits[0], &sem_info, can_patch);
-   } else {
-      /* Multiple queue submissions without gang submit.
-       * This code path will submit each item separately and add the
-       * previous submission as a scheduled dependency to the next one.
-       */
-      assert(ws->info.has_scheduled_fence_dependency);
-
-      struct radv_amdgpu_fence *next_dependency = NULL;
-
-      for (unsigned i = 0; i < submit_count; ++i) {
-         sem_info.scheduled_dependency = next_dependency;
-         sem_info.cs_emit_wait = i == 0;
-         sem_info.cs_emit_signal = i == submit_count - 1;
-
-         result = radv_amdgpu_winsys_cs_submit_internal(ctx, &submits[i], &sem_info, can_patch);
-         if (result != VK_SUCCESS)
-            goto out;
-
-         next_dependency = &ctx->last_submission[submits[i].ip_type][submits[i].queue_index];
-      }
-   }
+   result = radv_amdgpu_winsys_cs_submit_internal(ctx, &submits[0], &sem_info, can_patch);
 
 out:
    STACK_ARRAY_FINISH(wait_points);
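To illustrate what the deleted else-branch did (an illustration, not code from the patch): for a 3-item batch it kept the chain ordered through scheduled dependencies while touching the batch's semaphores only at the ends:

   /* i == 0: cs_emit_wait = true,  cs_emit_signal = false, dep = NULL     */
   /* i == 1: cs_emit_wait = false, cs_emit_signal = false, dep = fence(0) */
   /* i == 2: cs_emit_wait = false, cs_emit_signal = true,  dep = fence(1) */

Since no caller in RADV hits this path anymore (per the commit message), only the single-submission call survives.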
@@ -1740,7 +1709,6 @@ radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request *request,
    int size;
    struct drm_amdgpu_cs_chunk *chunks;
    struct drm_amdgpu_cs_chunk_data *chunk_data;
-   struct drm_amdgpu_cs_chunk_dep chunk_dep;
    bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
    struct drm_amdgpu_bo_list_in bo_list_in;
    void *wait_syncobj = NULL, *signal_syncobj = NULL;
@@ -1754,8 +1722,7 @@ radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request *request,
 
    if (!queue_syncobj)
       return VK_ERROR_OUT_OF_HOST_MEMORY;
 
-   size = request->number_of_ibs + 1 + (has_user_fence ? 1 : 0) + (!use_bo_list_create ? 1 : 0) +
-          3 + !!sem_info->scheduled_dependency;
+   size = request->number_of_ibs + 1 + (has_user_fence ? 1 : 0) + (!use_bo_list_create ? 1 : 0) + 3;
 
    chunks = malloc(sizeof(chunks[0]) * size);
    if (!chunks)
@@ -1801,14 +1768,6 @@ radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx, struct radv_amdgpu_cs_request *request,
       amdgpu_cs_chunk_fence_info_to_data(&fence_info, &chunk_data[i]);
    }
 
-   if (sem_info->scheduled_dependency) {
-      amdgpu_cs_chunk_fence_to_dep(&sem_info->scheduled_dependency->fence, &chunk_dep);
-
-      i = num_chunks++;
-      chunks[i].chunk_id = AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES;
-      chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4;
-      chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_dep;
-   }
-
    if (sem_info->cs_emit_wait && (sem_info->wait.timeline_syncobj_count ||
                                   sem_info->wait.syncobj_count || *queue_syncobj_wait)) {
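The last two hunks drop the ioctl-side plumbing: the CS chunk array no longer reserves a conditional slot, and the code filling the drm_amdgpu_cs_chunk_dep payload is gone. As a worked example of the sizing line: a request with 2 IBs, a user fence, and the modern BO-list chunk previously needed 2 + 1 + 1 + 1 + 3 + 1 = 9 chunk slots when a scheduled dependency was attached; the new fixed form needs 2 + 1 + 1 + 1 + 3 = 8. For background (a summary of the kernel UAPI, not part of this patch): AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES only orders a job after the referenced fence's job has been scheduled to hardware, a weaker guarantee than AMDGPU_CHUNK_ID_DEPENDENCIES, which waits for the fence to complete.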