radeonsi: don't emit partial flushes for internal CS flushes only
Tested-by: Benedikt Schemmer <ben@besd.de>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
parent 692f550740
commit 60299e9abe

10 changed files with 32 additions and 20 deletions
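Every hunk below replaces a plain 0 or PIPE_FLUSH_ASYNC flush flag at an internal flush site (buffer maps, CS-space checks, ring setup, winsys BO maps) with a RADEON_FLUSH_*START_NEXT_GFX_IB_NOW variant, telling the flush path that the next gfx IB begins immediately, so the partial-flush events at the end of the current IB can be skipped. Below is a minimal sketch of how such flags could compose and be consumed; the bit values and the skip_partial_flushes() helper are assumptions for illustration, not definitions taken from this commit:

/* Sketch only: assumed flag values. In Mesa, PIPE_FLUSH_ASYNC comes from
 * Gallium's p_defines.h and the RADEON_FLUSH_* flags from the winsys headers. */
#include <stdio.h>

#define PIPE_FLUSH_ASYNC                   (1u << 3)  /* assumed bit */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31) /* assumed bit */
#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
        (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)

/* Hypothetical helper mirroring the decision an IB-flush path could make:
 * if the caller promises the next IB starts right away, the partial-flush
 * events at the end of the current IB are unnecessary. */
static int skip_partial_flushes(unsigned flags)
{
        return (flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW) != 0;
}

int main(void)
{
        unsigned internal = RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW;
        unsigned external = PIPE_FLUSH_ASYNC; /* e.g. an API-level flush */

        printf("internal flush -> skip partial flushes: %d\n",
               skip_partial_flushes(internal));
        printf("external flush -> skip partial flushes: %d\n",
               skip_partial_flushes(external));
        return 0;
}

The composite ASYNC variant lets the call sites below keep their asynchronous behavior while adding the start-next-IB hint in a single flag.
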
@@ -64,10 +64,10 @@ void *si_buffer_map_sync_with_rings(struct si_context *sctx,
     sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs,
                                       resource->buf, rusage)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK) {
-            si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+            si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
             return NULL;
         } else {
-            si_flush_gfx_cs(sctx, 0, NULL);
+            si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
             busy = true;
         }
     }

@@ -725,7 +725,7 @@ static bool si_resource_commit(struct pipe_context *pctx,
     if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
         ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
                                          res->buf, RADEON_USAGE_READWRITE)) {
-        si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+        si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
     }
     if (radeon_emitted(ctx->dma_cs, 0) &&
         ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,

@@ -58,7 +58,7 @@ void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
          (src &&
           ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
                                            RADEON_USAGE_WRITE))))
-        si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+        si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 
     /* Flush if there's not enough space, or if the memory usage per IB
      * is too large.

@@ -374,7 +374,10 @@ static boolean si_fence_finish(struct pipe_screen *screen,
              * not going to wait.
              */
             threaded_context_unwrap_sync(ctx);
-            si_flush_gfx_cs(sctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
+            si_flush_gfx_cs(sctx,
+                            (timeout ? 0 : PIPE_FLUSH_ASYNC) |
+                            RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
+                            NULL);
             rfence->gfx_unflushed.ctx = NULL;
 
             if (!timeout)

@@ -47,7 +47,7 @@ void si_need_gfx_cs_space(struct si_context *ctx)
                                      ctx->vram, ctx->gtt))) {
         ctx->gtt = 0;
         ctx->vram = 0;
-        si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+        si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         return;
     }
     ctx->gtt = 0;

@@ -61,7 +61,7 @@ void si_need_gfx_cs_space(struct si_context *ctx)
      */
     unsigned need_dwords = 2048 + ctx->num_cs_dw_queries_suspend;
     if (!ctx->ws->cs_check_space(cs, need_dwords))
-        si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+        si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 }
 
 void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,

@@ -1340,7 +1340,7 @@ radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
         !radeon_cs_memory_below_limit(sctx->screen, sctx->gfx_cs,
                                       sctx->vram + rbo->vram_usage,
                                       sctx->gtt + rbo->gart_usage))
-        si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+        si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 
     radeon_add_to_buffer_list(sctx, sctx->gfx_cs, rbo, usage, priority);
 }

@@ -2774,7 +2774,7 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
 
     /* Flush the context to re-emit both init_config states. */
     sctx->initial_gfx_cs_size = 0; /* force flush */
-    si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+    si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 
     /* Set ring bindings. */
     if (sctx->esgs_ring) {

@@ -3051,7 +3051,7 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
      */
     si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
     sctx->initial_gfx_cs_size = 0; /* force flush */
-    si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+    si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 }
 
 /**

@@ -1869,7 +1869,7 @@ static void si_texture_transfer_unmap(struct pipe_context *ctx,
      * The result is that the kernel memory manager is never a bottleneck.
      */
     if (sctx->num_alloc_tex_transfer_bytes > sctx->screen->info.gart_size / 4) {
-        si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+        si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
         sctx->num_alloc_tex_transfer_bytes = 0;
     }
 

@@ -239,7 +239,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
           * Only check whether the buffer is being used for write. */
          if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                             RADEON_USAGE_WRITE)) {
-            cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+            cs->flush_cs(cs->flush_data,
+                         RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
             return NULL;
          }
 

@@ -249,7 +250,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
          }
       } else {
          if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
-            cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+            cs->flush_cs(cs->flush_data,
+                         RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
             return NULL;
          }
 

@@ -272,7 +274,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
             if (cs) {
                if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                             RADEON_USAGE_WRITE)) {
-                  cs->flush_cs(cs->flush_data, 0, NULL);
+                  cs->flush_cs(cs->flush_data,
+                               RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
                } else {
                   /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                   if (p_atomic_read(&bo->num_active_ioctls))

@@ -286,7 +289,8 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
          /* Mapping for write. */
          if (cs) {
             if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
-               cs->flush_cs(cs->flush_data, 0, NULL);
+               cs->flush_cs(cs->flush_data,
+                            RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
             } else {
                /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                if (p_atomic_read(&bo->num_active_ioctls))

@@ -516,7 +516,8 @@ static void *radeon_bo_map(struct pb_buffer *buf,
           *
           * Only check whether the buffer is being used for write. */
          if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
-            cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+            cs->flush_cs(cs->flush_data,
+                         RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
             return NULL;
          }
 

@@ -526,7 +527,8 @@ static void *radeon_bo_map(struct pb_buffer *buf,
          }
       } else {
          if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
-            cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+            cs->flush_cs(cs->flush_data,
+                         RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
             return NULL;
          }
 

@@ -547,7 +549,8 @@ static void *radeon_bo_map(struct pb_buffer *buf,
           *
           * Only check whether the buffer is being used for write. */
          if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
-            cs->flush_cs(cs->flush_data, 0, NULL);
+            cs->flush_cs(cs->flush_data,
+                         RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
          }
          radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                         RADEON_USAGE_WRITE);

@@ -555,7 +558,8 @@ static void *radeon_bo_map(struct pb_buffer *buf,
          /* Mapping for write. */
          if (cs) {
             if (radeon_bo_is_referenced_by_cs(cs, bo)) {
-               cs->flush_cs(cs->flush_data, 0, NULL);
+               cs->flush_cs(cs->flush_data,
+                            RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
             } else {
                /* Try to avoid busy-waiting in radeon_bo_wait. */
                if (p_atomic_read(&bo->num_active_ioctls))

@@ -407,7 +407,8 @@ static bool radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
 
    /* Flush if there are any relocs. Clean up otherwise. */
    if (cs->csc->num_relocs) {
-      cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+      cs->flush_cs(cs->flush_data,
+                   RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
    } else {
       radeon_cs_context_cleanup(cs->csc);
       cs->base.used_vram = 0;