mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-06 07:18:17 +02:00
radeonsi: call CS flush functions directly whenever possible
Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
This commit is contained in:
parent
0669dca9c0
commit
17e8f1608e
8 changed files with 24 additions and 24 deletions
|
|
@@ -64,10 +64,10 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
|
|||
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
|
||||
resource->buf, rusage)) {
|
||||
if (usage & PIPE_TRANSFER_DONTBLOCK) {
|
||||
ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
return NULL;
|
||||
} else {
|
||||
ctx->gfx.flush(ctx, 0, NULL);
|
||||
si_flush_gfx_cs(ctx, 0, NULL);
|
||||
busy = true;
|
||||
}
|
||||
}
|
||||
|
|
@@ -75,10 +75,10 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
|
|||
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
|
||||
resource->buf, rusage)) {
|
||||
if (usage & PIPE_TRANSFER_DONTBLOCK) {
|
||||
ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
return NULL;
|
||||
} else {
|
||||
ctx->dma.flush(ctx, 0, NULL);
|
||||
si_flush_dma_cs(ctx, 0, NULL);
|
||||
busy = true;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -186,7 +186,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
|
|||
(src &&
|
||||
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
|
||||
RADEON_USAGE_WRITE))))
|
||||
ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
|
||||
/* Flush if there's not enough space, or if the memory usage per IB
|
||||
* is too large.
|
||||
|
|
@@ -204,7 +204,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
|
|||
if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
|
||||
ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
|
||||
!radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
|
||||
ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
|
||||
}
|
||||
|
||||
|
|
@@ -234,8 +234,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
|
|||
ctx->num_dma_calls++;
|
||||
}
|
||||
|
||||
static void r600_flush_dma_ring(void *ctx, unsigned flags,
|
||||
struct pipe_fence_handle **fence)
|
||||
void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
|
||||
{
|
||||
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
|
||||
struct radeon_winsys_cs *cs = rctx->dma.cs;
|
||||
|
|
@@ -380,12 +379,12 @@ static bool r600_resource_commit(struct pipe_context *pctx,
|
|||
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
|
||||
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
|
||||
res->buf, RADEON_USAGE_READWRITE)) {
|
||||
ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
}
|
||||
if (radeon_emitted(ctx->dma.cs, 0) &&
|
||||
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
|
||||
res->buf, RADEON_USAGE_READWRITE)) {
|
||||
ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
}
|
||||
|
||||
ctx->ws->cs_sync_flush(ctx->dma.cs);
|
||||
|
|
@@ -463,9 +462,9 @@ bool si_common_context_init(struct r600_common_context *rctx,
|
|||
|
||||
if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
|
||||
rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
|
||||
r600_flush_dma_ring,
|
||||
si_flush_dma_cs,
|
||||
rctx);
|
||||
rctx->dma.flush = r600_flush_dma_ring;
|
||||
rctx->dma.flush = si_flush_dma_cs;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
|
|
|||
|
|
@@ -569,6 +569,7 @@ void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
|
|||
struct radeon_saved_cs *saved, bool get_buffer_list);
|
||||
void si_clear_saved_cs(struct radeon_saved_cs *saved);
|
||||
bool si_check_device_reset(struct r600_common_context *rctx);
|
||||
void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
|
||||
|
||||
/* r600_gpu_load.c */
|
||||
void si_gpu_load_kill_thread(struct si_screen *sscreen);
|
||||
|
|
|
|||
|
|
@@ -1863,7 +1863,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
|
|||
* The result is that the kernel memory manager is never a bottleneck.
|
||||
*/
|
||||
if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
|
||||
rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(rctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
rctx->num_alloc_tex_transfer_bytes = 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -53,7 +53,7 @@ void si_need_cs_space(struct si_context *ctx)
|
|||
ctx->b.vram, ctx->b.gtt))) {
|
||||
ctx->b.gtt = 0;
|
||||
ctx->b.vram = 0;
|
||||
ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
return;
|
||||
}
|
||||
ctx->b.gtt = 0;
|
||||
|
|
@@ -63,11 +63,11 @@ void si_need_cs_space(struct si_context *ctx)
|
|||
* and just flush if there is not enough space left.
|
||||
*/
|
||||
if (!ctx->b.ws->cs_check_space(cs, 2048))
|
||||
ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
}
|
||||
|
||||
void si_context_gfx_flush(void *context, unsigned flags,
|
||||
struct pipe_fence_handle **fence)
|
||||
void si_flush_gfx_cs(void *context, unsigned flags,
|
||||
struct pipe_fence_handle **fence)
|
||||
{
|
||||
struct si_context *ctx = context;
|
||||
struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
|
||||
|
|
@@ -92,7 +92,7 @@ void si_context_gfx_flush(void *context, unsigned flags,
|
|||
*/
|
||||
if (radeon_emitted(ctx->b.dma.cs, 0)) {
|
||||
assert(fence == NULL); /* internal flushes only */
|
||||
ctx->b.dma.flush(ctx, flags, NULL);
|
||||
si_flush_dma_cs(ctx, flags, NULL);
|
||||
}
|
||||
|
||||
ctx->gfx_flush_in_progress = true;
|
||||
|
|
|
|||
|
|
@@ -287,8 +287,8 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
|
|||
}
|
||||
|
||||
sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
|
||||
si_context_gfx_flush, sctx);
|
||||
sctx->b.gfx.flush = si_context_gfx_flush;
|
||||
si_flush_gfx_cs, sctx);
|
||||
sctx->b.gfx.flush = si_flush_gfx_cs;
|
||||
|
||||
/* Border colors. */
|
||||
sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
|
||||
|
|
|
|||
|
|
@@ -722,8 +722,8 @@ void si_init_screen_get_functions(struct si_screen *sscreen);
|
|||
|
||||
/* si_hw_context.c */
|
||||
void si_destroy_saved_cs(struct si_saved_cs *scs);
|
||||
void si_context_gfx_flush(void *context, unsigned flags,
|
||||
struct pipe_fence_handle **fence);
|
||||
void si_flush_gfx_cs(void *context, unsigned flags,
|
||||
struct pipe_fence_handle **fence);
|
||||
void si_begin_new_cs(struct si_context *ctx);
|
||||
void si_need_cs_space(struct si_context *ctx);
|
||||
|
||||
|
|
|
|||
|
|
@@ -2775,7 +2775,7 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
|
|||
|
||||
/* Flush the context to re-emit both init_config states. */
|
||||
sctx->b.initial_gfx_cs_size = 0; /* force flush */
|
||||
si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
|
||||
/* Set ring bindings. */
|
||||
if (sctx->esgs_ring) {
|
||||
|
|
@@ -3052,7 +3052,7 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
|
|||
*/
|
||||
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
|
||||
sctx->b.initial_gfx_cs_size = 0; /* force flush */
|
||||
si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue