Mirror of https://gitlab.freedesktop.org/mesa/mesa.git

radeonsi: move EOP event code from r600_pipe_common.c to si_fence.c
Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
parent fc6a44e169
commit 19f550f1d2
4 changed files with 129 additions and 129 deletions

@@ -31,127 +31,6 @@
 * pipe_context
 */

/**
 * Write an EOP event.
 *
 * \param event		EVENT_TYPE_*
 * \param event_flags	Optional cache flush flags (TC)
 * \param data_sel	1 = fence, 3 = timestamp
 * \param buf		Buffer
 * \param va		GPU address
 * \param old_value	Previous fence value (for a bug workaround)
 * \param new_value	Fence value to write for this event.
 */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	/* Wait for write confirmation before writing data, but don't send
	 * an interrupt. */
	if (data_sel != EOP_DATA_SEL_DISCARD)
		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

	if (ctx->chip_class >= GFX9) {
		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
		 * counters) must immediately precede every timestamp event to
		 * prevent a GPU hang on GFX9.
		 *
		 * Occlusion queries don't need to do it here, because they
		 * always do ZPASS_DONE before the timestamp.
		 */
		if (ctx->chip_class == GFX9 &&
		    query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;

			assert(16 * ctx->screen->info.num_render_backends <=
			       scratch->b.b.width0);
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
			radeon_emit(cs, scratch->gpu_address);
			radeon_emit(cs, scratch->gpu_address >> 32);

			radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, sel);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0); /* immediate data hi */
		radeon_emit(cs, 0); /* unused */
	} else {
		if (ctx->chip_class == CIK ||
		    ctx->chip_class == VI) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;
			uint64_t va = scratch->gpu_address;

			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
			radeon_emit(cs, 0); /* immediate data */
			radeon_emit(cs, 0); /* unused */

			radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
		radeon_emit(cs, new_fence); /* immediate data */
		radeon_emit(cs, 0); /* unused */
	}

	if (buf) {
		radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
					  RADEON_PRIO_QUERY);
	}
}

unsigned si_gfx_write_fence_dwords(struct si_screen *screen)
{
	unsigned dwords = 6;

	if (screen->info.chip_class == CIK ||
	    screen->info.chip_class == VI)
		dwords *= 2;

	return dwords;
}

void si_gfx_wait_fence(struct r600_common_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */
}

static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
{
	struct radeon_winsys_cs *cs = rctx->dma.cs;

@@ -511,14 +511,6 @@ void si_init_screen_buffer_functions(struct si_screen *sscreen);
void si_init_buffer_functions(struct si_context *sctx);

/* r600_common_pipe.c */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type);
unsigned si_gfx_write_fence_dwords(struct si_screen *screen);
void si_gfx_wait_fence(struct r600_common_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask);
bool si_common_context_init(struct r600_common_context *rctx,
			    struct si_screen *sscreen,
			    unsigned context_flags);

@@ -53,6 +53,127 @@ struct si_multi_fence {
	struct si_fine_fence fine;
};

/**
 * Write an EOP event.
 *
 * \param event		EVENT_TYPE_*
 * \param event_flags	Optional cache flush flags (TC)
 * \param data_sel	1 = fence, 3 = timestamp
 * \param buf		Buffer
 * \param va		GPU address
 * \param old_value	Previous fence value (for a bug workaround)
 * \param new_value	Fence value to write for this event.
 */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	/* Wait for write confirmation before writing data, but don't send
	 * an interrupt. */
	if (data_sel != EOP_DATA_SEL_DISCARD)
		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

	if (ctx->chip_class >= GFX9) {
		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
		 * counters) must immediately precede every timestamp event to
		 * prevent a GPU hang on GFX9.
		 *
		 * Occlusion queries don't need to do it here, because they
		 * always do ZPASS_DONE before the timestamp.
		 */
		if (ctx->chip_class == GFX9 &&
		    query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;

			assert(16 * ctx->screen->info.num_render_backends <=
			       scratch->b.b.width0);
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
			radeon_emit(cs, scratch->gpu_address);
			radeon_emit(cs, scratch->gpu_address >> 32);

			radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, sel);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0); /* immediate data hi */
		radeon_emit(cs, 0); /* unused */
	} else {
		if (ctx->chip_class == CIK ||
		    ctx->chip_class == VI) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;
			uint64_t va = scratch->gpu_address;

			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
			radeon_emit(cs, 0); /* immediate data */
			radeon_emit(cs, 0); /* unused */

			radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
		radeon_emit(cs, new_fence); /* immediate data */
		radeon_emit(cs, 0); /* unused */
	}

	if (buf) {
		radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
					  RADEON_PRIO_QUERY);
	}
}

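As a caller-side illustration (not part of this commit), the helper above is what emits a bottom-of-pipe fence write. The sketch below is hypothetical: the wrapper name and the 0x80000000 fence value are made up, data_sel = 1 follows the "1 = fence" note in the comment above, and EVENT_TYPE_BOTTOM_OF_PIPE_TS / PIPE_QUERY_GPU_FINISHED are assumed to be the existing Mesa/Gallium defines.

/* Hypothetical sketch, not part of this commit: write the 32-bit value
 * 0x80000000 to fence_va (inside buf) once all prior work has drained
 * past the bottom of the pipe.
 */
static void emit_gpu_fence_sketch(struct r600_common_context *ctx,
				  struct r600_resource *buf, uint64_t fence_va)
{
	si_gfx_write_event_eop(ctx,
			       EVENT_TYPE_BOTTOM_OF_PIPE_TS, /* event */
			       0,                            /* no extra cache flushes */
			       1,                            /* data_sel: 1 = fence */
			       buf, fence_va,
			       0x80000000,                   /* new_fence */
			       PIPE_QUERY_GPU_FINISHED);     /* not an occlusion query */
}
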
unsigned si_gfx_write_fence_dwords(struct si_screen *screen)
{
	unsigned dwords = 6;

	if (screen->info.chip_class == CIK ||
	    screen->info.chip_class == VI)
		dwords *= 2;

	return dwords;
}

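The constant 6 here is one EVENT_WRITE_EOP packet: a PKT3 header plus the five payload dwords emitted in the non-GFX9 path above, and CIK/VI double it because the idle workaround emits a second EOP packet first. A small hedged sanity-check sketch (not in the tree) that restates that arithmetic:

/* Hypothetical sketch: restates where the dword count comes from.
 * One EVENT_WRITE_EOP packet = 1 header + 5 payload dwords = 6;
 * CIK/VI emit two such packets, so the budget doubles to 12.
 */
static void check_fence_dword_budget(struct si_screen *sscreen)
{
	unsigned per_packet = 6;
	unsigned packets = (sscreen->info.chip_class == CIK ||
			    sscreen->info.chip_class == VI) ? 2 : 1;

	assert(si_gfx_write_fence_dwords(sscreen) == per_packet * packets);
}
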
void si_gfx_wait_fence(struct r600_common_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_winsys_cs *cs = ctx->gfx.cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */
}

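For completeness, a hypothetical wait-side sketch pairing with the write sketch above: WAIT_REG_MEM makes the command processor poll the fence location until (value & mask) == ref, so waiting on the same address and value gives in-stream synchronization. The 0x80000000 value and full 0xffffffff mask are illustrative.

/* Hypothetical sketch, not part of this commit: stall the CP until the
 * 32-bit word at fence_va equals 0x80000000 (all bits compared).
 */
static void wait_for_gpu_fence_sketch(struct r600_common_context *ctx,
				      uint64_t fence_va)
{
	si_gfx_wait_fence(ctx, fence_va, 0x80000000, 0xffffffff);
}
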
static void si_add_fence_dependency(struct r600_common_context *rctx,
				    struct pipe_fence_handle *fence)
{

@@ -719,6 +719,14 @@ bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
void si_init_dma_functions(struct si_context *sctx);

/* si_fence.c */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type);
unsigned si_gfx_write_fence_dwords(struct si_screen *screen);
void si_gfx_wait_fence(struct r600_common_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask);
void si_init_fence_functions(struct si_context *ctx);
void si_init_screen_fence_functions(struct si_screen *screen);
struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,