gallium/radeon: merge timer and non-timer query lists

All of them are paused only between IBs.

Reviewed-by: Edward O'Callaghan <eocallaghan@alterapraxis.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
This commit is contained in:
Marek Olšák 2016-04-08 21:24:19 +02:00
parent 7347c068d8
commit 0222351fc1
4 changed files with 23 additions and 82 deletions

View file

@@ -65,8 +65,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
}
/* Count in queries_suspend. */
num_dw += ctx->b.num_cs_dw_nontimer_queries_suspend +
ctx->b.num_cs_dw_timer_queries_suspend;
num_dw += ctx->b.num_cs_dw_queries_suspend;
/* Count in streamout_end at the end of CS. */
if (ctx->b.streamout.begin_emitted) {

View file

@@ -156,14 +156,8 @@ static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
void r600_preflush_suspend_features(struct r600_common_context *ctx)
{
/* suspend queries */
if (ctx->num_cs_dw_nontimer_queries_suspend) {
/* Since non-timer queries are suspended during blits,
* we have to guard against double-suspends. */
r600_suspend_nontimer_queries(ctx);
ctx->nontimer_queries_suspended_by_flush = true;
}
if (!LIST_IS_EMPTY(&ctx->active_timer_queries))
r600_suspend_timer_queries(ctx);
if (!LIST_IS_EMPTY(&ctx->active_queries))
r600_suspend_queries(ctx);
ctx->streamout.suspended = false;
if (ctx->streamout.begin_emitted) {
@@ -180,12 +174,8 @@ void r600_postflush_resume_features(struct r600_common_context *ctx)
}
/* resume queries */
if (!LIST_IS_EMPTY(&ctx->active_timer_queries))
r600_resume_timer_queries(ctx);
if (ctx->nontimer_queries_suspended_by_flush) {
ctx->nontimer_queries_suspended_by_flush = false;
r600_resume_nontimer_queries(ctx);
}
if (!LIST_IS_EMPTY(&ctx->active_queries))
r600_resume_queries(ctx);
}
static void r600_flush_from_st(struct pipe_context *ctx,

View file

@@ -428,18 +428,11 @@ struct r600_common_context {
unsigned flags; /* flush flags */
/* Queries. */
/* The list of active queries. */
/* Maintain the list of active queries for pausing between IBs. */
int num_occlusion_queries;
int num_perfect_occlusion_queries;
/* Keep track of non-timer queries, because they should be suspended
* during context flushing.
* The timer queries (TIME_ELAPSED) shouldn't be suspended for blits,
* but they should be suspended between IBs. */
struct list_head active_nontimer_queries;
struct list_head active_timer_queries;
unsigned num_cs_dw_nontimer_queries_suspend;
bool nontimer_queries_suspended_by_flush;
unsigned num_cs_dw_timer_queries_suspend;
struct list_head active_queries;
unsigned num_cs_dw_queries_suspend;
/* Additional hardware info. */
unsigned backend_mask;
unsigned max_db; /* for OQ */
@@ -569,10 +562,8 @@ void r600_perfcounters_destroy(struct r600_common_screen *rscreen);
/* r600_query.c */
void r600_init_screen_query_functions(struct r600_common_screen *rscreen);
void r600_query_init(struct r600_common_context *rctx);
void r600_suspend_nontimer_queries(struct r600_common_context *ctx);
void r600_resume_nontimer_queries(struct r600_common_context *ctx);
void r600_suspend_timer_queries(struct r600_common_context *ctx);
void r600_resume_timer_queries(struct r600_common_context *ctx);
void r600_suspend_queries(struct r600_common_context *ctx);
void r600_resume_queries(struct r600_common_context *ctx);
void r600_query_init_backend_mask(struct r600_common_context *ctx);
/* r600_streamout.c */

View file

@@ -516,10 +516,7 @@ static void r600_query_hw_emit_start(struct r600_common_context *ctx,
query->ops->emit_start(ctx, query, query->buffer.buf, va);
if (query->flags & R600_QUERY_HW_FLAG_TIMER)
ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw_end;
else
ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw_end;
ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
@@ -590,12 +587,8 @@ static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
query->buffer.results_end += query->result_size;
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START)) {
if (query->flags & R600_QUERY_HW_FLAG_TIMER)
ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw_end;
else
ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw_end;
}
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
r600_update_occlusion_query_state(ctx, query->b.type, -1);
r600_update_prims_generated_query_state(ctx, query->b.type, -1);
@@ -730,11 +723,8 @@ boolean r600_query_hw_begin(struct r600_common_context *rctx,
r600_query_hw_emit_start(rctx, query);
if (query->flags & R600_QUERY_HW_FLAG_TIMER)
LIST_ADDTAIL(&query->list, &rctx->active_timer_queries);
else
LIST_ADDTAIL(&query->list, &rctx->active_nontimer_queries);
return true;
LIST_ADDTAIL(&query->list, &rctx->active_queries);
return true;
}
static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
@@ -973,28 +963,14 @@ static void r600_render_condition(struct pipe_context *ctx,
rctx->set_atom_dirty(rctx, atom, query != NULL);
}
static void r600_suspend_queries(struct r600_common_context *ctx,
struct list_head *query_list,
unsigned *num_cs_dw_queries_suspend)
void r600_suspend_queries(struct r600_common_context *ctx)
{
struct r600_query_hw *query;
LIST_FOR_EACH_ENTRY(query, query_list, list) {
LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
r600_query_hw_emit_stop(ctx, query);
}
assert(*num_cs_dw_queries_suspend == 0);
}
void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
{
r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
&ctx->num_cs_dw_nontimer_queries_suspend);
}
void r600_suspend_timer_queries(struct r600_common_context *ctx)
{
r600_suspend_queries(ctx, &ctx->active_timer_queries,
&ctx->num_cs_dw_timer_queries_suspend);
assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
@@ -1022,35 +998,21 @@ static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *
return num_dw;
}
static void r600_resume_queries(struct r600_common_context *ctx,
struct list_head *query_list,
unsigned *num_cs_dw_queries_suspend)
void r600_resume_queries(struct r600_common_context *ctx)
{
struct r600_query_hw *query;
unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);
unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
assert(*num_cs_dw_queries_suspend == 0);
assert(ctx->num_cs_dw_queries_suspend == 0);
/* Check CS space here. Resuming must not be interrupted by flushes. */
ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
LIST_FOR_EACH_ENTRY(query, query_list, list) {
LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
r600_query_hw_emit_start(ctx, query);
}
}
void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
r600_resume_queries(ctx, &ctx->active_nontimer_queries,
&ctx->num_cs_dw_nontimer_queries_suspend);
}
void r600_resume_timer_queries(struct r600_common_context *ctx)
{
r600_resume_queries(ctx, &ctx->active_timer_queries,
&ctx->num_cs_dw_timer_queries_suspend);
}
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
@@ -1274,8 +1236,7 @@ void r600_query_init(struct r600_common_context *rctx)
if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
rctx->b.render_condition = r600_render_condition;
LIST_INITHEAD(&rctx->active_nontimer_queries);
LIST_INITHEAD(&rctx->active_timer_queries);
LIST_INITHEAD(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)