r600g, radeonsi: fix primitives-generated query with disabled streamout

Buffers are enabled or disabled through VGT_STRMOUT_BUFFER_CONFIG, but the query
only works if VGT_STRMOUT_CONFIG.STREAMOUT_0_EN is set.

This moves VGT_STRMOUT_CONFIG to its own state. The register is set to 1
if either streamout or the primitives-generated query is enabled.

However, that also makes the primitives-emitted query increment, so it is
kept from advancing by setting VGT_STRMOUT_BUFFER_SIZE to 0 when no buffer
is bound.
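
The idea, as an illustrative sketch only (derive_strmout_regs, its parameters
and the struct are invented for this example; the real logic is the new code
in r600_streamout.c below):

  #include <stdbool.h>
  #include <stdint.h>

  struct strmout_regs {
          uint32_t vgt_strmout_config;        /* holds STREAMOUT_0_EN */
          uint32_t vgt_strmout_buffer_config; /* per-buffer enable mask */
  };

  static struct strmout_regs derive_strmout_regs(bool streamout_enabled,
                                                 bool prims_gen_query_enabled,
                                                 uint32_t buffer_mask)
  {
          struct strmout_regs regs;

          /* STREAMOUT_0_EN must be on whenever streamout or a
           * primitives-generated query is active; the query only counts
           * while streamout is enabled. */
          regs.vgt_strmout_config =
                  (streamout_enabled || prims_gen_query_enabled) ? 1 : 0;

          /* The buffer mask is independent; with nothing bound it stays 0,
           * and the buffer sizes are zeroed so primitives-emitted cannot
           * advance. */
          regs.vgt_strmout_buffer_config = streamout_enabled ? buffer_mask : 0;

          return regs;
  }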

This fixes piglit:
  ARB_transform_feedback2/counting with pause
  EXT_transform_feedback/primgen-query transform-feedback-disabled

Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Marek Olšák 2014-03-09 22:12:26 +01:00
parent 958ef47a6d
commit f549129564
11 changed files with 87 additions and 49 deletions


@@ -2248,9 +2248,7 @@ static void cayman_init_atom_start_cs(struct r600_context *rctx)
r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */
r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE */
- r600_store_context_reg_seq(cb, R_028B94_VGT_STRMOUT_CONFIG, 2);
- r600_store_value(cb, 0); /* R_028B94_VGT_STRMOUT_CONFIG */
- r600_store_value(cb, 0); /* R_028B98_VGT_STRMOUT_BUFFER_CONFIG */
+ r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);
r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2);
r600_store_value(cb, 0); /* R_028AB4_VGT_REUSE_OFF */
@@ -2809,9 +2807,7 @@ void evergreen_init_atom_start_cs(struct r600_context *rctx)
r600_store_value(cb, 0);
r600_store_value(cb, 0);
- r600_store_context_reg_seq(cb, R_028B94_VGT_STRMOUT_CONFIG, 2);
- r600_store_value(cb, 0); /* R_028B94_VGT_STRMOUT_CONFIG */
- r600_store_value(cb, 0); /* R_028B98_VGT_STRMOUT_BUFFER_CONFIG */
+ r600_store_context_reg(cb, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0);
if (rctx->screen->b.has_streamout) {
r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0);
@@ -3488,6 +3484,7 @@ void evergreen_init_state_functions(struct r600_context *rctx)
r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, evergreen_emit_vertex_fetch_shader, 5);
rctx->atoms[id++] = &rctx->b.streamout.begin_atom;
+ rctx->atoms[id++] = &rctx->b.streamout.enable_atom;
r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);


@@ -311,7 +311,7 @@ void r600_begin_new_cs(struct r600_context *ctx)
ctx->gs_rings.atom.dirty = true;
}
ctx->vertex_shader.atom.dirty = true;
+ ctx->b.streamout.enable_atom.dirty = true;
if (ctx->blend_state.cso)
ctx->blend_state.atom.dirty = true;


@@ -37,7 +37,7 @@
#include "util/u_double_list.h"
#include "util/u_transfer.h"
- #define R600_NUM_ATOMS 72
+ #define R600_NUM_ATOMS 73
/* the number of CS dwords for flushing and drawing */
#define R600_MAX_FLUSH_CS_DWORDS 16


@@ -2316,8 +2316,7 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
r600_store_context_reg(cb, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0);
r600_store_context_reg(cb, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0);
- r600_store_context_reg_seq(cb, R_028AB0_VGT_STRMOUT_EN, 3);
- r600_store_value(cb, 0); /* R_028AB0_VGT_STRMOUT_EN */
+ r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2);
r600_store_value(cb, 1); /* R_028AB4_VGT_REUSE_OFF */
r600_store_value(cb, 0); /* R_028AB8_VGT_VTX_CNT_EN */
@@ -3027,6 +3026,7 @@ void r600_init_state_functions(struct r600_context *rctx)
r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4);
r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5);
rctx->atoms[id++] = &rctx->b.streamout.begin_atom;
+ rctx->atoms[id++] = &rctx->b.streamout.enable_atom;
r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);


@@ -280,6 +280,12 @@ struct r600_streamout {
/* External state which comes from the vertex shader,
* it must be set explicitly when binding a shader. */
unsigned *stride_in_dw;
+ /* The state of VGT_STRMOUT_(CONFIG|EN). */
+ struct r600_atom enable_atom;
+ bool streamout_enabled;
+ bool prims_gen_query_enabled;
+ int num_prims_gen_queries;
};
struct r600_ring {
@@ -416,6 +422,8 @@ void r600_set_streamout_targets(struct pipe_context *ctx,
struct pipe_stream_output_target **targets,
const unsigned *offset);
void r600_emit_streamout_end(struct r600_common_context *rctx);
+ void r600_update_prims_generated_query_state(struct r600_common_context *rctx,
+ unsigned type, int diff);
void r600_streamout_init(struct r600_common_context *rctx);
/* r600_texture.c */


@@ -152,6 +152,7 @@ static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_q
uint64_t va;
r600_update_occlusion_query_state(ctx, query->type, 1);
+ r600_update_prims_generated_query_state(ctx, query->type, 1);
ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);
/* Get a new query buffer if needed. */
@@ -284,6 +285,7 @@ static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_que
}
r600_update_occlusion_query_state(ctx, query->type, -1);
+ r600_update_prims_generated_query_state(ctx, query->type, -1);
}
static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query,


@@ -29,6 +29,8 @@
#include "util/u_memory.h"
+ static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable);
static struct pipe_stream_output_target *
r600_create_so_target(struct pipe_context *ctx,
struct pipe_resource *buffer,
@@ -84,11 +86,10 @@ void r600_streamout_buffers_dirty(struct r600_common_context *rctx)
rctx->streamout.num_dw_for_end =
12 + /* flush_vgt_streamout */
- num_bufs * 8 + /* STRMOUT_BUFFER_UPDATE */
- 3 /* set_streamout_enable(0) */;
+ num_bufs * 11; /* STRMOUT_BUFFER_UPDATE, BUFFER_SIZE */
begin->num_dw = 12 + /* flush_vgt_streamout */
- 6; /* set_streamout_enable */
+ 3; /* VGT_STRMOUT_BUFFER_CONFIG */
if (rctx->chip_class >= SI) {
begin->num_dw += num_bufs * 4; /* SET_CONTEXT_REG */
@@ -105,6 +106,8 @@ void r600_streamout_buffers_dirty(struct r600_common_context *rctx)
(rctx->family > CHIP_R600 && rctx->family < CHIP_RS780 ? 2 : 0); /* SURFACE_BASE_UPDATE */
begin->dirty = true;
+ r600_set_streamout_enable(rctx, true);
}
void r600_set_streamout_targets(struct pipe_context *ctx,
@@ -144,6 +147,7 @@ void r600_set_streamout_targets(struct pipe_context *ctx,
r600_streamout_buffers_dirty(rctx);
} else {
rctx->streamout.begin_atom.dirty = false;
+ r600_set_streamout_enable(rctx, false);
}
}
@@ -179,31 +183,6 @@ static void r600_flush_vgt_streamout(struct r600_common_context *rctx)
radeon_emit(cs, 4); /* poll interval */
}
- static void r600_set_streamout_enable(struct r600_common_context *rctx, unsigned buffer_enable_bit)
- {
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
- if (buffer_enable_bit) {
- r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1));
- r600_write_context_reg(cs, R_028B20_VGT_STRMOUT_BUFFER_EN, buffer_enable_bit);
- } else {
- r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(0));
- }
- }
- static void evergreen_set_streamout_enable(struct r600_common_context *rctx, unsigned buffer_enable_bit)
- {
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
- if (buffer_enable_bit) {
- r600_write_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
- radeon_emit(cs, S_028B94_STREAMOUT_0_EN(1)); /* R_028B94_VGT_STRMOUT_CONFIG */
- radeon_emit(cs, S_028B98_STREAM_0_BUFFER_EN(buffer_enable_bit)); /* R_028B98_VGT_STRMOUT_BUFFER_CONFIG */
- } else {
- r600_write_context_reg(cs, R_028B94_VGT_STRMOUT_CONFIG, S_028B94_STREAMOUT_0_EN(0));
- }
- }
static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
@@ -213,11 +192,10 @@ static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r
r600_flush_vgt_streamout(rctx);
- if (rctx->chip_class >= EVERGREEN) {
- evergreen_set_streamout_enable(rctx, rctx->streamout.enabled_mask);
- } else {
- r600_set_streamout_enable(rctx, rctx->streamout.enabled_mask);
- }
+ r600_write_context_reg(cs, rctx->chip_class >= EVERGREEN ?
+ R_028B98_VGT_STRMOUT_BUFFER_CONFIG :
+ R_028B20_VGT_STRMOUT_BUFFER_EN,
+ rctx->streamout.enabled_mask);
for (i = 0; i < rctx->streamout.num_targets; i++) {
if (!t[i])
@@ -321,12 +299,12 @@ void r600_emit_streamout_end(struct r600_common_context *rctx)
r600_emit_reloc(rctx, &rctx->rings.gfx, t[i]->buf_filled_size,
RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
- }
- if (rctx->chip_class >= EVERGREEN) {
- evergreen_set_streamout_enable(rctx, 0);
- } else {
- r600_set_streamout_enable(rctx, 0);
+ /* Zero the buffer size. The counters (primitives generated,
+ * primitives emitted) may be enabled even if there is no
+ * buffer bound. This ensures that the primitives-emitted query
+ * won't increment. */
+ r600_write_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
}
rctx->streamout.begin_emitted = false;
@@ -338,9 +316,60 @@ void r600_emit_streamout_end(struct r600_common_context *rctx)
}
}
+ /* STREAMOUT CONFIG DERIVED STATE
+ *
+ * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
+ * The buffer mask is an independent state, so no writes occur if there
+ * are no buffers bound.
+ */
+ static bool r600_get_strmout_en(struct r600_common_context *rctx)
+ {
+ return rctx->streamout.streamout_enabled ||
+ rctx->streamout.prims_gen_query_enabled;
+ }
+ static void r600_emit_streamout_enable(struct r600_common_context *rctx,
+ struct r600_atom *atom)
+ {
+ r600_write_context_reg(rctx->rings.gfx.cs,
+ rctx->chip_class >= EVERGREEN ?
+ R_028B94_VGT_STRMOUT_CONFIG :
+ R_028AB0_VGT_STRMOUT_EN,
+ S_028B94_STREAMOUT_0_EN(r600_get_strmout_en(rctx)));
+ }
+ static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
+ {
+ bool old_strmout_en = r600_get_strmout_en(rctx);
+ rctx->streamout.streamout_enabled = enable;
+ if (old_strmout_en != r600_get_strmout_en(rctx))
+ rctx->streamout.enable_atom.dirty = true;
+ }
+ void r600_update_prims_generated_query_state(struct r600_common_context *rctx,
+ unsigned type, int diff)
+ {
+ if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
+ bool old_strmout_en = r600_get_strmout_en(rctx);
+ rctx->streamout.num_prims_gen_queries += diff;
+ assert(rctx->streamout.num_prims_gen_queries >= 0);
+ rctx->streamout.prims_gen_query_enabled =
+ rctx->streamout.num_prims_gen_queries != 0;
+ if (old_strmout_en != r600_get_strmout_en(rctx))
+ rctx->streamout.enable_atom.dirty = true;
+ }
+ }
void r600_streamout_init(struct r600_common_context *rctx)
{
rctx->b.create_stream_output_target = r600_create_so_target;
rctx->b.stream_output_target_destroy = r600_so_target_destroy;
rctx->streamout.begin_atom.emit = r600_emit_streamout_begin;
+ rctx->streamout.enable_atom.emit = r600_emit_streamout_enable;
+ rctx->streamout.enable_atom.num_dw = 3;
}


@@ -177,6 +177,7 @@ void si_begin_new_cs(struct si_context *ctx)
}
ctx->framebuffer.atom.dirty = true;
+ ctx->b.streamout.enable_atom.dirty = true;
si_all_descriptors_begin_new_cs(ctx);
ctx->b.initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;


@@ -148,6 +148,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, void *
sctx->atoms.cache_flush = &sctx->cache_flush;
sctx->atoms.streamout_begin = &sctx->b.streamout.begin_atom;
+ sctx->atoms.streamout_enable = &sctx->b.streamout.enable_atom;
switch (sctx->b.chip_class) {
case SI:


@@ -108,6 +108,7 @@ struct si_context {
* updated in memory. */
struct r600_atom *cache_flush;
struct r600_atom *streamout_begin;
+ struct r600_atom *streamout_enable; /* must be after streamout_begin */
struct r600_atom *framebuffer;
};
struct r600_atom *array[0];


@@ -3064,7 +3064,6 @@ void si_init_config(struct si_context *sctx)
si_pm4_set_reg(pm4, R_028B68_VGT_GS_VERT_ITEMSIZE_3, 0);
si_pm4_set_reg(pm4, R_028B90_VGT_GS_INSTANCE_CNT, 0);
- si_pm4_set_reg(pm4, R_028B94_VGT_STRMOUT_CONFIG, 0x0);
si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
if (sctx->b.chip_class == SI) {
si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,