Reverts
* "i965: Modify state upload to allow 2 different sets of state atoms."
   8e27a4d2b3
* "i965: Modify dirty bit handling to support 2 pipelines."
   373143ed91
* "i965: Create a macro for checking a dirty bit."
   c5bdf9be1e
   Conflicts:
	src/mesa/drivers/dri/i965/brw_context.h
* "i965: Create a macro for setting all dirty bits."
   6f56e1424d
   Conflicts:
	src/mesa/drivers/dri/i965/brw_blorp.cpp
	src/mesa/drivers/dri/i965/brw_state_cache.c
	src/mesa/drivers/dri/i965/brw_state_upload.c
* "i965: Create a macro for setting a dirty bit."
   88e3d404da

Signed-off-by: Jordan Justen <jordan.l.justen@intel.com>
commit 864c463485
parent 5d8f40a53a
Author: Jordan Justen <jordan.l.justen@intel.com>
Date:   2014-09-03 14:28:59 -07:00

34 changed files with 112 additions and 185 deletions
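
Every hunk below makes the same mechanical change: uses of the reverted
SET_DIRTY*/CHECK_DIRTY_BIT macros go back to reading and writing a single
brw->state.dirty struct directly. A minimal standalone sketch of the restored
idiom follows; the stand-in types and bit values are hypothetical, only the
field names and the |=/& pattern come from the diff itself.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the driver types; the real definitions live in
     * brw_context.h. The flag values here are illustrative only. */
    struct brw_state_flags {
       uint32_t mesa;    /* _NEW_* flags from core Mesa */
       uint64_t brw;     /* BRW_NEW_* driver flags */
       uint32_t cache;   /* CACHE_NEW_* program-cache flags */
    };

    #define BRW_NEW_VERTICES (UINT64_C(1) << 0)  /* hypothetical bit */
    #define BRW_NEW_INDICES  (UINT64_C(1) << 1)  /* hypothetical bit */

    int main(void)
    {
       struct { struct brw_state_flags dirty; } state = { {0, 0, 0} };

       /* Producers flag state directly; no per-pipeline loop: */
       state.dirty.brw |= BRW_NEW_VERTICES | BRW_NEW_INDICES;

       /* Consumers test bits the way the atoms in brw_upload_state()
        * do after this revert: */
       if (state.dirty.brw & BRW_NEW_VERTICES)
          printf("re-upload vertex buffers\n");

       /* brw_clear_dirty_bits() resets the whole struct after a draw: */
       memset(&state.dirty, 0, sizeof(state.dirty));
       return 0;
    }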


@@ -82,7 +82,7 @@ brw_upload_binding_table(struct brw_context *brw,
              prog_data->binding_table.size_bytes);
    }
 
-   SET_DIRTY_BIT(brw, brw_new_binding_table);
+   brw->state.dirty.brw |= brw_new_binding_table;
 
    if (brw->gen >= 7) {
       BEGIN_BATCH(2);


@@ -273,8 +273,8 @@ retry:
    /* We've smashed all state compared to what the normal 3D pipeline
     * rendering tracks for GL.
     */
-   SET_DIRTY64_ALL(brw);
-   SET_DIRTY_ALL(cache);
+   brw->state.dirty.brw = ~0;
+   brw->state.dirty.cache = ~0;
    brw->no_depth_or_stencil = false;
    brw->ib.type = -1;


@@ -62,7 +62,7 @@ brw_upload_cc_vp(struct brw_context *brw)
       }
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CC_VP);
+   brw->state.dirty.cache |= CACHE_NEW_CC_VP;
 }
 
 const struct brw_tracked_state brw_cc_vp = {
@@ -223,7 +223,7 @@ static void upload_cc_unit(struct brw_context *brw)
    cc->cc4.cc_viewport_state_offset = (brw->batch.bo->offset64 +
                                        brw->cc.vp_offset) >> 5; /* reloc */
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CC_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_CC_UNIT;
 
    /* Emit CC viewport relocation */
    drm_intel_bo_emit_reloc(brw->batch.bo,


@@ -158,7 +158,7 @@ brw_upload_clip_unit(struct brw_context *brw)
    clip->viewport_ymin = -1;
    clip->viewport_ymax = 1;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_CLIP_UNIT;
 }
 
 const struct brw_tracked_state brw_clip_unit = {


@@ -241,67 +241,6 @@ struct brw_state_flags {
    GLuint cache;
 };
 
-/**
- * Enum representing the different pipelines.
- */
-typedef enum {
-   /**
-    * 3D rendering pipeline (vertex through fragment shader).
-    */
-   BRW_PIPELINE_3D,
-
-   /**
-    * Compute shader pipeline.
-    */
-   BRW_PIPELINE_COMPUTE,
-
-   BRW_NUM_PIPELINES
-} brw_pipeline;
-
-/**
- * Set one of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY_BIT(FIELD, FLAG) \
-   do { \
-      for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
-         brw->state.pipeline_dirty[pipeline].FIELD |= (FLAG); \
-   } while (false)
-
-/**
- * Set all of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY_ALL(FIELD) \
-   do { \
-      /* ~0 == 0xffffffff, so make sure field is <= 32 bits */ \
-      STATIC_ASSERT(sizeof(brw->state.pipeline_dirty[0].FIELD) == 4); \
-      for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
-         brw->state.pipeline_dirty[pipeline].FIELD = ~0; \
-   } while (false)
-
-/**
- * Set all of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY64_ALL(FIELD) \
-   do { \
-      /* ~0ULL == 0xffffffffffffffff, so make sure field is <= 64 bits */ \
-      STATIC_ASSERT(sizeof(brw->state.pipeline_dirty[0].FIELD) == 8); \
-      for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
-         brw->state.pipeline_dirty[pipeline].FIELD = ~(0ULL); \
-   } while (false)
-
-/**
- * Check one of the bits in a field of brw_state_flags.
- */
-#define CHECK_DIRTY_BIT(FIELD, FLAG) \
-   ((brw->state.pipeline_dirty[brw->state.current_pipeline].FIELD & (FLAG)) \
-    != 0)
-
 /** Subclass of Mesa vertex program */
 struct brw_vertex_program {
    struct gl_vertex_program program;
@@ -1107,8 +1046,7 @@ struct brw_context
    GLuint NewGLState;
 
    struct {
-      struct brw_state_flags pipeline_dirty[BRW_NUM_PIPELINES];
-      brw_pipeline current_pipeline;
+      struct brw_state_flags dirty;
    } state;
 
    struct brw_cache cache;
@@ -1367,8 +1305,8 @@ struct brw_context
       int entries_per_oa_snapshot;
    } perfmon;
 
-   int num_atoms[BRW_NUM_PIPELINES];
-   const struct brw_tracked_state **atoms[BRW_NUM_PIPELINES];
+   int num_atoms;
+   const struct brw_tracked_state **atoms;
 
    /* If (INTEL_DEBUG & DEBUG_BATCH) */
    struct {
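
For orientation, a heavily abridged sketch of the layout this header hunk
restores. The field sizes follow the STATIC_ASSERTs in the deleted macros
(mesa and cache are 32-bit, brw is 64-bit); the struct name and stand-in
integer types are hypothetical, everything else is taken from the hunk above.

    #include <stdint.h>

    struct brw_state_flags {
       uint32_t mesa;
       uint64_t brw;
       uint32_t cache;
    };

    struct brw_tracked_state;   /* opaque here; see brw_state.h */

    struct brw_context_sketch {
       struct {
          struct brw_state_flags dirty;   /* one dirty set again, not
                                           * pipeline_dirty[BRW_NUM_PIPELINES] */
       } state;

       int num_atoms;                          /* one flat atom list ... */
       const struct brw_tracked_state **atoms; /* ... not per-pipeline arrays */
    };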


@@ -134,7 +134,7 @@ static void calculate_curbe_offsets( struct brw_context *brw )
              brw->curbe.vs_start,
              brw->curbe.vs_size );
 
-      SET_DIRTY_BIT(brw, BRW_NEW_CURBE_OFFSETS);
+      brw->state.dirty.brw |= BRW_NEW_CURBE_OFFSETS;
    }
 }


@@ -126,11 +126,11 @@ static void brw_set_prim(struct brw_context *brw,
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
+      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
 
       if (reduced_prim[prim->mode] != brw->reduced_primitive) {
          brw->reduced_primitive = reduced_prim[prim->mode];
-         SET_DIRTY_BIT(brw, BRW_NEW_REDUCED_PRIMITIVE);
+         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
       }
    }
 }
@@ -146,7 +146,7 @@ static void gen6_set_prim(struct brw_context *brw,
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
+      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
    }
 }
@@ -403,11 +403,11 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
    brw_merge_inputs( brw, arrays );
    brw->ib.ib = ib;
-   SET_DIRTY_BIT(brw, BRW_NEW_INDICES);
+   brw->state.dirty.brw |= BRW_NEW_INDICES;
 
    brw->vb.min_index = min_index;
    brw->vb.max_index = max_index;
-   SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
+   brw->state.dirty.brw |= BRW_NEW_VERTICES;
 
    for (i = 0; i < nr_prims; i++) {
       int estimated_max_prim_size;
@@ -432,7 +432,7 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
       brw->num_instances = prims[i].num_instances;
       brw->basevertex = prims[i].basevertex;
       if (i > 0) { /* For i == 0 we just did this before the loop */
-         SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
+         brw->state.dirty.brw |= BRW_NEW_VERTICES;
          brw_merge_inputs(brw, arrays);
       }
@@ -447,9 +447,9 @@ retry:
        * *_set_prim or intel_batchbuffer_flush(), which only impacts
        * brw->state.dirty.brw.
        */
-      if (brw->state.pipeline_dirty[BRW_PIPELINE_3D].brw) {
+      if (brw->state.dirty.brw) {
         brw->no_batch_wrap = true;
-         brw_upload_state(brw, BRW_PIPELINE_3D);
+         brw_upload_state(brw);
      }
 
      brw_emit_prim(brw, &prims[i], brw->primitive);
@@ -480,8 +480,8 @@ retry:
      /* Now that we know we haven't run out of aperture space, we can safely
       * reset the dirty bits.
       */
-      if (brw->state.pipeline_dirty[BRW_PIPELINE_3D].brw)
-         brw_clear_dirty_bits(brw, BRW_PIPELINE_3D);
+      if (brw->state.dirty.brw)
+         brw_clear_dirty_bits(brw);
    }
 
    if (brw->always_flush_batch)


@@ -872,11 +872,11 @@ static void brw_upload_indices(struct brw_context *brw)
    brw->ib.start_vertex_offset = offset / ib_type_size;
 
    if (brw->ib.bo != old_bo)
-      SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
+      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
 
    if (index_buffer->type != brw->ib.type) {
       brw->ib.type = index_buffer->type;
-      SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
+      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
    }
 }


@@ -230,7 +230,7 @@ brw_upload_ff_gs_prog(struct brw_context *brw)
    populate_key(brw, &key);
 
    if (brw->ff_gs.prog_active != key.need_gs_prog) {
-      SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_PROG);
+      brw->state.dirty.cache |= CACHE_NEW_FF_GS_PROG;
       brw->ff_gs.prog_active = key.need_gs_prog;
    }


@@ -85,7 +85,7 @@ brw_upload_gs_unit(struct brw_context *brw)
    gs->gs6.max_vp_index = brw->ctx.Const.MaxViewports - 1;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_FF_GS_UNIT;
 }
 
 const struct brw_tracked_state brw_gs_unit = {


@@ -44,7 +44,7 @@ brw_setup_vue_interpolation(struct brw_context *brw)
    memset(&brw->interpolation_mode, INTERP_QUALIFIER_NONE, sizeof(brw->interpolation_mode));
 
-   SET_DIRTY_BIT(brw, BRW_NEW_INTERPOLATION_MAP);
+   brw->state.dirty.brw |= BRW_NEW_INTERPOLATION_MAP;
 
    if (!fprog)
       return;


@@ -376,7 +376,7 @@ set_fast_clear_op(struct brw_context *brw, uint32_t op)
     * 3DSTATE_PS.
     */
    brw->wm.fast_clear_op = op;
-   SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+   brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
 }
 
 static void
@@ -400,8 +400,8 @@ use_rectlist(struct brw_context *brw, bool enable)
     * _NEW_BUFFERS to make sure we emit new SURFACE_STATE with the new fast
     * clear color value.
     */
-   SET_DIRTY_BIT(mesa, _NEW_LIGHT | _NEW_BUFFERS);
-   SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+   brw->state.dirty.mesa |= _NEW_LIGHT | _NEW_BUFFERS;
+   brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
 }
 
 bool
@@ -602,7 +602,8 @@ brw_meta_fast_clear(struct brw_context *brw, struct gl_framebuffer *fb,
    * color before resolve and sets irb->mt->fast_clear_state to UNRESOLVED if
    * we render to it.
    */
-   SET_DIRTY_BIT(mesa, _NEW_BUFFERS);
+   brw->state.dirty.mesa |= _NEW_BUFFERS;
 
    /* Set the custom state back to normal and dirty the same bits as above */
    use_rectlist(brw, false);


@@ -103,7 +103,7 @@ static void upload_pipelined_state_pointers(struct brw_context *brw )
              brw->cc.state_offset);
    ADVANCE_BATCH();
 
-   SET_DIRTY_BIT(brw, BRW_NEW_PSP);
+   brw->state.dirty.brw |= BRW_NEW_PSP;
 }
 
 static void upload_psp_urb_cbs(struct brw_context *brw )
@@ -1054,7 +1054,7 @@ static void upload_state_base_address( struct brw_context *brw )
     * obvious.
     */
 
-   SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
+   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
 }
 
 const struct brw_tracked_state brw_state_base_address = {


@@ -62,13 +62,13 @@ static void brwBindProgram( struct gl_context *ctx,
    switch (target) {
    case GL_VERTEX_PROGRAM_ARB:
-      SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
       break;
    case MESA_GEOMETRY_PROGRAM:
-      SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
       break;
    case GL_FRAGMENT_PROGRAM_ARB:
-      SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
       break;
    }
 }
@@ -161,7 +161,7 @@ brwProgramStringNotify(struct gl_context *ctx,
          brw_fragment_program_const(brw->fragment_program);
       if (newFP == curFP)
-         SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+         brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
       newFP->id = get_new_program_id(brw->intelScreen);
       break;
    }
@@ -172,7 +172,7 @@ brwProgramStringNotify(struct gl_context *ctx,
         brw_vertex_program_const(brw->vertex_program);
      if (newVP == curVP)
-         SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+         brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.IsPositionInvariant) {
         _mesa_insert_mvp_code(ctx, &newVP->program);
      }


@@ -255,7 +255,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        * so turn them on now.
        */
       brw->stats_wm++;
-      SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
+      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
       break;
 
    default:
@@ -312,7 +312,7 @@ brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
       brw->query.obj = NULL;
 
       brw->stats_wm--;
-      SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
+      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
       break;
 
    default:


@@ -494,7 +494,7 @@ brw_upload_sampler_state_table(struct brw_context *brw,
       /* Flag that the sampler state table pointer has changed; later atoms
        * will handle it.
        */
-      SET_DIRTY_BIT(cache, CACHE_NEW_SAMPLER);
+      brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
    }
 }


@@ -109,7 +109,7 @@ static void upload_sf_vp(struct brw_context *brw)
       sfv->scissor.ymax = ctx->DrawBuffer->Height - ctx->DrawBuffer->_Ymin - 1;
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
+   brw->state.dirty.cache |= CACHE_NEW_SF_VP;
 }
 
 const struct brw_tracked_state brw_sf_vp = {
@@ -292,7 +292,7 @@ static void upload_sf_unit( struct brw_context *brw )
                            (sf->sf5.viewport_transform << 1)),
                           I915_GEM_DOMAIN_INSTRUCTION, 0);
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_SF_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_SF_UNIT;
 }
 
 const struct brw_tracked_state brw_sf_unit = {


@@ -160,8 +160,8 @@ brw_depthbuffer_format(struct brw_context *brw);
 /***********************************************************************
  * brw_state.c
  */
-void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline);
-void brw_clear_dirty_bits(struct brw_context *brw, brw_pipeline pipeline);
+void brw_upload_state(struct brw_context *brw);
+void brw_clear_dirty_bits(struct brw_context *brw);
 void brw_init_state(struct brw_context *brw);
 void brw_destroy_state(struct brw_context *brw);


@@ -158,7 +158,7 @@ brw_search_cache(struct brw_cache *cache,
    *(void **)out_aux = ((char *)item->key + item->key_size);
 
    if (item->offset != *inout_offset) {
-      SET_DIRTY_BIT(cache, 1 << cache_id);
+      brw->state.dirty.cache |= (1 << cache_id);
       *inout_offset = item->offset;
    }
 
@@ -187,7 +187,7 @@ brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
    /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
-   SET_DIRTY_BIT(brw, BRW_NEW_PROGRAM_CACHE);
+   brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
 }
 
 /**
@@ -276,7 +276,6 @@ brw_upload_cache(struct brw_cache *cache,
                  uint32_t *out_offset,
                  void *out_aux)
 {
-   struct brw_context *brw = cache->brw;
    struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
    GLuint hash;
    void *tmp;
@@ -321,7 +320,7 @@ brw_upload_cache(struct brw_cache *cache,
    *out_offset = item->offset;
    *(void **)out_aux = (void *)((char *)item->key + item->key_size);
-   SET_DIRTY_BIT(cache, 1 << cache_id);
+   cache->brw->state.dirty.cache |= 1 << cache_id;
 }
 
 void
@@ -379,9 +378,9 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
    /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
-   SET_DIRTY_ALL(mesa);
-   SET_DIRTY64_ALL(brw);
-   SET_DIRTY_ALL(cache);
+   brw->state.dirty.mesa |= ~0;
+   brw->state.dirty.brw |= ~0;
+   brw->state.dirty.cache |= ~0;
 
    intel_batchbuffer_flush(brw);
 }


@@ -331,11 +331,6 @@ static const struct brw_tracked_state *gen8_atoms[] =
    &haswell_cut_index,
 };
 
-static const struct brw_tracked_state *gen7_compute_atoms[] =
-{
-};
-
 static void
 brw_upload_initial_gpu_state(struct brw_context *brw)
 {
@@ -356,48 +351,45 @@ brw_upload_initial_gpu_state(struct brw_context *brw)
 void brw_init_state( struct brw_context *brw )
 {
    struct gl_context *ctx = &brw->ctx;
-   int i, j;
+   const struct brw_tracked_state **atoms;
+   int num_atoms;
 
    brw_init_caches(brw);
 
-   memset(brw->atoms, 0, sizeof(brw->atoms));
-   memset(brw->num_atoms, 0, sizeof(brw->num_atoms));
-
    if (brw->gen >= 8) {
-      brw->atoms[BRW_PIPELINE_3D] = gen8_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen8_atoms);
+      atoms = gen8_atoms;
+      num_atoms = ARRAY_SIZE(gen8_atoms);
    } else if (brw->gen == 7) {
-      brw->atoms[BRW_PIPELINE_3D] = gen7_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen7_atoms);
-
-      brw->atoms[BRW_PIPELINE_COMPUTE] = gen7_compute_atoms;
-      brw->num_atoms[BRW_PIPELINE_COMPUTE] = ARRAY_SIZE(gen7_compute_atoms);
+      atoms = gen7_atoms;
+      num_atoms = ARRAY_SIZE(gen7_atoms);
   } else if (brw->gen == 6) {
-      brw->atoms[BRW_PIPELINE_3D] = gen6_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen6_atoms);
+      atoms = gen6_atoms;
+      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
-      brw->atoms[BRW_PIPELINE_3D] = gen4_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen4_atoms);
+      atoms = gen4_atoms;
+      num_atoms = ARRAY_SIZE(gen4_atoms);
   }
 
-   for (i = 0; i < BRW_NUM_PIPELINES; i++) {
-      for (j = 0; j < brw->num_atoms[i]; j++) {
-         assert(brw->atoms[i][j]->dirty.mesa |
-                brw->atoms[i][j]->dirty.brw |
-                brw->atoms[i][j]->dirty.cache);
-         assert(brw->atoms[i][j]->emit);
-      }
+   brw->atoms = atoms;
+   brw->num_atoms = num_atoms;
+
+   while (num_atoms--) {
+      assert((*atoms)->dirty.mesa |
+             (*atoms)->dirty.brw |
+             (*atoms)->dirty.cache);
+      assert((*atoms)->emit);
+      atoms++;
   }
 
   brw_upload_initial_gpu_state(brw);
 
-   SET_DIRTY_ALL(mesa);
-   SET_DIRTY64_ALL(brw);
+   brw->state.dirty.mesa = ~0;
+   brw->state.dirty.brw = ~0;
 
   /* Make sure that brw->state.dirty.brw has enough bits to hold all possible
    * dirty flags.
    */
-   STATIC_ASSERT(BRW_NUM_STATE_BITS <=
-                 8 * sizeof(brw->state.pipeline_dirty[0].brw));
+   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->state.dirty.brw));
 
   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
@@ -573,20 +565,17 @@ brw_print_dirty_count(struct dirty_bit_map *bit_map)
 /***********************************************************************
  * Emit all state:
  */
-void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
+void brw_upload_state(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->ctx;
-   struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
+   struct brw_state_flags *state = &brw->state.dirty;
    int i;
    static int dirty_count = 0;
 
-   assert(0 <= pipeline && pipeline < BRW_NUM_PIPELINES);
-   brw->state.current_pipeline = pipeline;
-
-   SET_DIRTY_BIT(mesa, brw->NewGLState);
+   state->mesa |= brw->NewGLState;
    brw->NewGLState = 0;
 
-   SET_DIRTY_BIT(brw, ctx->NewDriverState);
+   state->brw |= ctx->NewDriverState;
    ctx->NewDriverState = 0;
 
    if (0) {
@@ -598,27 +587,27 @@ void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
    if (brw->fragment_program != ctx->FragmentProgram._Current) {
       brw->fragment_program = ctx->FragmentProgram._Current;
-      SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
    }
 
    if (brw->geometry_program != ctx->GeometryProgram._Current) {
       brw->geometry_program = ctx->GeometryProgram._Current;
-      SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
    }
 
    if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
-      SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
    }
 
    if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
-      SET_DIRTY_BIT(brw, BRW_NEW_META_IN_PROGRESS);
+      brw->state.dirty.brw |= BRW_NEW_META_IN_PROGRESS;
   }
 
   if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
     brw->num_samples = ctx->DrawBuffer->Visual.samples;
-      SET_DIRTY_BIT(brw, BRW_NEW_NUM_SAMPLES);
+      brw->state.dirty.brw |= BRW_NEW_NUM_SAMPLES;
   }
 
   if ((state->mesa | state->cache | state->brw) == 0)
@@ -633,8 +622,8 @@ void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
    memset(&examined, 0, sizeof(examined));
    prev = *state;
 
-   for (i = 0; i < brw->num_atoms[pipeline]; i++) {
-      const struct brw_tracked_state *atom = brw->atoms[pipeline][i];
+   for (i = 0; i < brw->num_atoms; i++) {
+      const struct brw_tracked_state *atom = brw->atoms[i];
       struct brw_state_flags generated;
 
      if (check_state(state, &atom->dirty)) {
@@ -653,8 +642,8 @@ void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
      }
   }
   else {
-      for (i = 0; i < brw->num_atoms[pipeline]; i++) {
-         const struct brw_tracked_state *atom = brw->atoms[pipeline][i];
+      for (i = 0; i < brw->num_atoms; i++) {
+         const struct brw_tracked_state *atom = brw->atoms[i];
 
        if (check_state(state, &atom->dirty)) {
           atom->emit(brw);
@@ -688,8 +677,8 @@ void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
  * brw_upload_state() call.
  */
 void
-brw_clear_dirty_bits(struct brw_context *brw, brw_pipeline pipeline)
+brw_clear_dirty_bits(struct brw_context *brw)
 {
-   struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
+   struct brw_state_flags *state = &brw->state.dirty;
    memset(state, 0, sizeof(*state));
 }


@@ -204,7 +204,7 @@ done:
              brw->urb.cs_start,
              brw->urb.size);
 
-      SET_DIRTY_BIT(brw, BRW_NEW_URB_FENCE);
+      brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
    }
 }


@@ -273,9 +273,9 @@ brw_upload_gs_prog(struct brw_context *brw)
    if (gp == NULL) {
       /* No geometry shader. Vertex data just passes straight through. */
-      if (CHECK_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS)) {
+      if (brw->state.dirty.brw & BRW_NEW_VUE_MAP_VS) {
          brw->vue_map_geom_out = brw->vue_map_vs;
-         SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
       }
 
       /* Other state atoms had better not try to access prog_data, since
@@ -319,7 +319,7 @@ brw_upload_gs_prog(struct brw_context *brw)
    if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
       brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
-      SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
    }
 }


@@ -497,13 +497,13 @@ static void brw_upload_vs_prog(struct brw_context *brw)
    if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
              sizeof(brw->vue_map_geom_out)) != 0) {
       brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
-      SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS);
+      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
       if (brw->gen < 7) {
          /* No geometry shader support, so the VS VUE map is the VUE map for
           * the output of the "geometry" portion of the pipeline.
           */
          brw->vue_map_geom_out = brw->vue_map_vs;
-         SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
       }
    }
 }


@@ -181,7 +181,7 @@ brw_upload_vs_unit(struct brw_context *brw)
                        I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_VS_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_VS_UNIT;
 }
 
 const struct brw_tracked_state brw_vs_unit = {


@@ -67,7 +67,7 @@ brw_upload_pull_constants(struct brw_context *brw,
    if (!prog_data->nr_pull_params) {
       if (stage_state->surf_offset[surf_index]) {
          stage_state->surf_offset[surf_index] = 0;
-         SET_DIRTY_BIT(brw, brw_new_constbuf);
+         brw->state.dirty.brw |= brw_new_constbuf;
       }
       return;
    }
@@ -98,7 +98,7 @@ brw_upload_pull_constants(struct brw_context *brw,
                           dword_pitch);
    drm_intel_bo_unreference(const_bo);
 
-   SET_DIRTY_BIT(brw, brw_new_constbuf);
+   brw->state.dirty.brw |= brw_new_constbuf;
 }


@@ -239,7 +239,7 @@ brw_upload_wm_unit(struct brw_context *brw)
                          I915_GEM_DOMAIN_INSTRUCTION, 0);
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_WM_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
 }
 
 const struct brw_tracked_state brw_wm_unit = {


@@ -729,7 +729,7 @@ brw_update_renderbuffer_surfaces(struct brw_context *brw)
    } else {
       brw->vtbl.update_null_renderbuffer_surface(brw, 0);
    }
-   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+   brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state brw_renderbuffer_surfaces = {
@@ -817,7 +817,7 @@ brw_update_texture_surfaces(struct brw_context *brw)
       update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
    }
 
-   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+   brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state brw_texture_surfaces = {
@@ -869,7 +869,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw,
    }
 
    if (shader->NumUniformBlocks)
-      SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+      brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 static void
@@ -920,7 +920,7 @@ brw_upload_abo_surfaces(struct brw_context *brw,
    }
 
    if (prog->NumUniformBlocks)
-      SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+      brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 static void


@@ -65,7 +65,7 @@ gen6_update_sol_surfaces(struct brw_context *brw)
       }
    }
 
-   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+   brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state gen6_sol_surface = {
@@ -103,7 +103,7 @@ brw_gs_upload_binding_table(struct brw_context *brw)
    /* Skip making a binding table if we don't have anything to put in it. */
    if (!has_surfaces) {
       if (brw->ff_gs.bind_bo_offset != 0) {
-         SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
+         brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
          brw->ff_gs.bind_bo_offset = 0;
       }
       return;
@@ -119,7 +119,7 @@ brw_gs_upload_binding_table(struct brw_context *brw)
    /* BRW_NEW_SURFACES */
    memcpy(bind, brw->ff_gs.surf_offset, BRW_MAX_GEN6_GS_SURFACES * sizeof(uint32_t));
 
-   SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
+   brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
 }
 
 const struct brw_tracked_state gen6_gs_binding_table = {


@@ -63,7 +63,7 @@ gen6_upload_clip_vp(struct brw_context *brw)
    vp->ymin = -gby;
    vp->ymax = gby;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_VP);
+   brw->state.dirty.cache |= CACHE_NEW_CLIP_VP;
 }
 
 const struct brw_tracked_state gen6_clip_vp = {
@@ -106,7 +106,7 @@ gen6_upload_sf_vp(struct brw_context *brw)
    sfv->viewport.m31 = v[MAT_TY] * y_scale + y_bias;
    sfv->viewport.m32 = v[MAT_TZ] * depth_scale;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
+   brw->state.dirty.cache |= CACHE_NEW_SF_VP;
 }
 
 const struct brw_tracked_state gen6_sf_vp = {


@@ -94,7 +94,7 @@ gen7_allocate_push_constants(struct brw_context *brw)
     * Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
     * commands.
     */
-   SET_DIRTY_BIT(brw, BRW_NEW_PUSH_CONSTANT_ALLOCATION);
+   brw->state.dirty.brw |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
 }
 
 void


@@ -324,5 +324,5 @@ gen8_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
     *
     * Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
     */
-   SET_DIRTY_BIT(mesa, _NEW_DEPTH | _NEW_BUFFERS);
+   brw->state.dirty.mesa |= _NEW_DEPTH | _NEW_BUFFERS;
 }


@@ -61,7 +61,7 @@ static void upload_state_base_address(struct brw_context *brw)
    OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
    ADVANCE_BATCH();
 
-   SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
+   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
 }
 
 const struct brw_tracked_state gen8_state_base_address = {


@@ -178,9 +178,9 @@ brw_new_batch(struct brw_context *brw)
     * purposes means everything).
     */
    if (brw->hw_ctx == NULL)
-      SET_DIRTY_BIT(brw, BRW_NEW_CONTEXT);
+      brw->state.dirty.brw |= BRW_NEW_CONTEXT;
 
-   SET_DIRTY_BIT(brw, BRW_NEW_BATCH);
+   brw->state.dirty.brw |= BRW_NEW_BATCH;
 
    /* Assume that the last command before the start of our batch was a
     * primitive, for safety.


@@ -110,7 +110,7 @@ intel_bufferobj_alloc_buffer(struct brw_context *brw,
    /* the buffer might be bound as a uniform buffer, need to update it
     */
-   SET_DIRTY_BIT(brw, BRW_NEW_UNIFORM_BUFFER);
+   brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
 
    intel_bufferobj_mark_inactive(intel_obj);
 }