Reverts
* "i965: Modify state upload to allow 2 different sets of state atoms."
   8e27a4d2b3
* "i965: Modify dirty bit handling to support 2 pipelines."
   373143ed91
* "i965: Create a macro for checking a dirty bit."
   c5bdf9be1e
   Conflicts:
	src/mesa/drivers/dri/i965/brw_context.h
* "i965: Create a macro for setting all dirty bits."
   6f56e1424d
   Conflicts:
	src/mesa/drivers/dri/i965/brw_blorp.cpp
	src/mesa/drivers/dri/i965/brw_state_cache.c
	src/mesa/drivers/dri/i965/brw_state_upload.c
* "i965: Create a macro for setting a dirty bit."
   88e3d404da

Signed-off-by: Jordan Justen <jordan.l.justen@intel.com>

Author: Jordan Justen <jordan.l.justen@intel.com>
Date:   2014-09-03 14:28:59 -07:00
Commit: 864c463485
Parent: 5d8f40a53a

34 changed files with 112 additions and 185 deletions
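
The change is mechanical throughout the diff: every call to one of the
SET_DIRTY* macros, which fanned a flag out to a per-pipeline copy of the
state flags, becomes a single direct write into brw->state.dirty. A minimal
sketch of the two styles (the macro body is quoted from the brw_context.h
hunk below; BRW_NEW_VERTEX_PROGRAM stands in for any flag):

    /* Reverted style: mirror the flag into every pipeline's copy. */
    #define SET_DIRTY_BIT(FIELD, FLAG) \
       do { \
          for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
             brw->state.pipeline_dirty[pipeline].FIELD |= (FLAG); \
       } while (false)

    SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);

    /* Restored style: one set of flags, one direct write. */
    brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;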

@@ -82,7 +82,7 @@ brw_upload_binding_table(struct brw_context *brw,
              prog_data->binding_table.size_bytes);
    }
 
-   SET_DIRTY_BIT(brw, brw_new_binding_table);
+   brw->state.dirty.brw |= brw_new_binding_table;
 
    if (brw->gen >= 7) {
       BEGIN_BATCH(2);

@@ -273,8 +273,8 @@ retry:
    /* We've smashed all state compared to what the normal 3D pipeline
     * rendering tracks for GL.
     */
-   SET_DIRTY64_ALL(brw);
-   SET_DIRTY_ALL(cache);
+   brw->state.dirty.brw = ~0;
+   brw->state.dirty.cache = ~0;
    brw->no_depth_or_stencil = false;
    brw->ib.type = -1;

@@ -62,7 +62,7 @@ brw_upload_cc_vp(struct brw_context *brw)
       }
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CC_VP);
+   brw->state.dirty.cache |= CACHE_NEW_CC_VP;
 }
 
 const struct brw_tracked_state brw_cc_vp = {
@@ -223,7 +223,7 @@ static void upload_cc_unit(struct brw_context *brw)
    cc->cc4.cc_viewport_state_offset = (brw->batch.bo->offset64 +
                                        brw->cc.vp_offset) >> 5; /* reloc */
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CC_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_CC_UNIT;
 
    /* Emit CC viewport relocation */
    drm_intel_bo_emit_reloc(brw->batch.bo,

@@ -158,7 +158,7 @@ brw_upload_clip_unit(struct brw_context *brw)
    clip->viewport_ymin = -1;
    clip->viewport_ymax = 1;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_CLIP_UNIT;
 }
 
 const struct brw_tracked_state brw_clip_unit = {

@@ -241,67 +241,6 @@ struct brw_state_flags {
    GLuint cache;
 };
 
-/**
- * Enum representing the different pipelines.
- */
-typedef enum {
-   /**
-    * 3D rendering pipeline (vertex through fragment shader).
-    */
-   BRW_PIPELINE_3D,
-
-   /**
-    * Compute shader pipeline.
-    */
-   BRW_PIPELINE_COMPUTE,
-
-   BRW_NUM_PIPELINES
-} brw_pipeline;
-
-/**
- * Set one of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY_BIT(FIELD, FLAG) \
-   do { \
-      for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
-         brw->state.pipeline_dirty[pipeline].FIELD |= (FLAG); \
-   } while (false)
-
-/**
- * Set all of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY_ALL(FIELD) \
-   do { \
-      /* ~0 == 0xffffffff, so make sure field is <= 32 bits */ \
-      STATIC_ASSERT(sizeof(brw->state.pipeline_dirty[0].FIELD) == 4); \
-      for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
-         brw->state.pipeline_dirty[pipeline].FIELD = ~0; \
-   } while (false)
-
-/**
- * Set all of the bits in a field of brw_state_flags.
- */
-#define SET_DIRTY64_ALL(FIELD) \
-   do { \
-      /* ~0ULL == 0xffffffffffffffff, so make sure field is <= 64 bits */ \
-      STATIC_ASSERT(sizeof(brw->state.pipeline_dirty[0].FIELD) == 8); \
-      for (int pipeline = 0; pipeline < BRW_NUM_PIPELINES; pipeline++) \
-         brw->state.pipeline_dirty[pipeline].FIELD = ~(0ULL); \
-   } while (false)
-
-/**
- * Check one of the bits in a field of brw_state_flags.
- */
-#define CHECK_DIRTY_BIT(FIELD, FLAG) \
-   ((brw->state.pipeline_dirty[brw->state.current_pipeline].FIELD & (FLAG)) \
-    != 0)
-
 /** Subclass of Mesa vertex program */
 struct brw_vertex_program {
    struct gl_vertex_program program;
@@ -1107,8 +1046,7 @@ struct brw_context
    GLuint NewGLState;
 
    struct {
-      struct brw_state_flags pipeline_dirty[BRW_NUM_PIPELINES];
-      brw_pipeline current_pipeline;
+      struct brw_state_flags dirty;
    } state;
 
    struct brw_cache cache;
@@ -1367,8 +1305,8 @@ struct brw_context
       int entries_per_oa_snapshot;
    } perfmon;
 
-   int num_atoms[BRW_NUM_PIPELINES];
-   const struct brw_tracked_state **atoms[BRW_NUM_PIPELINES];
+   int num_atoms;
+   const struct brw_tracked_state **atoms;
 
    /* If (INTEL_DEBUG & DEBUG_BATCH) */
    struct {
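
Many of the hunks below end at a brw_tracked_state initializer, and the
header hunk above shows the flag fields those atoms read and write. A
self-contained sketch of that contract (stub types only; the field widths
follow the STATIC_ASSERTs in the deleted macros, and example_atom /
upload_example are hypothetical names, not atoms from this diff):

    #include <stdint.h>

    struct brw_context;                        /* opaque in this sketch */

    struct brw_state_flags {
       uint32_t mesa;    /* _NEW_* bits from core Mesa */
       uint64_t brw;     /* BRW_NEW_* driver bits (64-bit, per the asserts) */
       uint32_t cache;   /* CACHE_NEW_* program-cache bits */
    };

    struct brw_tracked_state {
       struct brw_state_flags dirty;           /* bits this atom listens to */
       void (*emit)(struct brw_context *brw);  /* run when any bit is set */
    };

    static void upload_example(struct brw_context *brw)
    {
       /* ...emit hardware state, then raise bits that dependent atoms
        * watch, e.g. brw->state.dirty.cache |= <some CACHE_NEW_* bit>... */
    }

    static const struct brw_tracked_state example_atom = {
       .dirty = { .mesa = 0, .brw = 0, .cache = 0 },
       .emit  = upload_example,
    };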

@@ -134,7 +134,7 @@ static void calculate_curbe_offsets( struct brw_context *brw )
                brw->curbe.vs_start,
                brw->curbe.vs_size );
 
-      SET_DIRTY_BIT(brw, BRW_NEW_CURBE_OFFSETS);
+      brw->state.dirty.brw |= BRW_NEW_CURBE_OFFSETS;
    }
 }

@@ -126,11 +126,11 @@ static void brw_set_prim(struct brw_context *brw,
 
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
+      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
 
       if (reduced_prim[prim->mode] != brw->reduced_primitive) {
          brw->reduced_primitive = reduced_prim[prim->mode];
-         SET_DIRTY_BIT(brw, BRW_NEW_REDUCED_PRIMITIVE);
+         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
       }
    }
 }
@@ -146,7 +146,7 @@ static void gen6_set_prim(struct brw_context *brw,
 
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
+      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
    }
 }
@@ -403,11 +403,11 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
    brw_merge_inputs( brw, arrays );
    brw->ib.ib = ib;
-   SET_DIRTY_BIT(brw, BRW_NEW_INDICES);
+   brw->state.dirty.brw |= BRW_NEW_INDICES;
 
    brw->vb.min_index = min_index;
    brw->vb.max_index = max_index;
-   SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
+   brw->state.dirty.brw |= BRW_NEW_VERTICES;
 
    for (i = 0; i < nr_prims; i++) {
       int estimated_max_prim_size;
@@ -432,7 +432,7 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
       brw->num_instances = prims[i].num_instances;
       brw->basevertex = prims[i].basevertex;
 
      if (i > 0) { /* For i == 0 we just did this before the loop */
-         SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
+         brw->state.dirty.brw |= BRW_NEW_VERTICES;
          brw_merge_inputs(brw, arrays);
       }
@@ -447,9 +447,9 @@ retry:
        * *_set_prim or intel_batchbuffer_flush(), which only impacts
        * brw->state.dirty.brw.
        */
-      if (brw->state.pipeline_dirty[BRW_PIPELINE_3D].brw) {
+      if (brw->state.dirty.brw) {
          brw->no_batch_wrap = true;
-         brw_upload_state(brw, BRW_PIPELINE_3D);
+         brw_upload_state(brw);
       }
 
       brw_emit_prim(brw, &prims[i], brw->primitive);
@@ -480,8 +480,8 @@ retry:
       /* Now that we know we haven't run out of aperture space, we can safely
        * reset the dirty bits.
        */
-      if (brw->state.pipeline_dirty[BRW_PIPELINE_3D].brw)
-         brw_clear_dirty_bits(brw, BRW_PIPELINE_3D);
+      if (brw->state.dirty.brw)
+         brw_clear_dirty_bits(brw);
    }
 
    if (brw->always_flush_batch)

@@ -872,11 +872,11 @@ static void brw_upload_indices(struct brw_context *brw)
    brw->ib.start_vertex_offset = offset / ib_type_size;
 
    if (brw->ib.bo != old_bo)
-      SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
+      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
 
    if (index_buffer->type != brw->ib.type) {
       brw->ib.type = index_buffer->type;
-      SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
+      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
    }
 }

@@ -230,7 +230,7 @@ brw_upload_ff_gs_prog(struct brw_context *brw)
    populate_key(brw, &key);
 
    if (brw->ff_gs.prog_active != key.need_gs_prog) {
-      SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_PROG);
+      brw->state.dirty.cache |= CACHE_NEW_FF_GS_PROG;
       brw->ff_gs.prog_active = key.need_gs_prog;
    }

@@ -85,7 +85,7 @@ brw_upload_gs_unit(struct brw_context *brw)
    gs->gs6.max_vp_index = brw->ctx.Const.MaxViewports - 1;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_FF_GS_UNIT;
 }
 
 const struct brw_tracked_state brw_gs_unit = {

@@ -44,7 +44,7 @@ brw_setup_vue_interpolation(struct brw_context *brw)
    memset(&brw->interpolation_mode, INTERP_QUALIFIER_NONE, sizeof(brw->interpolation_mode));
 
-   SET_DIRTY_BIT(brw, BRW_NEW_INTERPOLATION_MAP);
+   brw->state.dirty.brw |= BRW_NEW_INTERPOLATION_MAP;
 
    if (!fprog)
       return;

@@ -376,7 +376,7 @@ set_fast_clear_op(struct brw_context *brw, uint32_t op)
     * 3DSTATE_PS.
     */
    brw->wm.fast_clear_op = op;
-   SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+   brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
 }
 
 static void
@@ -400,8 +400,8 @@ use_rectlist(struct brw_context *brw, bool enable)
     * _NEW_BUFFERS to make sure we emit new SURFACE_STATE with the new fast
     * clear color value.
     */
-   SET_DIRTY_BIT(mesa, _NEW_LIGHT | _NEW_BUFFERS);
-   SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+   brw->state.dirty.mesa |= _NEW_LIGHT | _NEW_BUFFERS;
+   brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
 }
 
 bool
@@ -602,7 +602,8 @@ brw_meta_fast_clear(struct brw_context *brw, struct gl_framebuffer *fb,
    * color before resolve and sets irb->mt->fast_clear_state to UNRESOLVED if
    * we render to it.
    */
-   SET_DIRTY_BIT(mesa, _NEW_BUFFERS);
+   brw->state.dirty.mesa |= _NEW_BUFFERS;
+
    /* Set the custom state back to normal and dirty the same bits as above */
    use_rectlist(brw, false);

@@ -103,7 +103,7 @@ static void upload_pipelined_state_pointers(struct brw_context *brw )
              brw->cc.state_offset);
    ADVANCE_BATCH();
 
-   SET_DIRTY_BIT(brw, BRW_NEW_PSP);
+   brw->state.dirty.brw |= BRW_NEW_PSP;
 }
 
 static void upload_psp_urb_cbs(struct brw_context *brw )
@@ -1054,7 +1054,7 @@ static void upload_state_base_address( struct brw_context *brw )
     * obvious.
     */
 
-   SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
+   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
 }
 
 const struct brw_tracked_state brw_state_base_address = {

@@ -62,13 +62,13 @@ static void brwBindProgram( struct gl_context *ctx,
 
    switch (target) {
    case GL_VERTEX_PROGRAM_ARB:
-      SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
       break;
    case MESA_GEOMETRY_PROGRAM:
-      SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
       break;
    case GL_FRAGMENT_PROGRAM_ARB:
-      SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
       break;
    }
 }
@@ -161,7 +161,7 @@ brwProgramStringNotify(struct gl_context *ctx,
          brw_fragment_program_const(brw->fragment_program);
 
       if (newFP == curFP)
-         SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+         brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
       newFP->id = get_new_program_id(brw->intelScreen);
       break;
    }
@@ -172,7 +172,7 @@ brwProgramStringNotify(struct gl_context *ctx,
          brw_vertex_program_const(brw->vertex_program);
 
       if (newVP == curVP)
-         SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+         brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
       if (newVP->program.IsPositionInvariant) {
          _mesa_insert_mvp_code(ctx, &newVP->program);
       }

@@ -255,7 +255,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        * so turn them on now.
        */
       brw->stats_wm++;
-      SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
+      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
       break;
 
    default:
@@ -312,7 +312,7 @@ brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
       brw->query.obj = NULL;
 
       brw->stats_wm--;
-      SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
+      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
       break;
 
    default:

@@ -494,7 +494,7 @@ brw_upload_sampler_state_table(struct brw_context *brw,
       /* Flag that the sampler state table pointer has changed; later atoms
        * will handle it.
        */
-      SET_DIRTY_BIT(cache, CACHE_NEW_SAMPLER);
+      brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
    }
 }

@@ -109,7 +109,7 @@ static void upload_sf_vp(struct brw_context *brw)
       sfv->scissor.ymax = ctx->DrawBuffer->Height - ctx->DrawBuffer->_Ymin - 1;
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
+   brw->state.dirty.cache |= CACHE_NEW_SF_VP;
 }
 
 const struct brw_tracked_state brw_sf_vp = {
@@ -292,7 +292,7 @@ static void upload_sf_unit( struct brw_context *brw )
                       (sf->sf5.viewport_transform << 1)),
                      I915_GEM_DOMAIN_INSTRUCTION, 0);
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_SF_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_SF_UNIT;
 }
 
 const struct brw_tracked_state brw_sf_unit = {

@@ -160,8 +160,8 @@ brw_depthbuffer_format(struct brw_context *brw);
 /***********************************************************************
  * brw_state.c
  */
-void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline);
-void brw_clear_dirty_bits(struct brw_context *brw, brw_pipeline pipeline);
+void brw_upload_state(struct brw_context *brw);
+void brw_clear_dirty_bits(struct brw_context *brw);
 void brw_init_state(struct brw_context *brw);
 void brw_destroy_state(struct brw_context *brw);

@@ -158,7 +158,7 @@ brw_search_cache(struct brw_cache *cache,
    *(void **)out_aux = ((char *)item->key + item->key_size);
 
    if (item->offset != *inout_offset) {
-      SET_DIRTY_BIT(cache, 1 << cache_id);
+      brw->state.dirty.cache |= (1 << cache_id);
       *inout_offset = item->offset;
    }
@@ -187,7 +187,7 @@ brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
    /* Since we have a new BO in place, we need to signal the units
     * that depend on it (state base address on gen5+, or unit state before).
    */
-   SET_DIRTY_BIT(brw, BRW_NEW_PROGRAM_CACHE);
+   brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
 }
 
 /**
@@ -276,7 +276,6 @@ brw_upload_cache(struct brw_cache *cache,
                  uint32_t *out_offset,
                  void *out_aux)
 {
-   struct brw_context *brw = cache->brw;
    struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
    GLuint hash;
    void *tmp;
@@ -321,7 +320,7 @@ brw_upload_cache(struct brw_cache *cache,
 
    *out_offset = item->offset;
    *(void **)out_aux = (void *)((char *)item->key + item->key_size);
-   SET_DIRTY_BIT(cache, 1 << cache_id);
+   cache->brw->state.dirty.cache |= 1 << cache_id;
 }
 
 void
@@ -379,9 +378,9 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
    /* We need to make sure that the programs get regenerated, since
     * any offsets leftover in brw_context will no longer be valid.
    */
-   SET_DIRTY_ALL(mesa);
-   SET_DIRTY64_ALL(brw);
-   SET_DIRTY_ALL(cache);
+   brw->state.dirty.mesa |= ~0;
+   brw->state.dirty.brw |= ~0;
+   brw->state.dirty.cache |= ~0;
    intel_batchbuffer_flush(brw);
 }

@@ -331,11 +331,6 @@ static const struct brw_tracked_state *gen8_atoms[] =
    &haswell_cut_index,
 };
 
-static const struct brw_tracked_state *gen7_compute_atoms[] =
-{
-};
-
 static void
 brw_upload_initial_gpu_state(struct brw_context *brw)
 {
@@ -356,48 +351,45 @@ brw_upload_initial_gpu_state(struct brw_context *brw)
 void brw_init_state( struct brw_context *brw )
 {
    struct gl_context *ctx = &brw->ctx;
-   int i, j;
+   const struct brw_tracked_state **atoms;
+   int num_atoms;
 
    brw_init_caches(brw);
 
-   memset(brw->atoms, 0, sizeof(brw->atoms));
-   memset(brw->num_atoms, 0, sizeof(brw->num_atoms));
-
    if (brw->gen >= 8) {
-      brw->atoms[BRW_PIPELINE_3D] = gen8_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen8_atoms);
+      atoms = gen8_atoms;
+      num_atoms = ARRAY_SIZE(gen8_atoms);
    } else if (brw->gen == 7) {
-      brw->atoms[BRW_PIPELINE_3D] = gen7_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen7_atoms);
-
-      brw->atoms[BRW_PIPELINE_COMPUTE] = gen7_compute_atoms;
-      brw->num_atoms[BRW_PIPELINE_COMPUTE] = ARRAY_SIZE(gen7_compute_atoms);
+      atoms = gen7_atoms;
+      num_atoms = ARRAY_SIZE(gen7_atoms);
    } else if (brw->gen == 6) {
-      brw->atoms[BRW_PIPELINE_3D] = gen6_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen6_atoms);
+      atoms = gen6_atoms;
+      num_atoms = ARRAY_SIZE(gen6_atoms);
    } else {
-      brw->atoms[BRW_PIPELINE_3D] = gen4_atoms;
-      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen4_atoms);
+      atoms = gen4_atoms;
+      num_atoms = ARRAY_SIZE(gen4_atoms);
    }
 
-   for (i = 0; i < BRW_NUM_PIPELINES; i++) {
-      for (j = 0; j < brw->num_atoms[i]; j++) {
-         assert(brw->atoms[i][j]->dirty.mesa |
-                brw->atoms[i][j]->dirty.brw |
-                brw->atoms[i][j]->dirty.cache);
-         assert(brw->atoms[i][j]->emit);
-      }
+   brw->atoms = atoms;
+   brw->num_atoms = num_atoms;
+
+   while (num_atoms--) {
+      assert((*atoms)->dirty.mesa |
+             (*atoms)->dirty.brw |
+             (*atoms)->dirty.cache);
+      assert((*atoms)->emit);
+      atoms++;
    }
 
    brw_upload_initial_gpu_state(brw);
 
-   SET_DIRTY_ALL(mesa);
-   SET_DIRTY64_ALL(brw);
+   brw->state.dirty.mesa = ~0;
+   brw->state.dirty.brw = ~0;
 
    /* Make sure that brw->state.dirty.brw has enough bits to hold all possible
    * dirty flags.
    */
-   STATIC_ASSERT(BRW_NUM_STATE_BITS <=
-                 8 * sizeof(brw->state.pipeline_dirty[0].brw));
+   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->state.dirty.brw));
 
    ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
    ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
@@ -573,20 +565,17 @@ brw_print_dirty_count(struct dirty_bit_map *bit_map)
 /***********************************************************************
  * Emit all state:
  */
-void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
+void brw_upload_state(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->ctx;
-   struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
+   struct brw_state_flags *state = &brw->state.dirty;
    int i;
    static int dirty_count = 0;
 
-   assert(0 <= pipeline && pipeline < BRW_NUM_PIPELINES);
-   brw->state.current_pipeline = pipeline;
-
-   SET_DIRTY_BIT(mesa, brw->NewGLState);
+   state->mesa |= brw->NewGLState;
   brw->NewGLState = 0;
 
-   SET_DIRTY_BIT(brw, ctx->NewDriverState);
+   state->brw |= ctx->NewDriverState;
    ctx->NewDriverState = 0;
 
    if (0) {
@@ -598,27 +587,27 @@ void brw_upload_state(struct brw_context *brw)
 
    if (brw->fragment_program != ctx->FragmentProgram._Current) {
       brw->fragment_program = ctx->FragmentProgram._Current;
-      SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
    }
 
    if (brw->geometry_program != ctx->GeometryProgram._Current) {
       brw->geometry_program = ctx->GeometryProgram._Current;
-      SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
    }
 
    if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
-      SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
+      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
    }
 
    if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
-      SET_DIRTY_BIT(brw, BRW_NEW_META_IN_PROGRESS);
+      brw->state.dirty.brw |= BRW_NEW_META_IN_PROGRESS;
    }
 
    if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
      brw->num_samples = ctx->DrawBuffer->Visual.samples;
-      SET_DIRTY_BIT(brw, BRW_NEW_NUM_SAMPLES);
+      brw->state.dirty.brw |= BRW_NEW_NUM_SAMPLES;
    }
 
    if ((state->mesa | state->cache | state->brw) == 0)
@@ -633,8 +622,8 @@ void brw_upload_state(struct brw_context *brw)
       memset(&examined, 0, sizeof(examined));
       prev = *state;
 
-      for (i = 0; i < brw->num_atoms[pipeline]; i++) {
-         const struct brw_tracked_state *atom = brw->atoms[pipeline][i];
+      for (i = 0; i < brw->num_atoms; i++) {
+         const struct brw_tracked_state *atom = brw->atoms[i];
          struct brw_state_flags generated;
 
          if (check_state(state, &atom->dirty)) {
@@ -653,8 +642,8 @@ void brw_upload_state(struct brw_context *brw)
       }
    }
    else {
-      for (i = 0; i < brw->num_atoms[pipeline]; i++) {
-         const struct brw_tracked_state *atom = brw->atoms[pipeline][i];
+      for (i = 0; i < brw->num_atoms; i++) {
+         const struct brw_tracked_state *atom = brw->atoms[i];
 
         if (check_state(state, &atom->dirty)) {
             atom->emit(brw);
@@ -688,8 +677,8 @@ void brw_upload_state(struct brw_context *brw)
  * brw_upload_state() call.
  */
 void
-brw_clear_dirty_bits(struct brw_context *brw, brw_pipeline pipeline)
+brw_clear_dirty_bits(struct brw_context *brw)
 {
-   struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
+   struct brw_state_flags *state = &brw->state.dirty;
    memset(state, 0, sizeof(*state));
 }
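
For reference, the shape of the emit loop this hunk restores, as a
self-contained sketch (reusing the stub types from the earlier sketch;
check_state() is written here as the obvious any-overlap test, which is an
assumption about its body, and emit_dirty_atoms is a hypothetical name for
the non-debug branch of brw_upload_state()):

    #include <stdbool.h>

    static bool check_state(const struct brw_state_flags *a,
                            const struct brw_state_flags *b)
    {
       /* True when the accumulated dirty set overlaps the atom's mask. */
       return (a->mesa & b->mesa) || (a->brw & b->brw) ||
              (a->cache & b->cache);
    }

    static void emit_dirty_atoms(struct brw_context *brw,
                                 const struct brw_tracked_state **atoms,
                                 int num_atoms,
                                 const struct brw_state_flags *state)
    {
       /* One flat list, one pass: emit each atom whose bits are dirty. */
       for (int i = 0; i < num_atoms; i++) {
          const struct brw_tracked_state *atom = atoms[i];

          if (check_state(state, &atom->dirty))
             atom->emit(brw);
       }
    }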

@@ -204,7 +204,7 @@ done:
                brw->urb.cs_start,
                brw->urb.size);
 
-      SET_DIRTY_BIT(brw, BRW_NEW_URB_FENCE);
+      brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
    }
 }

@@ -273,9 +273,9 @@ brw_upload_gs_prog(struct brw_context *brw)
    if (gp == NULL) {
       /* No geometry shader. Vertex data just passes straight through. */
-      if (CHECK_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS)) {
+      if (brw->state.dirty.brw & BRW_NEW_VUE_MAP_VS) {
          brw->vue_map_geom_out = brw->vue_map_vs;
-         SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
       }
 
       /* Other state atoms had better not try to access prog_data, since
@@ -319,7 +319,7 @@ brw_upload_gs_prog(struct brw_context *brw)
    if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
               sizeof(brw->vue_map_geom_out)) != 0) {
       brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
-      SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
    }
 }

@@ -497,13 +497,13 @@ static void brw_upload_vs_prog(struct brw_context *brw)
    if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
               sizeof(brw->vue_map_geom_out)) != 0) {
       brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
-      SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS);
+      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
       if (brw->gen < 7) {
          /* No geometry shader support, so the VS VUE map is the VUE map for
           * the output of the "geometry" portion of the pipeline.
           */
          brw->vue_map_geom_out = brw->vue_map_vs;
-         SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
+         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
       }
    }
 }

@@ -181,7 +181,7 @@ brw_upload_vs_unit(struct brw_context *brw)
                        I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_VS_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_VS_UNIT;
 }
 
 const struct brw_tracked_state brw_vs_unit = {

@@ -67,7 +67,7 @@ brw_upload_pull_constants(struct brw_context *brw,
    if (!prog_data->nr_pull_params) {
       if (stage_state->surf_offset[surf_index]) {
          stage_state->surf_offset[surf_index] = 0;
-         SET_DIRTY_BIT(brw, brw_new_constbuf);
+         brw->state.dirty.brw |= brw_new_constbuf;
       }
       return;
    }
@@ -98,7 +98,7 @@ brw_upload_pull_constants(struct brw_context *brw,
                           dword_pitch);
    drm_intel_bo_unreference(const_bo);
 
-   SET_DIRTY_BIT(brw, brw_new_constbuf);
+   brw->state.dirty.brw |= brw_new_constbuf;
 }

@@ -239,7 +239,7 @@ brw_upload_wm_unit(struct brw_context *brw)
                              I915_GEM_DOMAIN_INSTRUCTION, 0);
    }
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_WM_UNIT);
+   brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
 }
 
 const struct brw_tracked_state brw_wm_unit = {

@@ -729,7 +729,7 @@ brw_update_renderbuffer_surfaces(struct brw_context *brw)
    } else {
       brw->vtbl.update_null_renderbuffer_surface(brw, 0);
    }
-   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+   brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state brw_renderbuffer_surfaces = {
@@ -817,7 +817,7 @@ brw_update_texture_surfaces(struct brw_context *brw)
       update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
    }
 
-   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+   brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state brw_texture_surfaces = {
@@ -869,7 +869,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw,
    }
 
    if (shader->NumUniformBlocks)
-      SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+      brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 static void
@@ -920,7 +920,7 @@ brw_upload_abo_surfaces(struct brw_context *brw,
    }
 
    if (prog->NumUniformBlocks)
-      SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+      brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 static void

@@ -65,7 +65,7 @@ gen6_update_sol_surfaces(struct brw_context *brw)
       }
    }
 
-   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
+   brw->state.dirty.brw |= BRW_NEW_SURFACES;
 }
 
 const struct brw_tracked_state gen6_sol_surface = {
@@ -103,7 +103,7 @@ brw_gs_upload_binding_table(struct brw_context *brw)
    /* Skip making a binding table if we don't have anything to put in it. */
    if (!has_surfaces) {
       if (brw->ff_gs.bind_bo_offset != 0) {
-         SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
+         brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
         brw->ff_gs.bind_bo_offset = 0;
       }
       return;
@@ -119,7 +119,7 @@ brw_gs_upload_binding_table(struct brw_context *brw)
    /* BRW_NEW_SURFACES */
    memcpy(bind, brw->ff_gs.surf_offset, BRW_MAX_GEN6_GS_SURFACES * sizeof(uint32_t));
 
-   SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
+   brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
 }
 
 const struct brw_tracked_state gen6_gs_binding_table = {

@@ -63,7 +63,7 @@ gen6_upload_clip_vp(struct brw_context *brw)
    vp->ymin = -gby;
    vp->ymax = gby;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_VP);
+   brw->state.dirty.cache |= CACHE_NEW_CLIP_VP;
 }
 
 const struct brw_tracked_state gen6_clip_vp = {
@@ -106,7 +106,7 @@ gen6_upload_sf_vp(struct brw_context *brw)
    sfv->viewport.m31 = v[MAT_TY] * y_scale + y_bias;
    sfv->viewport.m32 = v[MAT_TZ] * depth_scale;
 
-   SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
+   brw->state.dirty.cache |= CACHE_NEW_SF_VP;
 }
 
 const struct brw_tracked_state gen6_sf_vp = {

@@ -94,7 +94,7 @@ gen7_allocate_push_constants(struct brw_context *brw)
     * Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
     * commands.
     */
-   SET_DIRTY_BIT(brw, BRW_NEW_PUSH_CONSTANT_ALLOCATION);
+   brw->state.dirty.brw |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
 }
 
 void

@@ -324,5 +324,5 @@ gen8_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
     *
     * Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
     */
-   SET_DIRTY_BIT(mesa, _NEW_DEPTH | _NEW_BUFFERS);
+   brw->state.dirty.mesa |= _NEW_DEPTH | _NEW_BUFFERS;
 }

@@ -61,7 +61,7 @@ static void upload_state_base_address(struct brw_context *brw)
    OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
    ADVANCE_BATCH();
 
-   SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
+   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
 }
 
 const struct brw_tracked_state gen8_state_base_address = {

@@ -178,9 +178,9 @@ brw_new_batch(struct brw_context *brw)
     * purposes means everything).
     */
    if (brw->hw_ctx == NULL)
-      SET_DIRTY_BIT(brw, BRW_NEW_CONTEXT);
+      brw->state.dirty.brw |= BRW_NEW_CONTEXT;
 
-   SET_DIRTY_BIT(brw, BRW_NEW_BATCH);
+   brw->state.dirty.brw |= BRW_NEW_BATCH;
 
    /* Assume that the last command before the start of our batch was a
    * primitive, for safety.

@@ -110,7 +110,7 @@ intel_bufferobj_alloc_buffer(struct brw_context *brw,
 
    /* the buffer might be bound as a uniform buffer, need to update it
    */
-   SET_DIRTY_BIT(brw, BRW_NEW_UNIFORM_BUFFER);
+   brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
 
    intel_bufferobj_mark_inactive(intel_obj);