Mirror of https://gitlab.freedesktop.org/mesa/mesa.git, synced 2025-12-24 00:10:10 +01:00
i965: Create a macro for setting a dirty bit.
This will make it easier to extend dirty bit handling to support compute shaders.

Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>
parent 94a909ec2d
commit 88e3d404da

32 changed files with 74 additions and 67 deletions
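
For readers who don't have the i965 sources at hand, here is a small, self-contained sketch of the pattern this commit introduces. Only the field names (mesa/brw/cache) and the macro itself come from the patch below; the struct layout and the flag value are simplified placeholders, so treat it as an illustration rather than the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's dirty-flag fields. */
struct state_flags {
   uint32_t mesa;    /* _NEW_* core-Mesa flags (placeholder type) */
   uint32_t brw;     /* BRW_NEW_* driver flags (placeholder type) */
   uint32_t cache;   /* CACHE_NEW_* program-cache flags */
};

struct brw_context {
   struct {
      struct state_flags dirty;
   } state;
};

/* The macro added by this commit: it relies on a pointer named `brw`
 * being in scope at every call site. */
#define SET_DIRTY_BIT(FIELD, FLAG) brw->state.dirty.FIELD |= (FLAG)

#define BRW_NEW_VERTICES (1u << 0)   /* placeholder flag value */

int main(void)
{
   struct brw_context ctx = {0};
   struct brw_context *brw = &ctx;

   /* Before the patch, call sites spelled the field access out by hand: */
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* After the patch, the same operation goes through the macro: */
   SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);

   printf("brw dirty bits: 0x%x\n", (unsigned) brw->state.dirty.brw);
   return 0;
}

Funneling every call site through one macro gives a single place to hook when dirty-bit handling grows new cases, such as the compute-shader support mentioned in the commit message, instead of touching every assignment in the driver.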
@@ -82,7 +82,7 @@ brw_upload_binding_table(struct brw_context *brw,
                            prog_data->binding_table.size_bytes);
    }
 
-   brw->state.dirty.brw |= brw_new_binding_table;
+   SET_DIRTY_BIT(brw, brw_new_binding_table);
 
    if (brw->gen >= 7) {
       BEGIN_BATCH(2);
@@ -62,7 +62,7 @@ brw_upload_cc_vp(struct brw_context *brw)
       }
    }
 
-   brw->state.dirty.cache |= CACHE_NEW_CC_VP;
+   SET_DIRTY_BIT(cache, CACHE_NEW_CC_VP);
 }
 
 const struct brw_tracked_state brw_cc_vp = {
@@ -223,7 +223,7 @@ static void upload_cc_unit(struct brw_context *brw)
    cc->cc4.cc_viewport_state_offset = (brw->batch.bo->offset64 +
                                        brw->cc.vp_offset) >> 5; /* reloc */
 
-   brw->state.dirty.cache |= CACHE_NEW_CC_UNIT;
+   SET_DIRTY_BIT(cache, CACHE_NEW_CC_UNIT);
 
    /* Emit CC viewport relocation */
    drm_intel_bo_emit_reloc(brw->batch.bo,
@@ -158,7 +158,7 @@ brw_upload_clip_unit(struct brw_context *brw)
    clip->viewport_ymin = -1;
    clip->viewport_ymax = 1;
 
-   brw->state.dirty.cache |= CACHE_NEW_CLIP_UNIT;
+   SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_UNIT);
 }
 
 const struct brw_tracked_state brw_clip_unit = {
@@ -241,6 +241,13 @@ struct brw_state_flags {
    GLuint cache;
 };
 
+
+/**
+ * Set one of the bits in a field of brw_state_flags.
+ */
+#define SET_DIRTY_BIT(FIELD, FLAG) brw->state.dirty.FIELD |= (FLAG)
+
+
 /** Subclass of Mesa vertex program */
 struct brw_vertex_program {
    struct gl_vertex_program program;
@@ -134,7 +134,7 @@ static void calculate_curbe_offsets( struct brw_context *brw )
                   brw->curbe.vs_start,
                   brw->curbe.vs_size );
 
-      brw->state.dirty.brw |= BRW_NEW_CURBE_OFFSETS;
+      SET_DIRTY_BIT(brw, BRW_NEW_CURBE_OFFSETS);
    }
 }
 
@@ -126,11 +126,11 @@ static void brw_set_prim(struct brw_context *brw,
 
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+      SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
 
       if (reduced_prim[prim->mode] != brw->reduced_primitive) {
          brw->reduced_primitive = reduced_prim[prim->mode];
-         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
+         SET_DIRTY_BIT(brw, BRW_NEW_REDUCED_PRIMITIVE);
       }
    }
 }
@@ -146,7 +146,7 @@ static void gen6_set_prim(struct brw_context *brw,
 
    if (hw_prim != brw->primitive) {
       brw->primitive = hw_prim;
-      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+      SET_DIRTY_BIT(brw, BRW_NEW_PRIMITIVE);
    }
 }
 
@@ -403,11 +403,11 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
    brw_merge_inputs( brw, arrays );
 
    brw->ib.ib = ib;
-   brw->state.dirty.brw |= BRW_NEW_INDICES;
+   SET_DIRTY_BIT(brw, BRW_NEW_INDICES);
 
    brw->vb.min_index = min_index;
    brw->vb.max_index = max_index;
-   brw->state.dirty.brw |= BRW_NEW_VERTICES;
+   SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
 
    for (i = 0; i < nr_prims; i++) {
       int estimated_max_prim_size;
@@ -432,7 +432,7 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
       brw->num_instances = prims[i].num_instances;
       brw->basevertex = prims[i].basevertex;
       if (i > 0) { /* For i == 0 we just did this before the loop */
-         brw->state.dirty.brw |= BRW_NEW_VERTICES;
+         SET_DIRTY_BIT(brw, BRW_NEW_VERTICES);
          brw_merge_inputs(brw, arrays);
       }
    }
@@ -872,11 +872,11 @@ static void brw_upload_indices(struct brw_context *brw)
    brw->ib.start_vertex_offset = offset / ib_type_size;
 
    if (brw->ib.bo != old_bo)
-      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
+      SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
 
    if (index_buffer->type != brw->ib.type) {
       brw->ib.type = index_buffer->type;
-      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
+      SET_DIRTY_BIT(brw, BRW_NEW_INDEX_BUFFER);
    }
 }
 
@@ -230,7 +230,7 @@ brw_upload_ff_gs_prog(struct brw_context *brw)
    populate_key(brw, &key);
 
    if (brw->ff_gs.prog_active != key.need_gs_prog) {
-      brw->state.dirty.cache |= CACHE_NEW_FF_GS_PROG;
+      SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_PROG);
       brw->ff_gs.prog_active = key.need_gs_prog;
    }
 
@@ -85,7 +85,7 @@ brw_upload_gs_unit(struct brw_context *brw)
 
    gs->gs6.max_vp_index = brw->ctx.Const.MaxViewports - 1;
 
-   brw->state.dirty.cache |= CACHE_NEW_FF_GS_UNIT;
+   SET_DIRTY_BIT(cache, CACHE_NEW_FF_GS_UNIT);
 }
 
 const struct brw_tracked_state brw_gs_unit = {
@@ -44,7 +44,7 @@ brw_setup_vue_interpolation(struct brw_context *brw)
 
    memset(&brw->interpolation_mode, INTERP_QUALIFIER_NONE, sizeof(brw->interpolation_mode));
 
-   brw->state.dirty.brw |= BRW_NEW_INTERPOLATION_MAP;
+   SET_DIRTY_BIT(brw, BRW_NEW_INTERPOLATION_MAP);
 
    if (!fprog)
       return;
@@ -376,7 +376,7 @@ set_fast_clear_op(struct brw_context *brw, uint32_t op)
     * 3DSTATE_PS.
     */
    brw->wm.fast_clear_op = op;
-   brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+   SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
 }
 
 static void
@@ -400,8 +400,8 @@ use_rectlist(struct brw_context *brw, bool enable)
     * _NEW_BUFFERS to make sure we emit new SURFACE_STATE with the new fast
     * clear color value.
     */
-   brw->state.dirty.mesa |= _NEW_LIGHT | _NEW_BUFFERS;
-   brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+   SET_DIRTY_BIT(mesa, _NEW_LIGHT | _NEW_BUFFERS);
+   SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
 }
 
 bool
@@ -602,8 +602,7 @@ brw_meta_fast_clear(struct brw_context *brw, struct gl_framebuffer *fb,
     * color before resolve and sets irb->mt->fast_clear_state to UNRESOLVED if
     * we render to it.
     */
-   brw->state.dirty.mesa |= _NEW_BUFFERS;
-
+   SET_DIRTY_BIT(mesa, _NEW_BUFFERS);
 
    /* Set the custom state back to normal and dirty the same bits as above */
    use_rectlist(brw, false);
@@ -103,7 +103,7 @@ static void upload_pipelined_state_pointers(struct brw_context *brw )
             brw->cc.state_offset);
    ADVANCE_BATCH();
 
-   brw->state.dirty.brw |= BRW_NEW_PSP;
+   SET_DIRTY_BIT(brw, BRW_NEW_PSP);
 }
 
 static void upload_psp_urb_cbs(struct brw_context *brw )
@@ -1054,7 +1054,7 @@ static void upload_state_base_address( struct brw_context *brw )
     * obvious.
     */
 
-   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
+   SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
 }
 
 const struct brw_tracked_state brw_state_base_address = {
@@ -62,13 +62,13 @@ static void brwBindProgram( struct gl_context *ctx,
 
    switch (target) {
    case GL_VERTEX_PROGRAM_ARB:
-      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
+      SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
       break;
    case MESA_GEOMETRY_PROGRAM:
-      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
+      SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
       break;
    case GL_FRAGMENT_PROGRAM_ARB:
-      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+      SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
       break;
    }
 }
@@ -161,7 +161,7 @@ brwProgramStringNotify(struct gl_context *ctx,
          brw_fragment_program_const(brw->fragment_program);
 
       if (newFP == curFP)
-         brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+         SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
       newFP->id = get_new_program_id(brw->intelScreen);
       break;
    }
@@ -172,7 +172,7 @@ brwProgramStringNotify(struct gl_context *ctx,
          brw_vertex_program_const(brw->vertex_program);
 
       if (newVP == curVP)
-         brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
+         SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
       if (newVP->program.IsPositionInvariant) {
          _mesa_insert_mvp_code(ctx, &newVP->program);
       }
@@ -255,7 +255,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        * so turn them on now.
        */
       brw->stats_wm++;
-      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
+      SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
       break;
 
    default:
@@ -312,7 +312,7 @@ brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
       brw->query.obj = NULL;
 
       brw->stats_wm--;
-      brw->state.dirty.brw |= BRW_NEW_STATS_WM;
+      SET_DIRTY_BIT(brw, BRW_NEW_STATS_WM);
       break;
 
    default:
@@ -494,7 +494,7 @@ brw_upload_sampler_state_table(struct brw_context *brw,
       /* Flag that the sampler state table pointer has changed; later atoms
        * will handle it.
        */
-      brw->state.dirty.cache |= CACHE_NEW_SAMPLER;
+      SET_DIRTY_BIT(cache, CACHE_NEW_SAMPLER);
    }
 }
 
@@ -109,7 +109,7 @@ static void upload_sf_vp(struct brw_context *brw)
       sfv->scissor.ymax = ctx->DrawBuffer->Height - ctx->DrawBuffer->_Ymin - 1;
    }
 
-   brw->state.dirty.cache |= CACHE_NEW_SF_VP;
+   SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
 }
 
 const struct brw_tracked_state brw_sf_vp = {
@@ -292,7 +292,7 @@ static void upload_sf_unit( struct brw_context *brw )
                            (sf->sf5.viewport_transform << 1)),
                           I915_GEM_DOMAIN_INSTRUCTION, 0);
 
-   brw->state.dirty.cache |= CACHE_NEW_SF_UNIT;
+   SET_DIRTY_BIT(cache, CACHE_NEW_SF_UNIT);
 }
 
 const struct brw_tracked_state brw_sf_unit = {
@@ -158,7 +158,7 @@ brw_search_cache(struct brw_cache *cache,
       *(void **)out_aux = ((char *)item->key + item->key_size);
 
       if (item->offset != *inout_offset) {
-         brw->state.dirty.cache |= (1 << cache_id);
+         SET_DIRTY_BIT(cache, 1 << cache_id);
         *inout_offset = item->offset;
      }
 
@@ -187,7 +187,7 @@ brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
    /* Since we have a new BO in place, we need to signal the units
     * that depend on it (state base address on gen5+, or unit state before).
     */
-   brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
+   SET_DIRTY_BIT(brw, BRW_NEW_PROGRAM_CACHE);
 }
 
 /**
@@ -276,6 +276,7 @@ brw_upload_cache(struct brw_cache *cache,
                  uint32_t *out_offset,
                  void *out_aux)
 {
+   struct brw_context *brw = cache->brw;
    struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
    GLuint hash;
    void *tmp;
@@ -320,7 +321,7 @@ brw_upload_cache(struct brw_cache *cache,
 
    *out_offset = item->offset;
    *(void **)out_aux = (void *)((char *)item->key + item->key_size);
-   cache->brw->state.dirty.cache |= 1 << cache_id;
+   SET_DIRTY_BIT(cache, 1 << cache_id);
 }
 
 void
@@ -572,10 +572,10 @@ void brw_upload_state(struct brw_context *brw)
    int i;
    static int dirty_count = 0;
 
-   state->mesa |= brw->NewGLState;
+   SET_DIRTY_BIT(mesa, brw->NewGLState);
    brw->NewGLState = 0;
 
-   state->brw |= ctx->NewDriverState;
+   SET_DIRTY_BIT(brw, ctx->NewDriverState);
    ctx->NewDriverState = 0;
 
    if (0) {
@@ -587,27 +587,27 @@ void brw_upload_state(struct brw_context *brw)
 
    if (brw->fragment_program != ctx->FragmentProgram._Current) {
       brw->fragment_program = ctx->FragmentProgram._Current;
-      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
+      SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
    }
 
    if (brw->geometry_program != ctx->GeometryProgram._Current) {
      brw->geometry_program = ctx->GeometryProgram._Current;
-      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
+      SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
    }
 
    if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
-      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
+      SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
    }
 
    if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
-      brw->state.dirty.brw |= BRW_NEW_META_IN_PROGRESS;
+      SET_DIRTY_BIT(brw, BRW_NEW_META_IN_PROGRESS);
    }
 
    if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
      brw->num_samples = ctx->DrawBuffer->Visual.samples;
-      brw->state.dirty.brw |= BRW_NEW_NUM_SAMPLES;
+      SET_DIRTY_BIT(brw, BRW_NEW_NUM_SAMPLES);
    }
 
    if ((state->mesa | state->cache | state->brw) == 0)
@@ -204,7 +204,7 @@ done:
                   brw->urb.cs_start,
                   brw->urb.size);
 
-      brw->state.dirty.brw |= BRW_NEW_URB_FENCE;
+      SET_DIRTY_BIT(brw, BRW_NEW_URB_FENCE);
    }
 }
 
@@ -274,7 +274,7 @@ brw_upload_gs_prog(struct brw_context *brw)
       /* No geometry shader. Vertex data just passes straight through. */
       if (brw->state.dirty.brw & BRW_NEW_VUE_MAP_VS) {
          brw->vue_map_geom_out = brw->vue_map_vs;
-         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
+         SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
       }
 
       /* Other state atoms had better not try to access prog_data, since
@@ -318,7 +318,7 @@ brw_upload_gs_prog(struct brw_context *brw)
    if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
               sizeof(brw->vue_map_geom_out)) != 0) {
       brw->vue_map_geom_out = brw->gs.prog_data->base.vue_map;
-      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
+      SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
    }
 }
 
@@ -496,13 +496,13 @@ static void brw_upload_vs_prog(struct brw_context *brw)
    if (memcmp(&brw->vs.prog_data->base.vue_map, &brw->vue_map_geom_out,
               sizeof(brw->vue_map_geom_out)) != 0) {
       brw->vue_map_vs = brw->vs.prog_data->base.vue_map;
-      brw->state.dirty.brw |= BRW_NEW_VUE_MAP_VS;
+      SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_VS);
       if (brw->gen < 7) {
          /* No geometry shader support, so the VS VUE map is the VUE map for
          * the output of the "geometry" portion of the pipeline.
          */
         brw->vue_map_geom_out = brw->vue_map_vs;
-         brw->state.dirty.brw |= BRW_NEW_VUE_MAP_GEOM_OUT;
+         SET_DIRTY_BIT(brw, BRW_NEW_VUE_MAP_GEOM_OUT);
       }
    }
 }
@@ -181,7 +181,7 @@ brw_upload_vs_unit(struct brw_context *brw)
                              I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    }
 
-   brw->state.dirty.cache |= CACHE_NEW_VS_UNIT;
+   SET_DIRTY_BIT(cache, CACHE_NEW_VS_UNIT);
 }
 
 const struct brw_tracked_state brw_vs_unit = {
@@ -67,7 +67,7 @@ brw_upload_pull_constants(struct brw_context *brw,
    if (!prog_data->nr_pull_params) {
       if (stage_state->surf_offset[surf_index]) {
          stage_state->surf_offset[surf_index] = 0;
-         brw->state.dirty.brw |= brw_new_constbuf;
+         SET_DIRTY_BIT(brw, brw_new_constbuf);
       }
       return;
    }
@@ -98,7 +98,7 @@ brw_upload_pull_constants(struct brw_context *brw,
                                      dword_pitch);
    drm_intel_bo_unreference(const_bo);
 
-   brw->state.dirty.brw |= brw_new_constbuf;
+   SET_DIRTY_BIT(brw, brw_new_constbuf);
 }
 
 
@@ -239,7 +239,7 @@ brw_upload_wm_unit(struct brw_context *brw)
                              I915_GEM_DOMAIN_INSTRUCTION, 0);
    }
 
-   brw->state.dirty.cache |= CACHE_NEW_WM_UNIT;
+   SET_DIRTY_BIT(cache, CACHE_NEW_WM_UNIT);
 }
 
 const struct brw_tracked_state brw_wm_unit = {
@@ -729,7 +729,7 @@ brw_update_renderbuffer_surfaces(struct brw_context *brw)
    } else {
       brw->vtbl.update_null_renderbuffer_surface(brw, 0);
    }
-   brw->state.dirty.brw |= BRW_NEW_SURFACES;
+   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
 }
 
 const struct brw_tracked_state brw_renderbuffer_surfaces = {
@@ -817,7 +817,7 @@ brw_update_texture_surfaces(struct brw_context *brw)
       update_stage_texture_surfaces(brw, fs, &brw->wm.base, true);
    }
 
-   brw->state.dirty.brw |= BRW_NEW_SURFACES;
+   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
 }
 
 const struct brw_tracked_state brw_texture_surfaces = {
@@ -869,7 +869,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw,
    }
 
    if (shader->NumUniformBlocks)
-      brw->state.dirty.brw |= BRW_NEW_SURFACES;
+      SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
 }
 
 static void
@@ -920,7 +920,7 @@ brw_upload_abo_surfaces(struct brw_context *brw,
    }
 
    if (prog->NumUniformBlocks)
-      brw->state.dirty.brw |= BRW_NEW_SURFACES;
+      SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
 }
 
 static void
@@ -65,7 +65,7 @@ gen6_update_sol_surfaces(struct brw_context *brw)
       }
    }
 
-   brw->state.dirty.brw |= BRW_NEW_SURFACES;
+   SET_DIRTY_BIT(brw, BRW_NEW_SURFACES);
 }
 
 const struct brw_tracked_state gen6_sol_surface = {
@@ -103,7 +103,7 @@ brw_gs_upload_binding_table(struct brw_context *brw)
    /* Skip making a binding table if we don't have anything to put in it. */
    if (!has_surfaces) {
       if (brw->ff_gs.bind_bo_offset != 0) {
-         brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
+         SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
          brw->ff_gs.bind_bo_offset = 0;
       }
       return;
@@ -119,7 +119,7 @@ brw_gs_upload_binding_table(struct brw_context *brw)
    /* BRW_NEW_SURFACES */
    memcpy(bind, brw->ff_gs.surf_offset, BRW_MAX_GEN6_GS_SURFACES * sizeof(uint32_t));
 
-   brw->state.dirty.brw |= BRW_NEW_GS_BINDING_TABLE;
+   SET_DIRTY_BIT(brw, BRW_NEW_GS_BINDING_TABLE);
 }
 
 const struct brw_tracked_state gen6_gs_binding_table = {
@@ -63,7 +63,7 @@ gen6_upload_clip_vp(struct brw_context *brw)
    vp->ymin = -gby;
    vp->ymax = gby;
 
-   brw->state.dirty.cache |= CACHE_NEW_CLIP_VP;
+   SET_DIRTY_BIT(cache, CACHE_NEW_CLIP_VP);
 }
 
 const struct brw_tracked_state gen6_clip_vp = {
@@ -106,7 +106,7 @@ gen6_upload_sf_vp(struct brw_context *brw)
    sfv->viewport.m31 = v[MAT_TY] * y_scale + y_bias;
    sfv->viewport.m32 = v[MAT_TZ] * depth_scale;
 
-   brw->state.dirty.cache |= CACHE_NEW_SF_VP;
+   SET_DIRTY_BIT(cache, CACHE_NEW_SF_VP);
 }
 
 const struct brw_tracked_state gen6_sf_vp = {
@@ -94,7 +94,7 @@ gen7_allocate_push_constants(struct brw_context *brw)
     * Similar text exists for the other 3DSTATE_PUSH_CONSTANT_ALLOC_*
     * commands.
     */
-   brw->state.dirty.brw |= BRW_NEW_PUSH_CONSTANT_ALLOCATION;
+   SET_DIRTY_BIT(brw, BRW_NEW_PUSH_CONSTANT_ALLOCATION);
 }
 
 void
@@ -324,5 +324,5 @@ gen8_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
     *
     * Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
     */
-   brw->state.dirty.mesa |= _NEW_DEPTH | _NEW_BUFFERS;
+   SET_DIRTY_BIT(mesa, _NEW_DEPTH | _NEW_BUFFERS);
 }
@@ -61,7 +61,7 @@ static void upload_state_base_address(struct brw_context *brw)
    OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
    ADVANCE_BATCH();
 
-   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
+   SET_DIRTY_BIT(brw, BRW_NEW_STATE_BASE_ADDRESS);
 }
 
 const struct brw_tracked_state gen8_state_base_address = {
@@ -178,9 +178,9 @@ brw_new_batch(struct brw_context *brw)
     * purposes means everything).
     */
    if (brw->hw_ctx == NULL)
-      brw->state.dirty.brw |= BRW_NEW_CONTEXT;
+      SET_DIRTY_BIT(brw, BRW_NEW_CONTEXT);
 
-   brw->state.dirty.brw |= BRW_NEW_BATCH;
+   SET_DIRTY_BIT(brw, BRW_NEW_BATCH);
 
    /* Assume that the last command before the start of our batch was a
     * primitive, for safety.
@@ -110,7 +110,7 @@ intel_bufferobj_alloc_buffer(struct brw_context *brw,
 
    /* the buffer might be bound as a uniform buffer, need to update it
    */
-   brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
+   SET_DIRTY_BIT(brw, BRW_NEW_UNIFORM_BUFFER);
 
    intel_bufferobj_mark_inactive(intel_obj);
 }