[i915] Fix driver fallout from the cliprects changes, and clean up state emission.

The fix for pageflipping with cliprects ended up causing a batch flush at
an inopportune time, which is fixed by moving the flush-capable call,
intel_wait_flips(), up ahead of state emission.

Additionally, the recovery code for handling batch wraps at bad times is
replaced by checking for the required space up front, together with a
no_batch_wrap assert like the one on 965 to make sure the up-front space
estimate was not wrong.
Eric Anholt 2008-01-14 11:03:05 -08:00
parent faeb1bc9f9
commit 8517079cbc
6 changed files with 26 additions and 103 deletions
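
To make the pattern concrete, here is a small, self-contained toy model of the
new scheme (not driver code: toy_batch, toy_require_space, toy_new_batch and
the byte sizes are invented for illustration). In the real change the same
roles are played by intel_batchbuffer_require_space(), now called once up
front with get_state_size(state) + 8 under LOOP_CLIPRECTS, and by the
assert(!intel->no_batch_wrap) added to the *_new_batch() hooks; the extra 8
bytes are assumed to cover the two-dword slot later filled in by
BEGIN_BATCH(2, batch_flags) in intelStartInlinePrimitive().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_BATCH_SIZE 64u

struct toy_batch {
   unsigned used;        /* bytes already emitted into this batch      */
   bool no_batch_wrap;   /* set while a wrap would lose emitted state  */
};

/* Stand-in for starting a fresh batch after a flush. */
static void toy_new_batch(struct toy_batch *batch)
{
   assert(!batch->no_batch_wrap);   /* batch wrapped at a bad time */
   batch->used = 0;
}

/* Stand-in for intel_batchbuffer_require_space(): flush now, before
 * anything has been emitted, if the whole upcoming emit won't fit. */
static void toy_require_space(struct toy_batch *batch, unsigned sz)
{
   if (batch->used + sz > TOY_BATCH_SIZE)
      toy_new_batch(batch);
}

static void toy_emit(struct toy_batch *batch, unsigned sz)
{
   if (batch->used + sz > TOY_BATCH_SIZE)
      toy_new_batch(batch);         /* fires the assert if forbidden */
   batch->used += sz;
}

int main(void)
{
   struct toy_batch batch = { 0, false };
   unsigned state_size = 40;
   unsigned prim_header = 8;        /* 2 dwords * 4 bytes, as in the "+ 8" */

   /* Reserve room for the state *and* the primitive header up front... */
   toy_require_space(&batch, state_size + prim_header);

   /* ...so nothing between here and the end of the primitive may wrap. */
   batch.no_batch_wrap = true;
   toy_emit(&batch, state_size);    /* state emission            */
   toy_emit(&batch, prim_header);   /* inline-primitive header   */
   batch.no_batch_wrap = false;

   printf("used %u of %u batch bytes\n", batch.used, TOY_BATCH_SIZE);
   return 0;
}

The flag is purely diagnostic: if the up-front reservation is ever too small,
the wrap is caught by the assert instead of silently splitting the state and
the primitive across two batches.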

i830_vtbl.c

@@ -414,7 +414,7 @@ get_state_size(struct i830_hw_state *state)
/* Push the state into the sarea and/or texture memory.
*/
static void
i830_do_emit_state(struct intel_context *intel)
i830_emit_state(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
struct i830_hw_state *state = i830->current;
@@ -423,28 +423,18 @@ i830_do_emit_state(struct intel_context *intel)
BATCH_LOCALS;
/* We don't hold the lock at this point, so want to make sure that
* there won't be a buffer wrap.
* there won't be a buffer wrap between the state emits and the primitive
* emit header.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
/* Workaround. There are cases I haven't been able to track down
* where we aren't emitting a full state at the start of a new
* batchbuffer. This code spots that we are on a new batchbuffer
* and forces a full state emit no matter what.
*
* In the normal case state->emitted is already zero, this code is
* another set of checks to make sure it really is.
* Set the space as LOOP_CLIPRECTS now, since that's what our primitives
* will be emitted under.
*/
if (intel->batch->id != intel->last_state_batch_id ||
intel->batch->map == intel->batch->ptr)
{
state->emitted = 0;
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
}
intel_batchbuffer_require_space(intel->batch, get_state_size(state) + 8,
LOOP_CLIPRECTS);
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
@@ -453,11 +443,6 @@ i830_do_emit_state(struct intel_context *intel)
state->emitted |= dirty;
assert(get_dirty(state) == 0);
if (intel->batch->id != intel->last_state_batch_id) {
assert(dirty & I830_UPLOAD_CTX);
intel->last_state_batch_id = intel->batch->id;
}
if (dirty & I830_UPLOAD_INVARIENT) {
DBG("I830_UPLOAD_INVARIENT:\n");
i830_emit_invarient_state(intel);
@@ -537,27 +522,6 @@ i830_do_emit_state(struct intel_context *intel)
intel->batch->dirty_state &= ~dirty;
assert(get_dirty(state) == 0);
}
static void
i830_emit_state(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
i830_do_emit_state( intel );
/* Second chance - catch batchbuffer wrap in the middle of state
* emit. This shouldn't happen but it has been observed in
* testing.
*/
if (get_dirty( i830->current )) {
/* Force a full re-emit if this happens.
*/
i830->current->emitted = 0;
i830_do_emit_state( intel );
}
assert(get_dirty(i830->current) == 0);
assert((intel->batch->dirty_state & (1<<1)) == 0);
}
@@ -679,6 +643,9 @@ i830_new_batch(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
i830->state.emitted = 0;
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!intel->no_batch_wrap);
}

i915_vtbl.c

@@ -287,11 +287,10 @@ get_state_size(struct i915_hw_state *state)
return sz;
}
/* Push the state into the sarea and/or texture memory.
*/
static void
i915_do_emit_state(struct intel_context *intel)
i915_emit_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = i915->current;
@@ -300,28 +299,18 @@ i915_do_emit_state(struct intel_context *intel)
BATCH_LOCALS;
/* We don't hold the lock at this point, so want to make sure that
* there won't be a buffer wrap.
* there won't be a buffer wrap between the state emits and the primitive
* emit header.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
/* Workaround. There are cases I haven't been able to track down
* where we aren't emitting a full state at the start of a new
* batchbuffer. This code spots that we are on a new batchbuffer
* and forces a full state emit no matter what.
*
* In the normal case state->emitted is already zero, this code is
* another set of checks to make sure it really is.
* Set the space as LOOP_CLIPRECTS now, since that's what our primitives
* will be emitted under.
*/
if (intel->batch->id != intel->last_state_batch_id ||
intel->batch->map == intel->batch->ptr)
{
state->emitted = 0;
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
}
intel_batchbuffer_require_space(intel->batch, get_state_size(state) + 8,
LOOP_CLIPRECTS);
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
@@ -330,11 +319,6 @@ i915_do_emit_state(struct intel_context *intel)
state->emitted |= dirty;
assert(get_dirty(state) == 0);
if (intel->batch->id != intel->last_state_batch_id) {
assert(dirty & I915_UPLOAD_CTX);
intel->last_state_batch_id = intel->batch->id;
}
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
@@ -457,27 +441,6 @@ i915_do_emit_state(struct intel_context *intel)
intel->batch->dirty_state &= ~dirty;
assert(get_dirty(state) == 0);
}
static void
i915_emit_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
i915_do_emit_state( intel );
/* Second chance - catch batchbuffer wrap in the middle of state
* emit. This shouldn't happen but it has been observed in
* testing.
*/
if (get_dirty( i915->current )) {
/* Force a full re-emit if this happens.
*/
i915->current->emitted = 0;
i915_do_emit_state( intel );
}
assert(get_dirty(i915->current) == 0);
assert((intel->batch->dirty_state & (1<<1)) == 0);
}
@@ -588,6 +551,9 @@ i915_new_batch(struct intel_context *intel)
* difficulties associated with them (physical address requirements).
*/
i915->state.emitted = 0;
/* Check that we didn't just wrap our batchbuffer at a bad time. */
assert(!intel->no_batch_wrap);
}
static GLuint

intel_context.h

@@ -169,8 +169,7 @@ struct intel_context
dri_fence *first_swap_fence;
struct intel_batchbuffer *batch;
unsigned batch_id;
GLuint last_state_batch_id;
GLboolean no_batch_wrap;
struct
{

intel_tris.c

@@ -89,29 +89,20 @@ intelStartInlinePrimitive(struct intel_context *intel,
{
BATCH_LOCALS;
intel_wait_flips(intel);
intel->vtbl.emit_state(intel);
/* Need to make sure at the very least that we don't wrap
* batchbuffers in BEGIN_BATCH below, otherwise the primitive will
* be emitted to a batchbuffer missing the required full-state
* preamble.
*/
if (intel_batchbuffer_space(intel->batch) < 100) {
intel_batchbuffer_flush(intel->batch);
intel->vtbl.emit_state(intel);
}
intel->no_batch_wrap = GL_TRUE;
/* _mesa_printf("%s *", __progname); */
intel_wait_flips(intel);
/* Emit a slot which will be filled with the inline primitive
* command later.
*/
BEGIN_BATCH(2, batch_flags);
OUT_BATCH(0);
assert(intel->batch->id == intel->last_state_batch_id);
assert((intel->batch->dirty_state & (1<<1)) == 0);
intel->prim.start_ptr = intel->batch->ptr;
@@ -121,6 +112,8 @@ intelStartInlinePrimitive(struct intel_context *intel,
OUT_BATCH(0);
ADVANCE_BATCH();
intel->no_batch_wrap = GL_FALSE;
/* _mesa_printf(">"); */
}

intel_batchbuffer.c

@@ -86,7 +86,6 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
batch->size = intel->maxBatchSize;
batch->ptr = batch->map;
batch->dirty_state = ~0;
batch->id = batch->intel->batch_id++;
batch->cliprect_mode = IGNORE_CLIPRECTS;
}

intel_batchbuffer.h

@@ -50,7 +50,6 @@ struct intel_batchbuffer
GLuint size;
GLuint dirty_state;
GLuint id;
};
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context