i915: Move the always_flush_cache code to triangle emit.

This could have broken always_flush_cache on i965, since
reserved_space doesn't reflect the size of the workaround flushes, and
we might run out of space.  This should make always_flush_cache more
useful on pre-i965, anyway (since the point is to flush around each
draw call, even within a batchbuffer).

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Acked-by: Paul Berry <stereotype441@gmail.com>
This commit is contained in:
Eric Anholt 2011-10-21 16:32:03 -07:00
parent 439d67f502
commit db364a8af0
2 changed files with 8 additions and 4 deletions

View file

@@ -234,6 +234,10 @@ void intel_flush_prim(struct intel_context *intel)
*/
intel->no_batch_wrap = true;
if (intel->always_flush_cache) {
intel_batchbuffer_emit_mi_flush(intel);
}
#if 0
printf("emitting %d..%d=%d vertices size %d\n", offset,
intel->prim.current_offset, count,
@@ -306,6 +310,10 @@ void intel_flush_prim(struct intel_context *intel)
ADVANCE_BATCH();
}
if (intel->always_flush_cache) {
intel_batchbuffer_emit_mi_flush(intel);
}
intel->no_batch_wrap = false;
drm_intel_bo_unreference(vb_bo);

View file

@@ -169,10 +169,6 @@ _intel_batchbuffer_flush(struct intel_context *intel,
intel->batch.reserved_space = 0;
if (intel->always_flush_cache) {
intel_batchbuffer_emit_mi_flush(intel);
}
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
if (intel->batch.used & 1) {