i965: Delete BATCH_RESERVED handling.

Now that we can grow the batchbuffer if we absolutely need the extra
space, we don't need to reserve space for the final do-or-die ending
commands.

Reviewed-by: Matt Turner <mattst88@gmail.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Kenneth Graunke 2017-09-05 15:03:48 -07:00
parent 9034d157c0
commit 2c46a67b41
2 changed files with 3 additions and 34 deletions
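
For context, a minimal sketch of the mechanism the commit message relies
on: a batch whose backing storage can grow whenever an emit would not
fit, so the ending commands can always be accommodated. All names here
are illustrative, not the driver's actual code:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct sketch_batch {
       uint32_t *map;    /* CPU mapping of the command buffer */
       uint32_t  size;   /* current capacity in bytes */
       uint32_t  used;   /* bytes of commands emitted so far */
    };

    /* Grow on demand: instead of reserving a worst-case tail up front,
     * double the capacity until the pending packet fits. */
    static void
    sketch_require_space(struct sketch_batch *batch, uint32_t sz)
    {
       if (batch->used + sz <= batch->size)
          return;

       uint32_t new_size = batch->size;
       while (batch->used + sz > new_size)
          new_size *= 2;

       uint32_t *new_map = realloc(batch->map, new_size);
       assert(new_map != NULL);
       batch->map = new_map;
       batch->size = new_size;
    }

With that in place, a fixed reservation only wastes space and
complicates every size check, which is what the diff below removes.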

src/mesa/drivers/dri/i965/intel_batchbuffer.c

@@ -204,7 +204,6 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch,
    add_exec_bo(batch, batch->bo);
    assert(batch->bo->index == 0);
-   batch->reserved_space = BATCH_RESERVED;
    batch->needs_sol_reset = false;
    batch->state_base_address_emitted = false;
@@ -372,8 +371,7 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
    /* For now, flush as if the batch and state buffers still shared a BO */
    const unsigned batch_used = USED_BATCH(*batch) * 4;
-   if (batch_used + sz >=
-       BATCH_SZ - batch->reserved_space - batch->state_used) {
+   if (batch_used + sz >= BATCH_SZ - batch->state_used) {
       if (!brw->no_batch_wrap) {
          intel_batchbuffer_flush(brw);
       } else {
@@ -382,8 +380,7 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
          grow_buffer(brw, &batch->bo, &batch->map, &batch->batch_cpu_map,
                      batch_used, new_size);
          batch->map_next = (void *) batch->map + batch_used;
-         assert(batch_used + sz <
-                batch->bo->size - batch->reserved_space - batch->state_used);
+         assert(batch_used + sz < batch->bo->size - batch->state_used);
       }
    }
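
Taken together, the two hunks above reduce the wrap decision to a
three-way choice. A self-contained sketch of that decision (the
parameter names mirror the driver's fields; the grow path exists
because sequences emitted under no_batch_wrap must not be split across
batches):

    #include <stdbool.h>
    #include <stdint.h>

    enum wrap_action { FITS, FLUSH, GROW };

    /* Mirrors the post-change check: with reserved_space gone, only the
     * commands already emitted plus the state bytes count against the
     * budget. */
    static enum wrap_action
    check_space(uint32_t batch_used, uint32_t sz, uint32_t state_used,
                uint32_t batch_sz, bool no_batch_wrap)
    {
       if (batch_used + sz < batch_sz - state_used)
          return FITS;
       return no_batch_wrap ? GROW : FLUSH;
    }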
@@ -896,8 +893,6 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
              bytes_for_state, 100.0f * bytes_for_state / STATE_SZ);
    }
-   brw->batch.reserved_space = 0;
-
    brw_finish_batch(brw);
 
    /* Mark the end of the buffer. */
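
The "do-or-die ending commands" from the commit message are, at a
minimum, the batch terminator plus an aligning no-op. A sketch, with
opcode values as in the Intel MI command encoding (assumed here, not
taken from this diff):

    #include <stdint.h>

    #define MI_BATCH_BUFFER_END (0xA << 23)   /* MI opcode 0x0A */
    #define MI_NOOP             0             /* MI opcode 0x00 */

    /* "Mark the end of the buffer": one terminator dword, plus a NOOP
     * if needed to keep the batch length qword-aligned.  These always
     * fit now that the batch can grow; previously, reserved_space
     * guaranteed room for them. */
    static void
    end_batch(uint32_t *map, uint32_t *used_dwords)
    {
       map[(*used_dwords)++] = MI_BATCH_BUFFER_END;
       if (*used_dwords & 1)
          map[(*used_dwords)++] = MI_NOOP;
    }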
@@ -1032,7 +1027,7 @@ brw_state_batch(struct brw_context *brw,
    uint32_t offset = ALIGN(batch->state_used, alignment);
 
    /* For now, follow the old flushing behavior. */
-   int batch_space = batch->reserved_space + USED_BATCH(*batch) * 4;
+   int batch_space = USED_BATCH(*batch) * 4;
    if (offset + size >= STATE_SZ - batch_space) {
       if (!brw->no_batch_wrap) {
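
brw_state_batch() allocates state from the same flush budget, "as if
the batch and state buffers still shared a BO". A sketch of the check
after this change (ALIGN and the state_sz parameter stand in for the
driver's macro and STATE_SZ constant):

    #include <stdbool.h>
    #include <stdint.h>

    #define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))  /* power-of-two */

    /* True if a state allocation of `size` bytes at `alignment` still
     * fits alongside the commands already emitted; reserved_space no
     * longer shrinks the budget. */
    static bool
    state_alloc_fits(uint32_t state_used, uint32_t alignment, uint32_t size,
                     uint32_t batch_bytes, uint32_t state_sz)
    {
       uint32_t offset = ALIGN(state_used, alignment);
       return offset + size < state_sz - batch_bytes;
    }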

src/mesa/drivers/dri/i965/intel_batchbuffer.h

@@ -10,32 +10,6 @@
 extern "C" {
 #endif
 
-/**
- * Number of bytes to reserve for commands necessary to complete a batch.
- *
- * This includes:
- * - MI_BATCHBUFFER_END (4 bytes)
- * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
- * - Any state emitted by vtbl->finish_batch():
- *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
- *   - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
- *   - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
- *     - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
- *       which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
- *     - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+. ==> 12 bytes.
- *       On Ironlake, it's 6 DWords, but we have some slack due to the lack of
- *       Sandybridge PIPE_CONTROL madness.
- *   - CC_STATE workaround on HSW (17 * 4 = 68 bytes)
- *     - 10 dwords for initial mi_flush
- *     - 2 dwords for CC state setup
- *     - 5 dwords for the required pipe control at the end
- *   - Restoring L3 configuration: (24 dwords = 96 bytes)
- *     - 2*6 dwords for two PIPE_CONTROL flushes.
- *     - 7 dwords for L3 configuration set-up.
- *     - 5 dwords for L3 atomic set-up (on HSW).
- */
-#define BATCH_RESERVED 308
-
 struct intel_batchbuffer;
 
 void intel_batchbuffer_init(struct intel_screen *screen,
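
As a closing aside, the deleted comment's arithmetic can be tallied
mechanically; a sketch using only the figures above (several items are
per-generation alternatives that are never all needed at once, which is
presumably why the hand-computed 308 sits below the straight sum):

    /* Worst-case byte counts from the deleted comment, for reference. */
    enum {
       END_BYTES       = 4 + 4,         /* MI_BATCHBUFFER_END + MI_NOOP   */
       OCCLUSION_BYTES = 4 * 4,         /* Gen4-5 ending occlusion values */
       OA_BYTES        = 3 * 4,         /* disabling OA counters, Gen6+   */
       PERF_PC_BYTES   = 2 * 4 * 5 * 4, /* SNB PIPE_CONTROLs around the
                                           ending MI_REPORT_PERF_COUNT    */
       PERF_CMD_BYTES  = 3 * 4,         /* MI_REPORT_PERF_COUNT, Gen6+    */
       CC_STATE_BYTES  = 17 * 4,        /* HSW CC_STATE workaround        */
       L3_BYTES        = 24 * 4,        /* restoring L3 configuration     */
    };
    /* Straight sum: 8 + 16 + 12 + 160 + 12 + 68 + 96 = 372 bytes. */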