mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-05 20:28:04 +02:00
i965/batch: avoid reverting batch buffer if saved state is empty
There's no point reverting to the last saved point if that save point is
the empty batch, we will just repeat ourselves.
v2: Merged with new commits; changes were minimized, and the 'Fixes' tag was added
v3: Added into the patch series
v4: Fixed the regression which was introduced by this patch
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108630
Reported-by: Mark Janes <mark.a.janes@intel.com>
The solution was provided by: Jordan Justen <jordan.l.justen@intel.com>
CC: Chris Wilson <chris@chris-wilson.co.uk>
Fixes: 3faf56ffbd "intel: Add an interface for saving/restoring
the batchbuffer state."
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=107626
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108630 (fixed in v4)
Signed-off-by: Andrii Simiklit <andrii.simiklit@globallogic.com>
Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
This commit is contained in:
parent
982e012b3a
commit
b787dcf57b
5 changed files with 13 additions and 2 deletions
|
|
@ -167,7 +167,7 @@ static void
|
|||
brw_dispatch_compute_common(struct gl_context *ctx)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
bool fail_next = false;
|
||||
bool fail_next;
|
||||
|
||||
if (!_mesa_check_conditional_render(ctx))
|
||||
return;
|
||||
|
|
@ -185,6 +185,7 @@ brw_dispatch_compute_common(struct gl_context *ctx)
|
|||
intel_batchbuffer_require_space(brw, 600);
|
||||
brw_require_statebuffer_space(brw, 2500);
|
||||
intel_batchbuffer_save_state(brw);
|
||||
fail_next = intel_batchbuffer_saved_state_is_empty(brw);
|
||||
|
||||
retry:
|
||||
brw->batch.no_wrap = true;
|
||||
|
|
|
|||
|
|
@ -885,7 +885,7 @@ brw_draw_single_prim(struct gl_context *ctx,
|
|||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
const struct gen_device_info *devinfo = &brw->screen->devinfo;
|
||||
bool fail_next = false;
|
||||
bool fail_next;
|
||||
|
||||
/* Flag BRW_NEW_DRAW_CALL on every draw. This allows us to have
|
||||
* atoms that happen on every draw call.
|
||||
|
|
@ -898,6 +898,7 @@ brw_draw_single_prim(struct gl_context *ctx,
|
|||
intel_batchbuffer_require_space(brw, 1500);
|
||||
brw_require_statebuffer_space(brw, 2400);
|
||||
intel_batchbuffer_save_state(brw);
|
||||
fail_next = intel_batchbuffer_saved_state_is_empty(brw);
|
||||
|
||||
if (brw->num_instances != prim->num_instances ||
|
||||
brw->basevertex != prim->basevertex ||
|
||||
|
|
|
|||
|
|
@ -309,6 +309,7 @@ retry:
|
|||
intel_batchbuffer_require_space(brw, 1400);
|
||||
brw_require_statebuffer_space(brw, 600);
|
||||
intel_batchbuffer_save_state(brw);
|
||||
check_aperture_failed_once |= intel_batchbuffer_saved_state_is_empty(brw);
|
||||
brw->batch.no_wrap = true;
|
||||
|
||||
#if GEN_GEN == 6
|
||||
|
|
|
|||
|
|
@ -301,6 +301,13 @@ intel_batchbuffer_save_state(struct brw_context *brw)
|
|||
brw->batch.saved.exec_count = brw->batch.exec_count;
|
||||
}
|
||||
|
||||
bool
|
||||
intel_batchbuffer_saved_state_is_empty(struct brw_context *brw)
|
||||
{
|
||||
struct intel_batchbuffer *batch = &brw->batch;
|
||||
return (batch->saved.map_next == batch->batch.map);
|
||||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ struct intel_batchbuffer;
|
|||
void intel_batchbuffer_init(struct brw_context *brw);
|
||||
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
|
||||
void intel_batchbuffer_save_state(struct brw_context *brw);
|
||||
bool intel_batchbuffer_saved_state_is_empty(struct brw_context *brw);
|
||||
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
|
||||
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz);
|
||||
int _intel_batchbuffer_flush_fence(struct brw_context *brw,
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue