mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-07 04:58:05 +02:00
i965: Don't inline intel_batchbuffer_require_space().
It's called by the inline intel_batchbuffer_begin() function, which itself is used in BEGIN_BATCH. So in a sequence of code emitting multiple packets, we had inlined this ~200 byte function multiple times. Making it an out-of-line function presumably improved icache usage. Improves performance of Gl32Batch7 by 3.39898% +/- 0.358674% (n=155) on Ivybridge. Reviewed-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
This commit is contained in:
parent
1faca438bd
commit
a74fc3fe8a
2 changed files with 28 additions and 26 deletions
|
|
@ -106,6 +106,32 @@ intel_batchbuffer_free(struct brw_context *brw)
|
||||||
drm_intel_bo_unreference(brw->batch.bo);
|
drm_intel_bo_unreference(brw->batch.bo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
|
||||||
|
enum brw_gpu_ring ring)
|
||||||
|
{
|
||||||
|
/* If we're switching rings, implicitly flush the batch. */
|
||||||
|
if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
|
||||||
|
brw->gen >= 6) {
|
||||||
|
intel_batchbuffer_flush(brw);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef DEBUG
|
||||||
|
assert(sz < BATCH_SZ - BATCH_RESERVED);
|
||||||
|
#endif
|
||||||
|
if (intel_batchbuffer_space(brw) < sz)
|
||||||
|
intel_batchbuffer_flush(brw);
|
||||||
|
|
||||||
|
enum brw_gpu_ring prev_ring = brw->batch.ring;
|
||||||
|
/* The intel_batchbuffer_flush() calls above might have changed
|
||||||
|
* brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
|
||||||
|
*/
|
||||||
|
brw->batch.ring = ring;
|
||||||
|
|
||||||
|
if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
|
||||||
|
intel_batchbuffer_emit_render_ring_prelude(brw);
|
||||||
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
do_batch_dump(struct brw_context *brw)
|
do_batch_dump(struct brw_context *brw)
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -44,6 +44,8 @@ void intel_batchbuffer_init(struct brw_context *brw);
|
||||||
void intel_batchbuffer_free(struct brw_context *brw);
|
void intel_batchbuffer_free(struct brw_context *brw);
|
||||||
void intel_batchbuffer_save_state(struct brw_context *brw);
|
void intel_batchbuffer_save_state(struct brw_context *brw);
|
||||||
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
|
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
|
||||||
|
void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
|
||||||
|
enum brw_gpu_ring ring);
|
||||||
|
|
||||||
int _intel_batchbuffer_flush(struct brw_context *brw,
|
int _intel_batchbuffer_flush(struct brw_context *brw,
|
||||||
const char *file, int line);
|
const char *file, int line);
|
||||||
|
|
@ -116,32 +118,6 @@ intel_batchbuffer_emit_float(struct brw_context *brw, float f)
|
||||||
intel_batchbuffer_emit_dword(brw, float_as_int(f));
|
intel_batchbuffer_emit_dword(brw, float_as_int(f));
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void
|
|
||||||
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
|
|
||||||
enum brw_gpu_ring ring)
|
|
||||||
{
|
|
||||||
/* If we're switching rings, implicitly flush the batch. */
|
|
||||||
if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
|
|
||||||
brw->gen >= 6) {
|
|
||||||
intel_batchbuffer_flush(brw);
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef DEBUG
|
|
||||||
assert(sz < BATCH_SZ - BATCH_RESERVED);
|
|
||||||
#endif
|
|
||||||
if (intel_batchbuffer_space(brw) < sz)
|
|
||||||
intel_batchbuffer_flush(brw);
|
|
||||||
|
|
||||||
enum brw_gpu_ring prev_ring = brw->batch.ring;
|
|
||||||
/* The intel_batchbuffer_flush() calls above might have changed
|
|
||||||
* brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
|
|
||||||
*/
|
|
||||||
brw->batch.ring = ring;
|
|
||||||
|
|
||||||
if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
|
|
||||||
intel_batchbuffer_emit_render_ring_prelude(brw);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
|
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
|
||||||
{
|
{
|
||||||
|
|
|
||||||
Loading…
Add table
Reference in a new issue