Mirror of https://gitlab.freedesktop.org/mesa/mesa.git (synced 2025-12-24 11:00:11 +01:00)
i965: Rename intel_batchbuffer_* to brw_batch_*.
Shorter, matching the convention in iris, and drops use of "intel_" on
i965-specific code that isn't shared.

Acked-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9207>
commit 462c9e173c
parent a56f4f2b4a
26 changed files with 137 additions and 137 deletions
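The per-file headers of the diff were lost in this mirror view, so for quick reference here is the renamed batchbuffer interface as it appears after this commit, collected from the header hunk starting at @@ -19,31 +19,31 @@ further down. This summary is an editorial aid, not part of the commit itself; the forward declarations and the GL include are added here only to make the excerpt self-contained.

/* i965 batchbuffer API after the rename (see the header hunk below).
 * struct intel_batchbuffer becomes struct brw_batch. */
#include <GL/gl.h>      /* GLuint */

struct brw_context;     /* i965 context, defined elsewhere in the driver */
struct brw_batch;       /* renamed from struct intel_batchbuffer */

void brw_batch_init(struct brw_context *brw);
void brw_batch_free(struct brw_batch *batch);
void brw_batch_save_state(struct brw_context *brw);
bool brw_batch_saved_state_is_empty(struct brw_context *brw);
void brw_batch_reset_to_saved(struct brw_context *brw);
void brw_batch_require_space(struct brw_context *brw, GLuint sz);
int _brw_batch_flush_fence(struct brw_context *brw,
                           int in_fence_fd, int *out_fence_fd,
                           const char *file, int line);
void brw_batch_maybe_noop(struct brw_context *brw);
void brw_batch_data(struct brw_context *brw, const void *data, GLuint bytes);

/* brw_batch_flush() and brw_batch_flush_fence() remain convenience macros
 * wrapping _brw_batch_flush_fence(), exactly as the intel_batchbuffer_*
 * versions did. */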
@@ -53,10 +53,10 @@ brw_dispatch_compute_common(struct gl_context *ctx)
    /* Flush the batch if the batch/state buffers are nearly full. We can
     * grow them if needed, but this is not free, so we'd like to avoid it.
     */
-   intel_batchbuffer_require_space(brw, 600);
+   brw_batch_require_space(brw, 600);
    brw_require_statebuffer_space(brw, 2500);
-   intel_batchbuffer_save_state(brw);
-   fail_next = intel_batchbuffer_saved_state_is_empty(brw);
+   brw_batch_save_state(brw);
+   fail_next = brw_batch_saved_state_is_empty(brw);

 retry:
    brw->batch.no_wrap = true;
@@ -68,12 +68,12 @@ brw_dispatch_compute_common(struct gl_context *ctx)

    if (!brw_batch_has_aperture_space(brw, 0)) {
       if (!fail_next) {
-         intel_batchbuffer_reset_to_saved(brw);
-         intel_batchbuffer_flush(brw);
+         brw_batch_reset_to_saved(brw);
+         brw_batch_flush(brw);
          fail_next = true;
          goto retry;
       } else {
-         int ret = intel_batchbuffer_flush(brw);
+         int ret = brw_batch_flush(brw);
          WARN_ONCE(ret == -ENOSPC,
                    "i965: Single compute shader dispatch "
                    "exceeded available aperture space\n");
@@ -86,7 +86,7 @@ brw_dispatch_compute_common(struct gl_context *ctx)
    brw_compute_state_finished(brw);

    if (brw->always_flush_batch)
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    brw_program_cache_check_size(brw);

@@ -237,7 +237,7 @@ intel_flush_front(struct gl_context *ctx)
        * performance.
        */
       intel_resolve_for_dri2_flush(brw, driDrawable);
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

       flushFront(dri_screen)(driDrawable, driDrawable->loaderPrivate);

@@ -268,7 +268,7 @@ brw_display_shared_buffer(struct brw_context *brw)
        * no need to flush again here. But we want to provide a fence_fd to the
        * loader, and a redundant flush is the easiest way to acquire one.
        */
-      if (intel_batchbuffer_flush_fence(brw, -1, &fence_fd))
+      if (brw_batch_flush_fence(brw, -1, &fence_fd))
          return;
    }

@@ -283,7 +283,7 @@ intel_glFlush(struct gl_context *ctx)
 {
    struct brw_context *brw = brw_context(ctx);

-   intel_batchbuffer_flush(brw);
+   brw_batch_flush(brw);
    intel_flush_front(ctx);
    brw_display_shared_buffer(brw);
    brw->need_flush_throttle = true;
@@ -297,8 +297,8 @@ intel_glEnable(struct gl_context *ctx, GLenum cap, GLboolean state)
    switch (cap) {
    case GL_BLACKHOLE_RENDER_INTEL:
       brw->frontend_noop = state;
-      intel_batchbuffer_flush(brw);
-      intel_batchbuffer_maybe_noop(brw);
+      brw_batch_flush(brw);
+      brw_batch_maybe_noop(brw);
       /* Because we started previous batches with a potential
        * MI_BATCH_BUFFER_END if NOOP was enabled, that means that anything
        * that was ever emitted after that never made it to the HW. So when the
@@ -1057,7 +1057,7 @@ brwCreateContext(gl_api api,

    intel_fbo_init(brw);

-   intel_batchbuffer_init(brw);
+   brw_batch_init(brw);

    /* Create a new hardware context. Using a hardware context means that
     * our GPU state will be saved/restored on context switch, allowing us
@@ -1259,7 +1259,7 @@ intelDestroyContext(__DRIcontext * driContextPriv)
    _swrast_DestroyContext(&brw->ctx);

    brw_fini_pipe_control(brw);
-   intel_batchbuffer_free(&brw->batch);
+   brw_batch_free(&brw->batch);

    brw_bo_unreference(brw->throttle_batch[1]);
    brw_bo_unreference(brw->throttle_batch[0]);
@@ -1628,7 +1628,7 @@ intel_query_dri2_buffers(struct brw_context *brw,
        * query, we need to make sure all the pending drawing has landed in the
        * real front buffer.
        */
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
       intel_flush_front(&brw->ctx);

       attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
@@ -1640,7 +1640,7 @@ intel_query_dri2_buffers(struct brw_context *brw,
        * So before doing the query, make sure all the pending drawing has
        * landed in the real front buffer.
        */
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
       intel_flush_front(&brw->ctx);
    }

@@ -484,7 +484,7 @@ struct brw_growing_bo {
    enum brw_memory_zone memzone;
 };

-struct intel_batchbuffer {
+struct brw_batch {
    /** Current batchbuffer being queued up. */
    struct brw_growing_bo batch;
    /** Current statebuffer being queued up. */
@@ -761,7 +761,7 @@ struct brw_context
     */
    uint32_t reset_count;

-   struct intel_batchbuffer batch;
+   struct brw_batch batch;

    struct brw_uploader upload;

@@ -1328,7 +1328,7 @@ void hsw_init_queryobj_functions(struct dd_function_table *functions);
 void brw_init_conditional_render_functions(struct dd_function_table *functions);
 bool brw_check_conditional_render(struct brw_context *brw);

-/** intel_batchbuffer.c */
+/** brw_batch.c */
 void brw_load_register_mem(struct brw_context *brw,
                            uint32_t reg,
                            struct brw_bo *bo,
@@ -897,7 +897,7 @@ brw_finish_drawing(struct gl_context *ctx)
    struct brw_context *brw = brw_context(ctx);

    if (brw->always_flush_batch)
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    brw_program_cache_check_size(brw);
    brw_postdraw_reconcile_align_wa_slices(brw);
@@ -1005,10 +1005,10 @@ brw_draw_single_prim(struct gl_context *ctx,
    /* Flush the batch if the batch/state buffers are nearly full. We can
     * grow them if needed, but this is not free, so we'd like to avoid it.
     */
-   intel_batchbuffer_require_space(brw, 1500);
+   brw_batch_require_space(brw, 1500);
    brw_require_statebuffer_space(brw, 2400);
-   intel_batchbuffer_save_state(brw);
-   fail_next = intel_batchbuffer_saved_state_is_empty(brw);
+   brw_batch_save_state(brw);
+   fail_next = brw_batch_saved_state_is_empty(brw);

    if (brw->num_instances != num_instances ||
        brw->basevertex != prim->basevertex ||
@@ -1089,7 +1089,7 @@ retry:

    /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
     * that the state updated in the loop outside of this block is that in
-    * *_set_prim or intel_batchbuffer_flush(), which only impacts
+    * *_set_prim or brw_batch_flush(), which only impacts
     * brw->ctx.NewDriverState.
     */
    if (brw->ctx.NewDriverState) {
@@ -1108,12 +1108,12 @@ retry:

    if (!brw_batch_has_aperture_space(brw, 0)) {
       if (!fail_next) {
-         intel_batchbuffer_reset_to_saved(brw);
-         intel_batchbuffer_flush(brw);
+         brw_batch_reset_to_saved(brw);
+         brw_batch_flush(brw);
          fail_next = true;
         goto retry;
       } else {
-         int ret = intel_batchbuffer_flush(brw);
+         int ret = brw_batch_flush(brw);
          WARN_ONCE(ret == -ENOSPC,
                    "i965: Single primitive emit exceeded "
                    "available aperture space\n");
@@ -350,7 +350,7 @@ brw_emit_depthbuffer(struct brw_context *brw)
    brw_emit_depth_stall_flushes(brw);

    const unsigned ds_dwords = brw->isl_dev.ds.size / 4;
-   intel_batchbuffer_begin(brw, ds_dwords);
+   brw_batch_begin(brw, ds_dwords);
    uint32_t *ds_map = brw->batch.map_next;
    const uint32_t ds_offset = (char *)ds_map - (char *)brw->batch.batch.map;

@@ -450,7 +450,7 @@ brw_emit_depthbuffer(struct brw_context *brw)
    isl_emit_depth_stencil_hiz_s(&brw->isl_dev, ds_map, &info);

    brw->batch.map_next += ds_dwords;
-   intel_batchbuffer_advance(brw);
+   brw_batch_advance(brw);

    brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
 }
@@ -442,7 +442,7 @@ static void
 brw_oa_batchbuffer_flush(void *c, const char *file, int line)
 {
    struct brw_context *ctx = c;
-   _intel_batchbuffer_flush_fence(ctx, -1, NULL, file, line);
+   _brw_batch_flush_fence(ctx, -1, NULL, file, line);
 }

 static void
@@ -436,7 +436,7 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
    brw->wm.base.prog_data = NULL;
    brw->cs.base.prog_data = NULL;

-   intel_batchbuffer_flush(brw);
+   brw_batch_flush(brw);
 }

 void
@@ -141,7 +141,7 @@ brw_queryobj_get_results(struct gl_context *ctx,
     * when mapped.
     */
    if (brw_batch_references(&brw->batch, query->bo))
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    if (unlikely(brw->perf_debug)) {
       if (brw_bo_busy(query->bo)) {
@@ -411,7 +411,7 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
     * the async query will return true in finite time.
     */
    if (query->bo && brw_batch_references(&brw->batch, query->bo))
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    if (query->bo == NULL || !brw_bo_busy(query->bo)) {
       brw_queryobj_get_results(ctx, query);
@@ -209,7 +209,7 @@ void brw_print_program_cache(struct brw_context *brw);

 enum brw_cache_id brw_stage_cache_id(gl_shader_stage stage);

-/* intel_batchbuffer.c */
+/* brw_batch.c */
 void brw_require_statebuffer_space(struct brw_context *brw, int size);
 void *brw_state_batch(struct brw_context *brw,
                       int size, int alignment, uint32_t *out_offset);
@@ -149,7 +149,7 @@ brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
       fence->batch_bo = brw->batch.batch.bo;
       brw_bo_reference(fence->batch_bo);

-      if (intel_batchbuffer_flush(brw) < 0) {
+      if (brw_batch_flush(brw) < 0) {
          brw_bo_unreference(fence->batch_bo);
          fence->batch_bo = NULL;
          return false;
@@ -162,19 +162,19 @@ brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
         /* Create an out-fence that signals after all pending commands
          * complete.
          */
-        if (intel_batchbuffer_flush_fence(brw, -1, &fence->sync_fd) < 0)
+        if (brw_batch_flush_fence(brw, -1, &fence->sync_fd) < 0)
            return false;
         assert(fence->sync_fd != -1);
      } else {
         /* Wait on the in-fence before executing any subsequently submitted
          * commands.
          */
-        if (intel_batchbuffer_flush(brw) < 0)
+        if (brw_batch_flush(brw) < 0)
            return false;

         /* Emit a dummy batch just for the fence. */
         brw_emit_mi_flush(brw);
-        if (intel_batchbuffer_flush_fence(brw, fence->sync_fd, NULL) < 0)
+        if (brw_batch_flush_fence(brw, fence->sync_fd, NULL) < 0)
            return false;
      }
      break;
@@ -204,7 +204,7 @@ brw_fence_has_completed_locked(struct brw_fence *fence)
    switch (fence->type) {
    case BRW_FENCE_TYPE_BO_WAIT:
       if (!fence->batch_bo) {
-         /* There may be no batch if intel_batchbuffer_flush() failed. */
+         /* There may be no batch if brw_batch_flush() failed. */
          return false;
       }

@@ -255,7 +255,7 @@ brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
    switch (fence->type) {
    case BRW_FENCE_TYPE_BO_WAIT:
       if (!fence->batch_bo) {
-         /* There may be no batch if intel_batchbuffer_flush() failed. */
+         /* There may be no batch if brw_batch_flush() failed. */
          return false;
       }

@@ -264,5 +264,5 @@ void brw_upload_urb_fence(struct brw_context *brw)
       while (--pad);
    }

-   intel_batchbuffer_data(brw, &uf, sizeof(uf));
+   brw_batch_data(brw, &uf, sizeof(uf));
 }
@@ -486,7 +486,7 @@ flush_batch_if_needed(struct brw_context *brw, struct brw_query_object *query)
                         !brw_batch_references(&brw->batch, query->bo);

    if (!query->flushed)
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
 }

 /**
@@ -243,7 +243,7 @@ aggregate_transform_feedback_counter(
     * generated, flush it now so the results will be present when mapped.
     */
    if (brw_batch_references(&brw->batch, bo))
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    if (unlikely(brw->perf_debug && brw_bo_busy(bo)))
       perf_debug("Stalling for # of transform feedback primitives written.\n");
@@ -50,7 +50,7 @@ gen7_begin_transform_feedback(struct gl_context *ctx, GLenum mode,

    /* Reset the SO buffer offsets to 0. */
    if (!can_do_pipelined_register_writes(brw->screen)) {
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
       brw->batch.needs_sol_reset = true;
    } else {
       for (int i = 0; i < 4; i++) {
@@ -47,10 +47,10 @@ blorp_emit_dwords(struct blorp_batch *batch, unsigned n)
    assert(batch->blorp->driver_ctx == batch->driver_batch);
    struct brw_context *brw = batch->driver_batch;

-   intel_batchbuffer_begin(brw, n);
+   brw_batch_begin(brw, n);
    uint32_t *map = brw->batch.map_next;
    brw->batch.map_next += n;
-   intel_batchbuffer_advance(brw);
+   brw_batch_advance(brw);
    return map;
 }

@@ -328,10 +328,10 @@ genX(blorp_exec)(struct blorp_batch *batch,
    brw_emit_l3_state(brw);

 retry:
-   intel_batchbuffer_require_space(brw, 1400);
+   brw_batch_require_space(brw, 1400);
    brw_require_statebuffer_space(brw, 600);
-   intel_batchbuffer_save_state(brw);
-   check_aperture_failed_once |= intel_batchbuffer_saved_state_is_empty(brw);
+   brw_batch_save_state(brw);
+   check_aperture_failed_once |= brw_batch_saved_state_is_empty(brw);
    brw->batch.no_wrap = true;

 #if GEN_GEN == 6
@@ -375,18 +375,18 @@ retry:
    if (!brw_batch_has_aperture_space(brw, 0)) {
       if (!check_aperture_failed_once) {
          check_aperture_failed_once = true;
-         intel_batchbuffer_reset_to_saved(brw);
-         intel_batchbuffer_flush(brw);
+         brw_batch_reset_to_saved(brw);
+         brw_batch_flush(brw);
          goto retry;
       } else {
-         int ret = intel_batchbuffer_flush(brw);
+         int ret = brw_batch_flush(brw);
          WARN_ONCE(ret == -ENOSPC,
                    "i965: blorp emit exceeded available aperture space\n");
       }
    }

    if (unlikely(brw->always_flush_batch))
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    /* We've smashed all state compared to what the normal 3D pipeline
     * rendering tracks for GL.
@@ -33,10 +33,10 @@
 UNUSED static void *
 emit_dwords(struct brw_context *brw, unsigned n)
 {
-   intel_batchbuffer_begin(brw, n);
+   brw_batch_begin(brw, n);
    uint32_t *map = brw->batch.map_next;
    brw->batch.map_next += n;
-   intel_batchbuffer_advance(brw);
+   brw_batch_advance(brw);
    return map;
 }

@@ -53,7 +53,7 @@ static uint64_t
 __gen_combine_address(struct brw_context *brw, void *location,
                       struct brw_address address, uint32_t delta)
 {
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    uint32_t offset;

    if (address.bo == NULL) {
@@ -54,12 +54,12 @@
 #define STATE_SZ (16 * 1024)

 static void
-intel_batchbuffer_reset(struct brw_context *brw);
+brw_batch_reset(struct brw_context *brw);
 static void
 brw_new_batch(struct brw_context *brw);

 static void
-dump_validation_list(struct intel_batchbuffer *batch)
+dump_validation_list(struct brw_batch *batch)
 {
    fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

@@ -84,7 +84,7 @@ static struct gen_batch_decode_bo
 decode_get_bo(void *v_brw, bool ppgtt, uint64_t address)
 {
    struct brw_context *brw = v_brw;
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;

    for (int i = 0; i < batch->exec_count; i++) {
       struct brw_bo *bo = batch->exec_bos[i];
@@ -107,7 +107,7 @@ static unsigned
 decode_get_state_size(void *v_brw, uint64_t address, uint64_t base_address)
 {
    struct brw_context *brw = v_brw;
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    unsigned size = (uintptr_t)
       _mesa_hash_table_u64_search(batch->state_batch_sizes,
                                   address - base_address);
@@ -124,10 +124,10 @@ init_reloc_list(struct brw_reloc_list *rlist, int count)
 }

 void
-intel_batchbuffer_init(struct brw_context *brw)
+brw_batch_init(struct brw_context *brw)
 {
    struct intel_screen *screen = brw->screen;
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    const struct gen_device_info *devinfo = &screen->devinfo;

    if (INTEL_DEBUG & DEBUG_BATCH) {
@@ -172,13 +172,13 @@ intel_batchbuffer_init(struct brw_context *brw)
    if (devinfo->gen == 6)
       batch->valid_reloc_flags |= EXEC_OBJECT_NEEDS_GTT;

-   intel_batchbuffer_reset(brw);
+   brw_batch_reset(brw);
 }

 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

 static unsigned
-add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
+add_exec_bo(struct brw_batch *batch, struct brw_bo *bo)
 {
    assert(bo->bufmgr == batch->batch.bo->bufmgr);

@@ -226,7 +226,7 @@ recreate_growing_buffer(struct brw_context *brw,
                         enum brw_memory_zone memzone)
 {
    struct intel_screen *screen = brw->screen;
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    struct brw_bufmgr *bufmgr = screen->bufmgr;

    /* We can't grow buffers when using softpin, so just overallocate them. */
@@ -247,9 +247,9 @@ recreate_growing_buffer(struct brw_context *brw,
 }

 static void
-intel_batchbuffer_reset(struct brw_context *brw)
+brw_batch_reset(struct brw_context *brw)
 {
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;

    if (batch->last_bo != NULL) {
       brw_bo_unreference(batch->last_bo);
@@ -287,14 +287,14 @@ intel_batchbuffer_reset(struct brw_context *brw)
 }

 static void
-intel_batchbuffer_reset_and_clear_render_cache(struct brw_context *brw)
+brw_batch_reset_and_clear_render_cache(struct brw_context *brw)
 {
-   intel_batchbuffer_reset(brw);
+   brw_batch_reset(brw);
    brw_cache_sets_clear(brw);
 }

 void
-intel_batchbuffer_save_state(struct brw_context *brw)
+brw_batch_save_state(struct brw_context *brw)
 {
    brw->batch.saved.map_next = brw->batch.map_next;
    brw->batch.saved.batch_reloc_count = brw->batch.batch_relocs.reloc_count;
@@ -303,14 +303,14 @@ intel_batchbuffer_save_state(struct brw_context *brw)
 }

 bool
-intel_batchbuffer_saved_state_is_empty(struct brw_context *brw)
+brw_batch_saved_state_is_empty(struct brw_context *brw)
 {
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    return (batch->saved.map_next == batch->batch.map);
 }

 void
-intel_batchbuffer_reset_to_saved(struct brw_context *brw)
+brw_batch_reset_to_saved(struct brw_context *brw)
 {
    for (int i = brw->batch.saved.exec_count;
         i < brw->batch.exec_count; i++) {
@@ -326,7 +326,7 @@ intel_batchbuffer_reset_to_saved(struct brw_context *brw)
 }

 void
-intel_batchbuffer_free(struct intel_batchbuffer *batch)
+brw_batch_free(struct brw_batch *batch)
 {
    if (batch->use_shadow_copy) {
       free(batch->batch.map);
@@ -395,7 +395,7 @@ grow_buffer(struct brw_context *brw,
             unsigned existing_bytes,
             unsigned new_size)
 {
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    struct brw_bufmgr *bufmgr = brw->bufmgr;
    struct brw_bo *bo = grow->bo;

@@ -530,13 +530,13 @@ grow_buffer(struct brw_context *brw,
 }

 void
-intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz)
+brw_batch_require_space(struct brw_context *brw, GLuint sz)
 {
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;

    const unsigned batch_used = USED_BATCH(*batch) * 4;
    if (batch_used + sz >= BATCH_SZ && !batch->no_wrap) {
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
    } else if (batch_used + sz >= batch->batch.bo->size) {
       const unsigned new_size =
          MIN2(batch->batch.bo->size + batch->batch.bo->size / 2,
@@ -566,7 +566,7 @@ brw_new_batch(struct brw_context *brw)
    brw_bo_unreference(brw->batch.state.bo);

    /* Create a new batchbuffer and reset the associated state: */
-   intel_batchbuffer_reset_and_clear_render_cache(brw);
+   brw_batch_reset_and_clear_render_cache(brw);

    /* If the kernel supports hardware contexts, then most hardware state is
     * preserved between batches; we only need to re-emit state that is required
@@ -591,11 +591,11 @@ brw_new_batch(struct brw_context *brw)
    if (INTEL_DEBUG & DEBUG_SHADER_TIME)
       brw_collect_and_report_shader_time(brw);

-   intel_batchbuffer_maybe_noop(brw);
+   brw_batch_maybe_noop(brw);
 }

 /**
- * Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
+ * Called from brw_batch_flush before emitting MI_BATCHBUFFER_END and
  * sending it off.
  *
  * This function can emit state (say, to preserve registers that aren't saved
@@ -650,7 +650,7 @@ brw_finish_batch(struct brw_context *brw)
        * requires our batch size to be QWord aligned, so we pad it out if
        * necessary by emitting an extra MI_NOOP after the end.
        */
-      intel_batchbuffer_require_space(brw, 8);
+      brw_batch_require_space(brw, 8);
       *brw->batch.map_next++ = MI_BATCH_BUFFER_END;
       if (USED_BATCH(brw->batch) & 1) {
          *brw->batch.map_next++ = MI_NOOP;
@@ -699,7 +699,7 @@ throttle(struct brw_context *brw)

 static int
 execbuffer(int fd,
-           struct intel_batchbuffer *batch,
+           struct brw_batch *batch,
            uint32_t ctx_id,
            int used,
            int in_fence,
@@ -757,7 +757,7 @@ execbuffer(int fd,
 static int
 submit_batch(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
 {
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    int ret = 0;

    if (batch->use_shadow_copy) {
@@ -856,7 +856,7 @@ submit_batch(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
  * of the returned fd.
  */
 int
-_intel_batchbuffer_flush_fence(struct brw_context *brw,
+_brw_batch_flush_fence(struct brw_context *brw,
                                int in_fence_fd, int *out_fence_fd,
                                const char *file, int line)
 {
@@ -909,7 +909,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
 }

 void
-intel_batchbuffer_maybe_noop(struct brw_context *brw)
+brw_batch_maybe_noop(struct brw_context *brw)
 {
    if (!brw->frontend_noop || USED_BATCH(brw->batch) != 0)
       return;
@@ -920,7 +920,7 @@ intel_batchbuffer_maybe_noop(struct brw_context *brw)
 }

 bool
-brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
+brw_batch_references(struct brw_batch *batch, struct brw_bo *bo)
 {
    unsigned index = READ_ONCE(bo->index);
    if (index < batch->exec_count && batch->exec_bos[index] == bo)
@@ -936,7 +936,7 @@ brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
 /* This is the only way buffers get added to the validate list.
  */
 static uint64_t
-emit_reloc(struct intel_batchbuffer *batch,
+emit_reloc(struct brw_batch *batch,
            struct brw_reloc_list *rlist, uint32_t offset,
            struct brw_bo *target, int32_t target_offset,
            unsigned int reloc_flags)
@@ -992,7 +992,7 @@ emit_reloc(struct intel_batchbuffer *batch,
 }

 void
-brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
+brw_use_pinned_bo(struct brw_batch *batch, struct brw_bo *bo,
                   unsigned writable_flag)
 {
    assert(bo->kflags & EXEC_OBJECT_PINNED);
@@ -1007,7 +1007,7 @@ brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
 }

 uint64_t
-brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
+brw_batch_reloc(struct brw_batch *batch, uint32_t batch_offset,
                 struct brw_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
 {
@@ -1018,7 +1018,7 @@ brw_batch_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
 }

 uint64_t
-brw_state_reloc(struct intel_batchbuffer *batch, uint32_t state_offset,
+brw_state_reloc(struct brw_batch *batch, uint32_t state_offset,
                 struct brw_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
 {
@@ -1038,7 +1038,7 @@ void
 brw_require_statebuffer_space(struct brw_context *brw, int size)
 {
    if (brw->batch.state_used + size >= STATE_SZ)
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
 }

 /**
@@ -1050,14 +1050,14 @@ brw_state_batch(struct brw_context *brw,
                 int alignment,
                 uint32_t *out_offset)
 {
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;

    assert(size < batch->state.bo->size);

    uint32_t offset = ALIGN(batch->state_used, alignment);

    if (offset + size >= STATE_SZ && !batch->no_wrap) {
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
       offset = ALIGN(batch->state_used, alignment);
    } else if (offset + size >= batch->state.bo->size) {
       const unsigned new_size =
@@ -1079,11 +1079,11 @@ brw_state_batch(struct brw_context *brw,
 }

 void
-intel_batchbuffer_data(struct brw_context *brw,
+brw_batch_data(struct brw_context *brw,
                        const void *data, GLuint bytes)
 {
    assert((bytes & 3) == 0);
-   intel_batchbuffer_require_space(brw, bytes);
+   brw_batch_require_space(brw, bytes);
    memcpy(brw->batch.map_next, data, bytes);
    brw->batch.map_next += bytes >> 2;
 }
@@ -19,31 +19,31 @@ extern "C" {
  */
 #define MAX_STATE_SIZE (64 * 1024)

-struct intel_batchbuffer;
+struct brw_batch;

-void intel_batchbuffer_init(struct brw_context *brw);
-void intel_batchbuffer_free(struct intel_batchbuffer *batch);
-void intel_batchbuffer_save_state(struct brw_context *brw);
-bool intel_batchbuffer_saved_state_is_empty(struct brw_context *brw);
-void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
-void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz);
-int _intel_batchbuffer_flush_fence(struct brw_context *brw,
+void brw_batch_init(struct brw_context *brw);
+void brw_batch_free(struct brw_batch *batch);
+void brw_batch_save_state(struct brw_context *brw);
+bool brw_batch_saved_state_is_empty(struct brw_context *brw);
+void brw_batch_reset_to_saved(struct brw_context *brw);
+void brw_batch_require_space(struct brw_context *brw, GLuint sz);
+int _brw_batch_flush_fence(struct brw_context *brw,
                                    int in_fence_fd, int *out_fence_fd,
                                    const char *file, int line);
-void intel_batchbuffer_maybe_noop(struct brw_context *brw);
+void brw_batch_maybe_noop(struct brw_context *brw);

-#define intel_batchbuffer_flush(brw) \
-   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
+#define brw_batch_flush(brw) \
+   _brw_batch_flush_fence((brw), -1, NULL, __FILE__, __LINE__)

-#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
-   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
+#define brw_batch_flush_fence(brw, in_fence_fd, out_fence_fd) \
+   _brw_batch_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
                                   __FILE__, __LINE__)

 /* Unlike bmBufferData, this currently requires the buffer be mapped.
  * Consider it a convenience function wrapping multple
  * intel_buffer_dword() calls.
  */
-void intel_batchbuffer_data(struct brw_context *brw,
+void brw_batch_data(struct brw_context *brw,
                             const void *data, GLuint bytes);

 static inline bool
@@ -53,22 +53,22 @@ brw_batch_has_aperture_space(struct brw_context *brw, uint64_t extra_space)
           brw->screen->aperture_threshold;
 }

-bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
+bool brw_batch_references(struct brw_batch *batch, struct brw_bo *bo);

 #define RELOC_WRITE EXEC_OBJECT_WRITE
 #define RELOC_NEEDS_GGTT EXEC_OBJECT_NEEDS_GTT
 /* Inverted meaning, but using the same bit...emit_reloc will flip it. */
 #define RELOC_32BIT EXEC_OBJECT_SUPPORTS_48B_ADDRESS

-void brw_use_pinned_bo(struct intel_batchbuffer *batch, struct brw_bo *bo,
+void brw_use_pinned_bo(struct brw_batch *batch, struct brw_bo *bo,
                        unsigned writeable_flag);

-uint64_t brw_batch_reloc(struct intel_batchbuffer *batch,
+uint64_t brw_batch_reloc(struct brw_batch *batch,
                          uint32_t batch_offset,
                          struct brw_bo *target,
                          uint32_t target_offset,
                          unsigned flags);
-uint64_t brw_state_reloc(struct intel_batchbuffer *batch,
+uint64_t brw_state_reloc(struct brw_batch *batch,
                          uint32_t batch_offset,
                          struct brw_bo *target,
                          uint32_t target_offset,
@@ -89,9 +89,9 @@ static inline uint32_t float_as_int(float f)
 }

 static inline void
-intel_batchbuffer_begin(struct brw_context *brw, int n)
+brw_batch_begin(struct brw_context *brw, int n)
 {
-   intel_batchbuffer_require_space(brw, n * 4);
+   brw_batch_require_space(brw, n * 4);

 #ifdef DEBUG
    brw->batch.emit = USED_BATCH(brw->batch);
@@ -100,10 +100,10 @@ intel_batchbuffer_begin(struct brw_context *brw, int n)
 }

 static inline void
-intel_batchbuffer_advance(struct brw_context *brw)
+brw_batch_advance(struct brw_context *brw)
 {
 #ifdef DEBUG
-   struct intel_batchbuffer *batch = &brw->batch;
+   struct brw_batch *batch = &brw->batch;
    unsigned int _n = USED_BATCH(*batch) - batch->emit;
    assert(batch->total != 0);
    if (_n != batch->total) {
@@ -118,20 +118,20 @@ intel_batchbuffer_advance(struct brw_context *brw)
 }

 static inline bool
-brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)
+brw_ptr_in_state_buffer(struct brw_batch *batch, void *p)
 {
    return (char *) p >= (char *) batch->state.map &&
           (char *) p < (char *) batch->state.map + batch->state.bo->size;
 }

 #define BEGIN_BATCH(n) do { \
-   intel_batchbuffer_begin(brw, (n)); \
+   brw_batch_begin(brw, (n)); \
    uint32_t *__map = brw->batch.map_next; \
    brw->batch.map_next += (n)

 #define BEGIN_BATCH_BLT(n) do { \
    assert(brw->screen->devinfo.gen < 6); \
-   intel_batchbuffer_begin(brw, (n)); \
+   brw_batch_begin(brw, (n)); \
    uint32_t *__map = brw->batch.map_next; \
    brw->batch.map_next += (n)

@@ -156,7 +156,7 @@ brw_ptr_in_state_buffer(struct intel_batchbuffer *batch, void *p)

 #define ADVANCE_BATCH() \
    assert(__map == brw->batch.map_next); \
-   intel_batchbuffer_advance(brw); \
+   brw_batch_advance(brw); \
 } while (0)

 #ifdef __cplusplus
@@ -272,14 +272,14 @@ emit_copy_blit(struct brw_context *brw,

    /* do space check before going any further */
    if (!brw_batch_has_aperture_space(brw, bo_sizes))
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    if (!brw_batch_has_aperture_space(brw, bo_sizes))
       return false;

    unsigned length = devinfo->gen >= 8 ? 10 : 8;

-   intel_batchbuffer_require_space(brw, length * 4);
+   brw_batch_require_space(brw, length * 4);
    DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
        __func__,
        src_buffer, src_pitch, src_offset, src_x, src_y,
@@ -652,7 +652,7 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
        dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);

    unsigned xy_setup_blt_length = devinfo->gen >= 8 ? 10 : 8;
-   intel_batchbuffer_require_space(brw, (xy_setup_blt_length * 4) +
+   brw_batch_require_space(brw, (xy_setup_blt_length * 4) +
                                    (3 * 4) + dwords * 4);

    opcode = XY_SETUP_BLT_CMD;
@@ -691,7 +691,7 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
    OUT_BATCH(SET_FIELD(y + h, BLT_Y) | SET_FIELD(x + w, BLT_X));
    ADVANCE_BATCH();

-   intel_batchbuffer_data(brw, src_bits, dwords * 4);
+   brw_batch_data(brw, src_bits, dwords * 4);

    brw_emit_mi_flush(brw);

@@ -737,7 +737,7 @@ intel_miptree_set_alpha_to_one(struct brw_context *brw,

    /* do space check before going any further */
    if (!brw_batch_has_aperture_space(brw, mt->bo->size))
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    unsigned length = devinfo->gen >= 8 ? 7 : 6;
    const bool dst_y_tiled = mt->surf.tiling == ISL_TILING_Y0;
@@ -310,7 +310,7 @@ brw_buffer_subdata(struct gl_context *ctx,
                  (long)offset, (long)offset + size, (long)(size/1024),
                  intel_obj->gpu_active_start,
                  intel_obj->gpu_active_end);
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
       }
    }

@@ -340,7 +340,7 @@ brw_get_buffer_subdata(struct gl_context *ctx,

    assert(intel_obj);
    if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
    }

    unsigned int map_flags = MAP_READ;
@@ -432,7 +432,7 @@ brw_map_buffer_range(struct gl_context *ctx,
       } else {
          perf_debug("Stalling on the GPU for mapping a busy buffer "
                     "object\n");
-         intel_batchbuffer_flush(brw);
+         brw_batch_flush(brw);
       }
    } else if (brw_bo_busy(intel_obj->buffer) &&
              (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
@@ -2357,7 +2357,7 @@ intel_miptree_map_raw(struct brw_context *brw,
    struct brw_bo *bo = mt->bo;

    if (brw_batch_references(&brw->batch, bo))
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    return brw_bo_map(brw, bo, mode);
 }
@@ -310,7 +310,7 @@ do_blit_bitmap( struct gl_context *ctx,
 out:

    if (INTEL_DEBUG & DEBUG_SYNC)
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);

    if (unpack->BufferObj) {
       /* done with PBO so unmap it now */
@@ -144,7 +144,7 @@ do_blit_copypixels(struct gl_context * ctx,
       return false;
    }

-   intel_batchbuffer_flush(brw);
+   brw_batch_flush(brw);

    /* Clip to destination buffer. */
    orig_dstx = dstx;
@@ -155,7 +155,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,

    if (brw_batch_references(&brw->batch, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
    }

    void *map = brw_bo_map(brw, bo, MAP_READ | MAP_RAW);
@@ -171,7 +171,7 @@ intel_dri2_flush_with_flags(__DRIcontext *cPriv,
    if (reason == __DRI2_THROTTLE_FLUSHFRONT)
       brw->need_flush_throttle = true;

-   intel_batchbuffer_flush(brw);
+   brw_batch_flush(brw);
 }

 /**
@@ -263,7 +263,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,

    if (brw_batch_references(&brw->batch, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
    }

    void *map = brw_bo_map(brw, bo, MAP_WRITE | MAP_RAW);
@@ -816,7 +816,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,

    if (brw_batch_references(&brw->batch, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
-      intel_batchbuffer_flush(brw);
+      brw_batch_flush(brw);
    }

    void *map = brw_bo_map(brw, bo, MAP_READ | MAP_RAW);