Mirror of https://gitlab.freedesktop.org/mesa/mesa.git (synced 2026-01-19 06:40:32 +01:00)
i965/drm: Use our internal libdrm (drm_bacon) rather than the real one.
Now we can actually test our changes.

Acked-by: Jason Ekstrand <jason@jlekstrand.net>
parent 91b973e3a3
commit eed86b975e
60 changed files with 442 additions and 436 deletions
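The diff below is almost entirely a mechanical rename: the i965 driver stops linking the system libdrm_intel and instead uses a copy built into Mesa, renaming the drm_intel_* entry points to drm_bacon_* (and intel_bufmgr.h to brw_bufmgr.h) while keeping signatures and struct layout unchanged. As a reading aid, here is a minimal C sketch of the before/after call-site shape; the function example_roundtrip is hypothetical, but every drm_bacon_* call and the virtual/size fields appear in the hunks that follow.

    /* Hypothetical sketch, not a file from this commit: illustrates the
     * mechanical drm_intel_* -> drm_bacon_* rename applied throughout.
     */
    #include <string.h>
    #include <brw_bufmgr.h>        /* was <intel_bufmgr.h> */

    static void
    example_roundtrip(drm_bacon_bufmgr *bufmgr)
    {
       /* was: drm_intel_bo_alloc(...) */
       drm_bacon_bo *bo = drm_bacon_bo_alloc(bufmgr, "example", 4096, 4096);

       drm_bacon_bo_map(bo, true);         /* was drm_intel_bo_map() */
       memset(bo->virtual, 0, bo->size);   /* BO struct layout is unchanged */
       drm_bacon_bo_unmap(bo);             /* was drm_intel_bo_unmap() */

       drm_bacon_bo_unreference(bo);       /* was drm_intel_bo_unreference() */
    }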
@@ -40,7 +40,8 @@ AM_CFLAGS = \
 	-I$(top_srcdir)/src/intel \
 	$(DEFINES) \
 	$(VISIBILITY_CFLAGS) \
-	$(INTEL_CFLAGS)
+	$(LIBDRM_CFLAGS) \
+	$(VALGRIND_CFLAGS)
 
 AM_CXXFLAGS = $(AM_CFLAGS)
 
@@ -79,7 +80,7 @@ libi965_dri_la_LIBADD = \
 	$(top_builddir)/src/intel/compiler/libintel_compiler.la \
 	$(top_builddir)/src/intel/blorp/libblorp.la \
 	$(I965_PERGEN_LIBS) \
-	$(INTEL_LIBS)
+	$(LIBDRM_LIBS)
 
 BUILT_SOURCES = $(i965_oa_GENERATED_FILES)
 CLEANFILES = $(BUILT_SOURCES)
@@ -2,6 +2,7 @@ i965_FILES = \
 	brw_binding_tables.c \
 	brw_blorp.c \
 	brw_blorp.h \
+	brw_bufmgr.h \
 	brw_cc.c \
 	brw_clear.c \
 	brw_clip.c \
@@ -130,6 +131,10 @@ i965_FILES = \
 	intel_buffer_objects.h \
 	intel_buffers.c \
 	intel_buffers.h \
+	intel_bufmgr.c \
+	intel_bufmgr_gem.c \
+	intel_bufmgr_priv.h \
+	intel_chipset.h \
 	intel_copy_image.c \
 	intel_extensions.c \
 	intel_fbo.c \
@@ -157,7 +162,11 @@ i965_FILES = \
 	intel_tex_validate.c \
 	intel_tiled_memcpy.c \
 	intel_tiled_memcpy.h \
-	intel_upload.c
+	intel_upload.c \
+	libdrm_lists.h \
+	libdrm_macros.h \
+	uthash.h \
+	xf86atomic.h
 
 i965_gen6_FILES = \
 	genX_blorp_exec.c
@@ -26,7 +26,7 @@
  */
 
 /**
- * @file intel_bufmgr.h
+ * @file brw_bufmgr.h
  *
  * Public definitions of Intel-specific bufmgr functions.
  */
@@ -236,7 +236,7 @@ static void upload_cc_unit(struct brw_context *brw)
    brw->ctx.NewDriverState |= BRW_NEW_GEN4_UNIT_STATE;
 
    /* Emit CC viewport relocation */
-   drm_intel_bo_emit_reloc(brw->batch.bo,
+   drm_bacon_bo_emit_reloc(brw->batch.bo,
                            (brw->cc.state_offset +
                             offsetof(struct brw_cc_unit_state, cc4)),
                            brw->batch.bo, brw->cc.vp_offset,
@@ -136,7 +136,7 @@ brw_upload_clip_unit(struct brw_context *brw)
       (brw->batch.bo->offset64 + brw->clip.vp_offset) >> 5;
 
    /* emit clip viewport relocation */
-   drm_intel_bo_emit_reloc(brw->batch.bo,
+   drm_bacon_bo_emit_reloc(brw->batch.bo,
                            (brw->clip.state_offset +
                             offsetof(struct brw_clip_unit_state, clip6)),
                            brw->batch.bo, brw->clip.vp_offset,
@@ -38,7 +38,7 @@ static void
 prepare_indirect_gpgpu_walker(struct brw_context *brw)
 {
    GLintptr indirect_offset = brw->compute.num_work_groups_offset;
-   drm_intel_bo *bo = brw->compute.num_work_groups_bo;
+   drm_bacon_bo *bo = brw->compute.num_work_groups_bo;
 
    brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
                          I915_GEM_DOMAIN_VERTEX, 0,
@@ -212,7 +212,7 @@ brw_dispatch_compute_common(struct gl_context *ctx)
 
    brw->no_batch_wrap = false;
 
-   if (drm_intel_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
+   if (drm_bacon_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
       if (!fail_next) {
          intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
@@ -258,7 +258,7 @@ brw_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect)
    struct brw_context *brw = brw_context(ctx);
    static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
    struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
-   drm_intel_bo *bo =
+   drm_bacon_bo *bo =
       intel_bufferobj_buffer(brw,
                              intel_buffer_object(indirect_buffer),
                              indirect, 3 * sizeof(GLuint));
@@ -169,7 +169,7 @@ intel_update_framebuffer(struct gl_context *ctx,
 }
 
 static bool
-intel_disable_rb_aux_buffer(struct brw_context *brw, const drm_intel_bo *bo)
+intel_disable_rb_aux_buffer(struct brw_context *brw, const drm_bacon_bo *bo)
 {
    const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
    bool found = false;
@@ -413,7 +413,7 @@ intel_finish(struct gl_context * ctx)
    intel_glFlush(ctx);
 
    if (brw->batch.last_bo)
-      drm_intel_bo_wait_rendering(brw->batch.last_bo);
+      drm_bacon_bo_wait_rendering(brw->batch.last_bo);
 }
 
 static void
@@ -891,7 +891,7 @@ brw_process_driconf_options(struct brw_context *brw)
    case DRI_CONF_BO_REUSE_DISABLED:
       break;
    case DRI_CONF_BO_REUSE_ALL:
-      drm_intel_bufmgr_gem_enable_reuse(brw->bufmgr);
+      drm_bacon_bufmgr_gem_enable_reuse(brw->bufmgr);
       break;
    }
 
@@ -1094,7 +1094,7 @@ brwCreateContext(gl_api api,
     * This is required for transform feedback buffer offsets, query objects,
     * and also allows us to reduce how much state we have to emit.
     */
-   brw->hw_ctx = drm_intel_gem_context_create(brw->bufmgr);
+   brw->hw_ctx = drm_bacon_gem_context_create(brw->bufmgr);
 
    if (!brw->hw_ctx) {
      fprintf(stderr, "Failed to create hardware context.\n");
@@ -1188,19 +1188,19 @@ intelDestroyContext(__DRIcontext * driContextPriv)
    brw_destroy_state(brw);
    brw_draw_destroy(brw);
 
-   drm_intel_bo_unreference(brw->curbe.curbe_bo);
+   drm_bacon_bo_unreference(brw->curbe.curbe_bo);
    if (brw->vs.base.scratch_bo)
-      drm_intel_bo_unreference(brw->vs.base.scratch_bo);
+      drm_bacon_bo_unreference(brw->vs.base.scratch_bo);
    if (brw->tcs.base.scratch_bo)
-      drm_intel_bo_unreference(brw->tcs.base.scratch_bo);
+      drm_bacon_bo_unreference(brw->tcs.base.scratch_bo);
    if (brw->tes.base.scratch_bo)
-      drm_intel_bo_unreference(brw->tes.base.scratch_bo);
+      drm_bacon_bo_unreference(brw->tes.base.scratch_bo);
    if (brw->gs.base.scratch_bo)
-      drm_intel_bo_unreference(brw->gs.base.scratch_bo);
+      drm_bacon_bo_unreference(brw->gs.base.scratch_bo);
    if (brw->wm.base.scratch_bo)
-      drm_intel_bo_unreference(brw->wm.base.scratch_bo);
+      drm_bacon_bo_unreference(brw->wm.base.scratch_bo);
 
-   drm_intel_gem_context_destroy(brw->hw_ctx);
+   drm_bacon_gem_context_destroy(brw->hw_ctx);
 
    if (ctx->swrast_context) {
       _swsetup_DestroyContext(&brw->ctx);
@@ -1214,8 +1214,8 @@ intelDestroyContext(__DRIcontext * driContextPriv)
    brw_fini_pipe_control(brw);
    intel_batchbuffer_free(&brw->batch);
 
-   drm_intel_bo_unreference(brw->throttle_batch[1]);
-   drm_intel_bo_unreference(brw->throttle_batch[0]);
+   drm_bacon_bo_unreference(brw->throttle_batch[1]);
+   drm_bacon_bo_unreference(brw->throttle_batch[0]);
    brw->throttle_batch[1] = NULL;
    brw->throttle_batch[0] = NULL;
 
@@ -1600,7 +1600,7 @@ intel_query_dri2_buffers(struct brw_context *brw,
  * DRI2BufferDepthStencil are handled as special cases.
  *
  * \param buffer_name is a human readable name, such as "dri2 front buffer",
- *        that is passed to drm_intel_bo_gem_create_from_name().
+ *        that is passed to drm_bacon_bo_gem_create_from_name().
  *
  * \see intel_update_renderbuffers()
  */
@@ -1612,7 +1612,7 @@ intel_process_dri2_buffer(struct brw_context *brw,
                           const char *buffer_name)
 {
    struct gl_framebuffer *fb = drawable->driverPrivate;
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;
 
    if (!rb)
       return;
@@ -1633,10 +1633,10 @@ intel_process_dri2_buffer(struct brw_context *brw,
    if (last_mt) {
       /* The bo already has a name because the miptree was created by a
        * previous call to intel_process_dri2_buffer(). If a bo already has a
-       * name, then drm_intel_bo_flink() is a low-cost getter. It does not
+       * name, then drm_bacon_bo_flink() is a low-cost getter. It does not
        * create a new name.
        */
-      drm_intel_bo_flink(last_mt->bo, &old_name);
+      drm_bacon_bo_flink(last_mt->bo, &old_name);
    }
 
    if (old_name == buffer->name)
@@ -1649,7 +1649,7 @@ intel_process_dri2_buffer(struct brw_context *brw,
                  buffer->cpp, buffer->pitch);
    }
 
-   bo = drm_intel_bo_gem_create_from_name(brw->bufmgr, buffer_name,
+   bo = drm_bacon_bo_gem_create_from_name(brw->bufmgr, buffer_name,
                                           buffer->name);
    if (!bo) {
       fprintf(stderr,
@@ -1674,7 +1674,7 @@ intel_process_dri2_buffer(struct brw_context *brw,
 
    assert(rb->mt);
 
-   drm_intel_bo_unreference(bo);
+   drm_bacon_bo_unreference(bo);
 }
 
 /**
@@ -42,7 +42,7 @@
 #include "isl/isl.h"
 #include "blorp/blorp.h"
 
-#include <intel_bufmgr.h>
+#include <brw_bufmgr.h>
 
 #include "common/gen_debug.h"
 #include "intel_screen.h"
@@ -390,7 +390,7 @@ struct brw_cache {
    struct brw_context *brw;
 
    struct brw_cache_item **items;
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;
    GLuint size, n_items;
 
    uint32_t next_offset;
@@ -422,7 +422,7 @@ enum shader_time_shader_type {
 
 struct brw_vertex_buffer {
    /** Buffer object containing the uploaded vertex data */
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;
    uint32_t offset;
    uint32_t size;
    /** Byte stride between elements in the uploaded array */
@@ -442,7 +442,7 @@ struct brw_query_object {
    struct gl_query_object Base;
 
    /** Last query BO associated with this query. */
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;
 
    /** Last index in bo with query data for this object. */
    int last_index;
@@ -459,9 +459,9 @@ enum brw_gpu_ring {
 
 struct intel_batchbuffer {
    /** Current batchbuffer being queued up. */
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;
    /** Last BO submitted to the hardware. Used for glFinish(). */
-   drm_intel_bo *last_bo;
+   drm_bacon_bo *last_bo;
 
 #ifdef DEBUG
    uint16_t emit, total;
@@ -492,7 +492,7 @@ struct brw_transform_feedback_object {
    struct gl_transform_feedback_object base;
 
    /** A buffer to hold SO_WRITE_OFFSET(n) values while paused. */
-   drm_intel_bo *offset_bo;
+   drm_bacon_bo *offset_bo;
 
    /** If true, SO_WRITE_OFFSET(n) should be reset to zero at next use. */
    bool zero_offsets;
@@ -511,7 +511,7 @@ struct brw_transform_feedback_object {
     * @{
     */
    uint64_t prims_generated[BRW_MAX_XFB_STREAMS];
-   drm_intel_bo *prim_count_bo;
+   drm_bacon_bo *prim_count_bo;
    unsigned prim_count_buffer_index; /**< in number of uint64_t units */
    /** @} */
 
@@ -550,7 +550,7 @@ struct brw_stage_state
     * unless you're taking additional measures to synchronize thread execution
     * across slot size changes.
     */
-   drm_intel_bo *scratch_bo;
+   drm_bacon_bo *scratch_bo;
 
    /**
    * Scratch slot size allocated for each thread in the buffer object given
@@ -656,16 +656,16 @@ struct brw_context
 
    } vtbl;
 
-   drm_intel_bufmgr *bufmgr;
+   drm_bacon_bufmgr *bufmgr;
 
-   drm_intel_context *hw_ctx;
+   drm_bacon_context *hw_ctx;
 
    /** BO for post-sync nonzero writes for gen6 workaround. */
-   drm_intel_bo *workaround_bo;
+   drm_bacon_bo *workaround_bo;
    uint8_t pipe_controls_since_last_cs_stall;
 
    /**
-    * Set of drm_intel_bo * that have been rendered to within this batchbuffer
+    * Set of drm_bacon_bo * that have been rendered to within this batchbuffer
     * and would need flushing before being used from another cache domain that
     * isn't coherent with it (i.e. the sampler).
     */
@@ -683,7 +683,7 @@ struct brw_context
    bool no_batch_wrap;
 
    struct {
-      drm_intel_bo *bo;
+      drm_bacon_bo *bo;
       uint32_t next_offset;
    } upload;
 
@@ -696,7 +696,7 @@ struct brw_context
    bool front_buffer_dirty;
 
    /** Framerate throttling: @{ */
-   drm_intel_bo *throttle_batch[2];
+   drm_bacon_bo *throttle_batch[2];
 
    /* Limit the number of outstanding SwapBuffers by waiting for an earlier
    * frame of rendering to complete. This gives a very precise cap to the
@@ -810,7 +810,7 @@ struct brw_context
        * Buffer and offset used for GL_ARB_shader_draw_parameters
        * (for now, only gl_BaseVertex).
        */
-      drm_intel_bo *draw_params_bo;
+      drm_bacon_bo *draw_params_bo;
       uint32_t draw_params_offset;
 
       /**
@@ -819,7 +819,7 @@ struct brw_context
        * draw parameters.
        */
       int gl_drawid;
-      drm_intel_bo *draw_id_bo;
+      drm_bacon_bo *draw_id_bo;
       uint32_t draw_id_offset;
    } draw;
 
@@ -829,7 +829,7 @@ struct brw_context
        * an indirect call, and num_work_groups_offset is valid. Otherwise,
        * num_work_groups is set based on glDispatchCompute.
        */
-      drm_intel_bo *num_work_groups_bo;
+      drm_bacon_bo *num_work_groups_bo;
       GLintptr num_work_groups_offset;
       const GLuint *num_work_groups;
    } compute;
@@ -871,7 +871,7 @@ struct brw_context
       const struct _mesa_index_buffer *ib;
 
       /* Updates are signaled by BRW_NEW_INDEX_BUFFER. */
-      drm_intel_bo *bo;
+      drm_bacon_bo *bo;
       uint32_t size;
       GLuint type;
 
@@ -959,7 +959,7 @@ struct brw_context
        * Pointer to the (intel_upload.c-generated) BO containing the uniforms
        * for upload to the CURBE.
        */
-      drm_intel_bo *curbe_bo;
+      drm_bacon_bo *curbe_bo;
       /** Offset within curbe_bo of space for current curbe entry */
       GLuint curbe_offset;
    } curbe;
@@ -1065,7 +1065,7 @@ struct brw_context
        * Buffer object used in place of multisampled null render targets on
        * Gen6. See brw_emit_null_surface_state().
        */
-      drm_intel_bo *multisampled_null_render_target_bo;
+      drm_bacon_bo *multisampled_null_render_target_bo;
       uint32_t fast_clear_op;
 
       float offset_clamp;
@@ -1207,7 +1207,7 @@ struct brw_context
    } l3;
 
    struct {
-      drm_intel_bo *bo;
+      drm_bacon_bo *bo;
       const char **names;
       int *ids;
       enum shader_time_shader_type *types;
@@ -1297,8 +1297,8 @@ uint64_t brw_raw_timestamp_delta(struct brw_context *brw,
 
 /** gen6_queryobj.c */
 void gen6_init_queryobj_functions(struct dd_function_table *functions);
-void brw_write_timestamp(struct brw_context *brw, drm_intel_bo *bo, int idx);
-void brw_write_depth_count(struct brw_context *brw, drm_intel_bo *bo, int idx);
+void brw_write_timestamp(struct brw_context *brw, drm_bacon_bo *bo, int idx);
+void brw_write_depth_count(struct brw_context *brw, drm_bacon_bo *bo, int idx);
 
 /** hsw_queryobj.c */
 void hsw_overflow_result_to_gpr0(struct brw_context *brw,
@@ -1313,18 +1313,18 @@ bool brw_check_conditional_render(struct brw_context *brw);
 /** intel_batchbuffer.c */
 void brw_load_register_mem(struct brw_context *brw,
                            uint32_t reg,
-                           drm_intel_bo *bo,
+                           drm_bacon_bo *bo,
                            uint32_t read_domains, uint32_t write_domain,
                            uint32_t offset);
 void brw_load_register_mem64(struct brw_context *brw,
                              uint32_t reg,
-                             drm_intel_bo *bo,
+                             drm_bacon_bo *bo,
                              uint32_t read_domains, uint32_t write_domain,
                              uint32_t offset);
 void brw_store_register_mem32(struct brw_context *brw,
-                              drm_intel_bo *bo, uint32_t reg, uint32_t offset);
+                              drm_bacon_bo *bo, uint32_t reg, uint32_t offset);
 void brw_store_register_mem64(struct brw_context *brw,
-                              drm_intel_bo *bo, uint32_t reg, uint32_t offset);
+                              drm_bacon_bo *bo, uint32_t reg, uint32_t offset);
 void brw_load_register_imm32(struct brw_context *brw,
                              uint32_t reg, uint32_t imm);
 void brw_load_register_imm64(struct brw_context *brw,
@@ -1333,9 +1333,9 @@ void brw_load_register_reg(struct brw_context *brw, uint32_t src,
                            uint32_t dest);
 void brw_load_register_reg64(struct brw_context *brw, uint32_t src,
                              uint32_t dest);
-void brw_store_data_imm32(struct brw_context *brw, drm_intel_bo *bo,
+void brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
                           uint32_t offset, uint32_t imm);
-void brw_store_data_imm64(struct brw_context *brw, drm_intel_bo *bo,
+void brw_store_data_imm64(struct brw_context *brw, drm_bacon_bo *bo,
                           uint32_t offset, uint64_t imm);
 
 /*======================================================================
@@ -1360,7 +1360,7 @@ key_debug(struct brw_context *brw, const char *name, int a, int b)
 void brwInitFragProgFuncs( struct dd_function_table *functions );
 
 void brw_get_scratch_bo(struct brw_context *brw,
-                        drm_intel_bo **scratch_bo, int size);
+                        drm_bacon_bo **scratch_bo, int size);
 void brw_alloc_stage_scratch(struct brw_context *brw,
                              struct brw_stage_state *stage_state,
                              unsigned per_thread_size,
@@ -1413,12 +1413,12 @@ void brw_prepare_vertices(struct brw_context *brw);
 /* brw_wm_surface_state.c */
 void brw_init_surface_formats(struct brw_context *brw);
 void brw_create_constant_surface(struct brw_context *brw,
-                                 drm_intel_bo *bo,
+                                 drm_bacon_bo *bo,
                                  uint32_t offset,
                                  uint32_t size,
                                  uint32_t *out_offset);
 void brw_create_buffer_surface(struct brw_context *brw,
-                               drm_intel_bo *bo,
+                               drm_bacon_bo *bo,
                                uint32_t offset,
                                uint32_t size,
                                uint32_t *out_offset);
@@ -1451,9 +1451,9 @@ uint32_t brw_depth_format(struct brw_context *brw, mesa_format format);
 void brw_init_performance_queries(struct brw_context *brw);
 
 /* intel_buffer_objects.c */
-int brw_bo_map(struct brw_context *brw, drm_intel_bo *bo, int write_enable,
+int brw_bo_map(struct brw_context *brw, drm_bacon_bo *bo, int write_enable,
                const char *bo_name);
-int brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo,
+int brw_bo_map_gtt(struct brw_context *brw, drm_bacon_bo *bo,
                    const char *bo_name);
 
 /* intel_extensions.c */
@@ -1618,7 +1618,7 @@ brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
       return prog_offset;
    }
 
-   drm_intel_bo_emit_reloc(brw->batch.bo,
+   drm_bacon_bo_emit_reloc(brw->batch.bo,
                            state_offset,
                            brw->cache.bo,
                            prog_offset,
@@ -1711,7 +1711,7 @@ void brw_fini_pipe_control(struct brw_context *brw);
 
 void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);
 void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
-                                 drm_intel_bo *bo, uint32_t offset,
+                                 drm_bacon_bo *bo, uint32_t offset,
                                  uint32_t imm_lower, uint32_t imm_upper);
 void brw_emit_mi_flush(struct brw_context *brw);
 void brw_emit_post_sync_nonzero_flush(struct brw_context *brw);
@@ -104,7 +104,7 @@ brw_codegen_cs_prog(struct brw_context *brw,
 
    if (unlikely(brw->perf_debug)) {
       start_busy = (brw->batch.last_bo &&
-                    drm_intel_bo_busy(brw->batch.last_bo));
+                    drm_bacon_bo_busy(brw->batch.last_bo));
       start_time = get_time();
    }
 
@@ -131,7 +131,7 @@ brw_codegen_cs_prog(struct brw_context *brw,
    }
    cp->compiled_once = true;
 
-   if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
+   if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
       perf_debug("CS compile took %.03f ms and stalled the GPU\n",
                  (get_time() - start_time) * 1000);
    }
@@ -220,7 +220,7 @@ brw_emit_prim(struct brw_context *brw,
       ADVANCE_BATCH();
    } else if (prim->is_indirect) {
       struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
-      drm_intel_bo *bo = intel_bufferobj_buffer(brw,
+      drm_bacon_bo *bo = intel_bufferobj_buffer(brw,
            intel_buffer_object(indirect_buffer),
            prim->indirect_offset, 5 * sizeof(GLuint));
 
@@ -291,7 +291,7 @@ brw_merge_inputs(struct brw_context *brw,
    GLuint i;
 
    for (i = 0; i < brw->vb.nr_buffers; i++) {
-      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
+      drm_bacon_bo_unreference(brw->vb.buffers[i].bo);
       brw->vb.buffers[i].bo = NULL;
    }
    brw->vb.nr_buffers = 0;
@@ -551,13 +551,13 @@ brw_try_draw_prims(struct gl_context *ctx,
 
       brw->draw.params.gl_basevertex = new_basevertex;
       brw->draw.params.gl_baseinstance = new_baseinstance;
-      drm_intel_bo_unreference(brw->draw.draw_params_bo);
+      drm_bacon_bo_unreference(brw->draw.draw_params_bo);
 
       if (prims[i].is_indirect) {
         /* Point draw_params_bo at the indirect buffer. */
         brw->draw.draw_params_bo =
            intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
-        drm_intel_bo_reference(brw->draw.draw_params_bo);
+        drm_bacon_bo_reference(brw->draw.draw_params_bo);
         brw->draw.draw_params_offset =
            prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
      } else {
@@ -575,7 +575,7 @@ brw_try_draw_prims(struct gl_context *ctx,
        * the loop.
        */
       brw->draw.gl_drawid = prims[i].draw_id;
-      drm_intel_bo_unreference(brw->draw.draw_id_bo);
+      drm_bacon_bo_unreference(brw->draw.draw_id_bo);
       brw->draw.draw_id_bo = NULL;
       if (i > 0 && vs_prog_data->uses_drawid)
          brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
@@ -601,7 +601,7 @@ retry:
 
    brw->no_batch_wrap = false;
 
-   if (drm_intel_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
+   if (drm_bacon_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
       if (!fail_next) {
          intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
@@ -711,7 +711,7 @@ brw_draw_destroy(struct brw_context *brw)
    unsigned i;
 
    for (i = 0; i < brw->vb.nr_buffers; i++) {
-      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
+      drm_bacon_bo_unreference(brw->vb.buffers[i].bo);
       brw->vb.buffers[i].bo = NULL;
    }
    brw->vb.nr_buffers = 0;
@@ -721,6 +721,6 @@ brw_draw_destroy(struct brw_context *brw)
    }
    brw->vb.nr_enabled = 0;
 
-   drm_intel_bo_unreference(brw->ib.bo);
+   drm_bacon_bo_unreference(brw->ib.bo);
    brw->ib.bo = NULL;
 }
@@ -27,14 +27,14 @@
 #define BRW_DRAW_H
 
 #include "main/mtypes.h"		/* for struct gl_context... */
-#include "intel_bufmgr.h"
+#include "brw_bufmgr.h"
 
 struct brw_context;
 
 uint32_t *
 brw_emit_vertex_buffer_state(struct brw_context *brw,
                              unsigned buffer_nr,
-                             drm_intel_bo *bo,
+                             drm_bacon_bo *bo,
                              unsigned start_offset,
                              unsigned end_offset,
                              unsigned stride,
@@ -702,7 +702,7 @@ brw_prepare_vertices(struct brw_context *brw)
       const uint32_t range = buffer_range_end[i] - buffer_range_start[i];
 
       buffer->bo = intel_bufferobj_buffer(brw, enabled_buffer[i], start, range);
-      drm_intel_bo_reference(buffer->bo);
+      drm_bacon_bo_reference(buffer->bo);
    }
 
    /* If we need to upload all the arrays, then we can trim those arrays to
@@ -792,7 +792,7 @@ brw_prepare_shader_draw_parameters(struct brw_context *brw)
 uint32_t *
 brw_emit_vertex_buffer_state(struct brw_context *brw,
                              unsigned buffer_nr,
-                             drm_intel_bo *bo,
+                             drm_bacon_bo *bo,
                              unsigned start_offset,
                              unsigned end_offset,
                              unsigned stride,
@@ -1166,7 +1166,7 @@ brw_upload_indices(struct brw_context *brw)
    struct gl_context *ctx = &brw->ctx;
    const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
    GLuint ib_size;
-   drm_intel_bo *old_bo = brw->ib.bo;
+   drm_bacon_bo *old_bo = brw->ib.bo;
    struct gl_buffer_object *bufferobj;
    GLuint offset;
    GLuint ib_type_size;
@@ -1210,14 +1210,14 @@ brw_upload_indices(struct brw_context *brw)
 
          ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
       } else {
-         drm_intel_bo *bo =
+         drm_bacon_bo *bo =
             intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                    offset, ib_size);
          if (bo != brw->ib.bo) {
-            drm_intel_bo_unreference(brw->ib.bo);
+            drm_bacon_bo_unreference(brw->ib.bo);
             brw->ib.bo = bo;
             brw->ib.size = bufferobj->Size;
-            drm_intel_bo_reference(bo);
+            drm_bacon_bo_reference(bo);
          }
       }
    }
@@ -124,7 +124,7 @@ brw_codegen_gs_prog(struct brw_context *brw,
       st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);
 
    if (unlikely(brw->perf_debug)) {
-      start_busy = brw->batch.last_bo && drm_intel_bo_busy(brw->batch.last_bo);
+      start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
       start_time = get_time();
    }
 
@@ -147,7 +147,7 @@ brw_codegen_gs_prog(struct brw_context *brw,
    if (gp->compiled_once) {
       brw_gs_debug_recompile(brw, &gp->program, key);
    }
-   if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
+   if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
      perf_debug("GS compile took %.03f ms and stalled the GPU\n",
                 (get_time() - start_time) * 1000);
   }
@@ -38,12 +38,12 @@
 #include "intel_mipmap_tree.h"
 
 static GLenum
-intel_buffer_purgeable(drm_intel_bo *buffer)
+intel_buffer_purgeable(drm_bacon_bo *buffer)
 {
    int retained = 0;
 
    if (buffer != NULL)
-      retained = drm_intel_bo_madvise(buffer, I915_MADV_DONTNEED);
+      retained = drm_bacon_bo_madvise(buffer, I915_MADV_DONTNEED);
 
    return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
 }
@@ -101,13 +101,13 @@ intel_render_object_purgeable(struct gl_context * ctx,
 }
 
 static int
-intel_bo_unpurgeable(drm_intel_bo *buffer)
+intel_bo_unpurgeable(drm_bacon_bo *buffer)
 {
    int retained;
 
    retained = 0;
    if (buffer != NULL)
-      retained = drm_intel_bo_madvise(buffer, I915_MADV_WILLNEED);
+      retained = drm_bacon_bo_madvise(buffer, I915_MADV_WILLNEED);
 
    return retained;
 }
@@ -125,7 +125,7 @@ intel_buffer_object_unpurgeable(struct gl_context * ctx,
       return GL_UNDEFINED_APPLE;
 
    if (option == GL_UNDEFINED_APPLE || !intel_bo_unpurgeable(intel->buffer)) {
-      drm_intel_bo_unreference(intel->buffer);
+      drm_bacon_bo_unreference(intel->buffer);
       intel->buffer = NULL;
       return GL_UNDEFINED_APPLE;
    }
@@ -224,7 +224,7 @@ struct brw_perf_query_object
          /**
           * BO containing OA counter snapshots at query Begin/End time.
           */
-         drm_intel_bo *bo;
+         drm_bacon_bo *bo;
 
          /**
           * The MI_REPORT_PERF_COUNT command lets us specify a unique
@@ -264,7 +264,7 @@ struct brw_perf_query_object
           * BO containing starting and ending snapshots for the
           * statistics counters.
           */
-         drm_intel_bo *bo;
+         drm_bacon_bo *bo;
       } pipeline_stats;
    };
 };
@@ -476,7 +476,7 @@ snapshot_statistics_registers(struct brw_context *brw,
  */
 static void
 emit_mi_report_perf_count(struct brw_context *brw,
-                          drm_intel_bo *bo,
+                          drm_bacon_bo *bo,
                           uint32_t offset_in_bytes,
                           uint32_t report_id)
 {
@@ -713,7 +713,7 @@ accumulate_oa_reports(struct brw_context *brw,
    if (!read_oa_samples(brw))
       goto error;
 
-   drm_intel_bo_map(obj->oa.bo, false);
+   drm_bacon_bo_map(obj->oa.bo, false);
    query_buffer = obj->oa.bo->virtual;
 
    start = last = query_buffer;
@@ -793,7 +793,7 @@ end:
 
    DBG("Marking %d accumulated - results gathered\n", o->Id);
 
-   drm_intel_bo_unmap(obj->oa.bo);
+   drm_bacon_bo_unmap(obj->oa.bo);
    obj->oa.results_accumulated = true;
    drop_from_unaccumulated_query_list(brw, obj);
    dec_n_oa_users(brw);
@@ -802,7 +802,7 @@ end:
 
 error:
 
-   drm_intel_bo_unmap(obj->oa.bo);
+   drm_bacon_bo_unmap(obj->oa.bo);
    discard_all_queries(brw);
 }
 
@@ -951,7 +951,7 @@ brw_begin_perf_query(struct gl_context *ctx,
       uint32_t ctx_id;
       int period_exponent;
 
-      if (drm_intel_gem_context_get_id(brw->hw_ctx, &ctx_id) != 0)
+      if (drm_bacon_gem_context_get_id(brw->hw_ctx, &ctx_id) != 0)
          return false;
 
       /* The timestamp for HSW+ increments every 80ns
@@ -988,18 +988,18 @@ brw_begin_perf_query(struct gl_context *ctx,
       }
 
       if (obj->oa.bo) {
-         drm_intel_bo_unreference(obj->oa.bo);
+         drm_bacon_bo_unreference(obj->oa.bo);
          obj->oa.bo = NULL;
       }
 
       obj->oa.bo =
-         drm_intel_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
+         drm_bacon_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
                             MI_RPC_BO_SIZE, 64);
 #ifdef DEBUG
       /* Pre-filling the BO helps debug whether writes landed. */
-      drm_intel_bo_map(obj->oa.bo, true);
+      drm_bacon_bo_map(obj->oa.bo, true);
       memset((char *) obj->oa.bo->virtual, 0x80, MI_RPC_BO_SIZE);
-      drm_intel_bo_unmap(obj->oa.bo);
+      drm_bacon_bo_unmap(obj->oa.bo);
 #endif
 
       obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
@@ -1035,12 +1035,12 @@ brw_begin_perf_query(struct gl_context *ctx,
 
    case PIPELINE_STATS:
       if (obj->pipeline_stats.bo) {
-         drm_intel_bo_unreference(obj->pipeline_stats.bo);
+         drm_bacon_bo_unreference(obj->pipeline_stats.bo);
          obj->pipeline_stats.bo = NULL;
       }
 
       obj->pipeline_stats.bo =
-         drm_intel_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
+         drm_bacon_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
                             STATS_BO_SIZE, 64);
 
       /* Take starting snapshots. */
@@ -1112,7 +1112,7 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
 {
    struct brw_context *brw = brw_context(ctx);
    struct brw_perf_query_object *obj = brw_perf_query(o);
-   drm_intel_bo *bo = NULL;
+   drm_bacon_bo *bo = NULL;
 
    assert(!o->Ready);
 
@@ -1132,15 +1132,15 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
    /* If the current batch references our results bo then we need to
     * flush first...
     */
-   if (drm_intel_bo_references(brw->batch.bo, bo))
+   if (drm_bacon_bo_references(brw->batch.bo, bo))
       intel_batchbuffer_flush(brw);
 
    if (unlikely(brw->perf_debug)) {
-      if (drm_intel_bo_busy(bo))
+      if (drm_bacon_bo_busy(bo))
         perf_debug("Stalling GPU waiting for a performance query object.\n");
    }
 
-   drm_intel_bo_wait_rendering(bo);
+   drm_bacon_bo_wait_rendering(bo);
 }
 
 static bool
@@ -1157,13 +1157,13 @@ brw_is_perf_query_ready(struct gl_context *ctx,
    case OA_COUNTERS:
       return (obj->oa.results_accumulated ||
               (obj->oa.bo &&
-               !drm_intel_bo_references(brw->batch.bo, obj->oa.bo) &&
-               !drm_intel_bo_busy(obj->oa.bo)));
+               !drm_bacon_bo_references(brw->batch.bo, obj->oa.bo) &&
+               !drm_bacon_bo_busy(obj->oa.bo)));
 
    case PIPELINE_STATS:
       return (obj->pipeline_stats.bo &&
-              !drm_intel_bo_references(brw->batch.bo, obj->pipeline_stats.bo) &&
-              !drm_intel_bo_busy(obj->pipeline_stats.bo));
+              !drm_bacon_bo_references(brw->batch.bo, obj->pipeline_stats.bo) &&
+              !drm_bacon_bo_busy(obj->pipeline_stats.bo));
    }
 
    unreachable("missing ready check for unknown query kind");
@@ -1224,7 +1224,7 @@ get_pipeline_stats_data(struct brw_context *brw,
    int n_counters = obj->query->n_counters;
    uint8_t *p = data;
 
-   drm_intel_bo_map(obj->pipeline_stats.bo, false);
+   drm_bacon_bo_map(obj->pipeline_stats.bo, false);
    uint64_t *start = obj->pipeline_stats.bo->virtual;
    uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
 
@@ -1242,7 +1242,7 @@ get_pipeline_stats_data(struct brw_context *brw,
       p += 8;
    }
 
-   drm_intel_bo_unmap(obj->pipeline_stats.bo);
+   drm_bacon_bo_unmap(obj->pipeline_stats.bo);
 
    return p - data;
 }
@@ -1333,7 +1333,7 @@ brw_delete_perf_query(struct gl_context *ctx,
          dec_n_oa_users(brw);
       }
 
-      drm_intel_bo_unreference(obj->oa.bo);
+      drm_bacon_bo_unreference(obj->oa.bo);
       obj->oa.bo = NULL;
    }
 
@@ -1342,7 +1342,7 @@ brw_delete_perf_query(struct gl_context *ctx,
 
    case PIPELINE_STATS:
       if (obj->pipeline_stats.bo) {
-         drm_intel_bo_unreference(obj->pipeline_stats.bo);
+         drm_bacon_bo_unreference(obj->pipeline_stats.bo);
         obj->pipeline_stats.bo = NULL;
      }
      break;
@@ -177,7 +177,7 @@ brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
  */
 void
 brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
-                            drm_intel_bo *bo, uint32_t offset,
+                            drm_bacon_bo *bo, uint32_t offset,
                             uint32_t imm_lower, uint32_t imm_upper)
 {
    if (brw->gen >= 8) {
@@ -372,7 +372,7 @@ brw_init_pipe_control(struct brw_context *brw,
     * the gen6 workaround because it involves actually writing to
     * the buffer, and the kernel doesn't let us write to the batch.
     */
-   brw->workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
+   brw->workaround_bo = drm_bacon_bo_alloc(brw->bufmgr,
                                            "pipe_control workaround",
                                            4096, 4096);
    if (brw->workaround_bo == NULL)
@@ -386,5 +386,5 @@ brw_init_pipe_control(struct brw_context *brw,
 void
 brw_fini_pipe_control(struct brw_context *brw)
 {
-   drm_intel_bo_unreference(brw->workaround_bo);
+   drm_bacon_bo_unreference(brw->workaround_bo);
 }
@@ -343,17 +343,17 @@ brw_blend_barrier(struct gl_context *ctx)
 
 void
 brw_get_scratch_bo(struct brw_context *brw,
-                   drm_intel_bo **scratch_bo, int size)
+                   drm_bacon_bo **scratch_bo, int size)
 {
-   drm_intel_bo *old_bo = *scratch_bo;
+   drm_bacon_bo *old_bo = *scratch_bo;
 
    if (old_bo && old_bo->size < size) {
-      drm_intel_bo_unreference(old_bo);
+      drm_bacon_bo_unreference(old_bo);
       old_bo = NULL;
    }
 
    if (!old_bo) {
-      *scratch_bo = drm_intel_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
+      *scratch_bo = drm_bacon_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
    }
 }
 
@@ -371,10 +371,10 @@ brw_alloc_stage_scratch(struct brw_context *brw,
       stage_state->per_thread_scratch = per_thread_size;
 
       if (stage_state->scratch_bo)
-         drm_intel_bo_unreference(stage_state->scratch_bo);
+         drm_bacon_bo_unreference(stage_state->scratch_bo);
 
       stage_state->scratch_bo =
-         drm_intel_bo_alloc(brw->bufmgr, "shader scratch space",
+         drm_bacon_bo_alloc(brw->bufmgr, "shader scratch space",
                             per_thread_size * thread_count, 4096);
    }
 }
@@ -404,7 +404,7 @@ brw_init_shader_time(struct brw_context *brw)
 {
    const int max_entries = 2048;
    brw->shader_time.bo =
-      drm_intel_bo_alloc(brw->bufmgr, "shader time",
+      drm_bacon_bo_alloc(brw->bufmgr, "shader time",
                          max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
    brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
    brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
@@ -580,7 +580,7 @@ brw_collect_shader_time(struct brw_context *brw)
     * delaying reading the reports, but it doesn't look like it's a big
     * overhead compared to the cost of tracking the time in the first place.
     */
-   drm_intel_bo_map(brw->shader_time.bo, true);
+   drm_bacon_bo_map(brw->shader_time.bo, true);
    void *bo_map = brw->shader_time.bo->virtual;
 
    for (int i = 0; i < brw->shader_time.num_entries; i++) {
@@ -594,7 +594,7 @@ brw_collect_shader_time(struct brw_context *brw)
    /* Zero the BO out to clear it out for our next collection.
    */
    memset(bo_map, 0, brw->shader_time.bo->size);
-   drm_intel_bo_unmap(brw->shader_time.bo);
+   drm_bacon_bo_unmap(brw->shader_time.bo);
 }
 
 void
@@ -643,7 +643,7 @@ brw_get_shader_time_index(struct brw_context *brw, struct gl_program *prog,
 void
 brw_destroy_shader_time(struct brw_context *brw)
 {
-   drm_intel_bo_unreference(brw->shader_time.bo);
+   drm_bacon_bo_unreference(brw->shader_time.bo);
    brw->shader_time.bo = NULL;
 }
 
@@ -213,27 +213,27 @@ static void
 brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
 {
    struct brw_context *brw = cache->brw;
-   drm_intel_bo *new_bo;
+   drm_bacon_bo *new_bo;
 
-   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
+   new_bo = drm_bacon_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
    if (brw->has_llc)
-      drm_intel_gem_bo_map_unsynchronized(new_bo);
+      drm_bacon_gem_bo_map_unsynchronized(new_bo);
 
    /* Copy any existing data that needs to be saved. */
    if (cache->next_offset != 0) {
       if (brw->has_llc) {
          memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
       } else {
-         drm_intel_bo_map(cache->bo, false);
-         drm_intel_bo_subdata(new_bo, 0, cache->next_offset,
+         drm_bacon_bo_map(cache->bo, false);
+         drm_bacon_bo_subdata(new_bo, 0, cache->next_offset,
                               cache->bo->virtual);
-         drm_intel_bo_unmap(cache->bo);
+         drm_bacon_bo_unmap(cache->bo);
       }
    }
 
    if (brw->has_llc)
-      drm_intel_bo_unmap(cache->bo);
-   drm_intel_bo_unreference(cache->bo);
+      drm_bacon_bo_unmap(cache->bo);
+   drm_bacon_bo_unreference(cache->bo);
    cache->bo = new_bo;
    cache->bo_used_by_gpu = false;
 
@@ -264,10 +264,10 @@ brw_lookup_prog(const struct brw_cache *cache,
          continue;
 
       if (!brw->has_llc)
-         drm_intel_bo_map(cache->bo, false);
+         drm_bacon_bo_map(cache->bo, false);
       ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
       if (!brw->has_llc)
-         drm_intel_bo_unmap(cache->bo);
+         drm_bacon_bo_unmap(cache->bo);
       if (ret)
          continue;
 
@@ -369,7 +369,7 @@ brw_upload_cache(struct brw_cache *cache,
    if (brw->has_llc) {
       memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
    } else {
-      drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
+      drm_bacon_bo_subdata(cache->bo, item->offset, data_size, data);
    }
 }
 
@@ -406,9 +406,9 @@ brw_init_caches(struct brw_context *brw)
    cache->items =
       calloc(cache->size, sizeof(struct brw_cache_item *));
 
-   cache->bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", 4096, 64);
+   cache->bo = drm_bacon_bo_alloc(brw->bufmgr, "program cache", 4096, 64);
    if (brw->has_llc)
-      drm_intel_gem_bo_map_unsynchronized(cache->bo);
+      drm_bacon_gem_bo_map_unsynchronized(cache->bo);
 }
 
 static void
@@ -486,8 +486,8 @@ brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
    DBG("%s\n", __func__);
 
    if (brw->has_llc)
-      drm_intel_bo_unmap(cache->bo);
-   drm_intel_bo_unreference(cache->bo);
+      drm_bacon_bo_unmap(cache->bo);
+   drm_bacon_bo_unreference(cache->bo);
    cache->bo = NULL;
    brw_clear_cache(brw, cache);
    free(cache->items);
@@ -536,7 +536,7 @@ brw_print_program_cache(struct brw_context *brw)
    struct brw_cache_item *item;
 
    if (!brw->has_llc)
-      drm_intel_bo_map(cache->bo, false);
+      drm_bacon_bo_map(cache->bo, false);
 
    for (unsigned i = 0; i < cache->size; i++) {
       for (item = cache->items[i]; item; item = item->next) {
@@ -547,5 +547,5 @@ brw_print_program_cache(struct brw_context *brw)
    }
 
    if (!brw->has_llc)
-      drm_intel_bo_unmap(cache->bo);
+      drm_bacon_bo_unmap(cache->bo);
 }
@@ -82,7 +82,7 @@ brw_raw_timestamp_delta(struct brw_context *brw, uint64_t time0, uint64_t time1)
  * Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
  */
 void
-brw_write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
+brw_write_timestamp(struct brw_context *brw, drm_bacon_bo *query_bo, int idx)
 {
    if (brw->gen == 6) {
       /* Emit Sandybridge workaround flush: */
@@ -104,7 +104,7 @@ brw_write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
  * Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
  */
 void
-brw_write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
+brw_write_depth_count(struct brw_context *brw, drm_bacon_bo *query_bo, int idx)
 {
    uint32_t flags = PIPE_CONTROL_WRITE_DEPTH_COUNT | PIPE_CONTROL_DEPTH_STALL;
 
@@ -137,16 +137,16 @@ brw_queryobj_get_results(struct gl_context *ctx,
     * still contributing to it, flush it now so the results will be present
     * when mapped.
     */
-   if (drm_intel_bo_references(brw->batch.bo, query->bo))
+   if (drm_bacon_bo_references(brw->batch.bo, query->bo))
       intel_batchbuffer_flush(brw);
 
    if (unlikely(brw->perf_debug)) {
-      if (drm_intel_bo_busy(query->bo)) {
+      if (drm_bacon_bo_busy(query->bo)) {
         perf_debug("Stalling on the GPU waiting for a query object.\n");
      }
   }
 
-   drm_intel_bo_map(query->bo, false);
+   drm_bacon_bo_map(query->bo, false);
    results = query->bo->virtual;
    switch (query->Base.Target) {
    case GL_TIME_ELAPSED_EXT:
@@ -199,12 +199,12 @@ brw_queryobj_get_results(struct gl_context *ctx,
    default:
       unreachable("Unrecognized query target in brw_queryobj_get_results()");
    }
-   drm_intel_bo_unmap(query->bo);
+   drm_bacon_bo_unmap(query->bo);
 
    /* Now that we've processed the data stored in the query's buffer object,
    * we can release it.
    */
-   drm_intel_bo_unreference(query->bo);
+   drm_bacon_bo_unreference(query->bo);
    query->bo = NULL;
 }
 
@@ -236,7 +236,7 @@ brw_delete_query(struct gl_context *ctx, struct gl_query_object *q)
 {
    struct brw_query_object *query = (struct brw_query_object *)q;
 
-   drm_intel_bo_unreference(query->bo);
+   drm_bacon_bo_unreference(query->bo);
    free(query);
 }
 
@@ -275,8 +275,8 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        * obtain the time elapsed. Notably, this includes time elapsed while
        * the system was doing other work, such as running other applications.
        */
-      drm_intel_bo_unreference(query->bo);
-      query->bo = drm_intel_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
+      drm_bacon_bo_unreference(query->bo);
+      query->bo = drm_bacon_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
       brw_write_timestamp(brw, query->bo, 0);
       break;
 
@@ -290,7 +290,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
        * Since we're starting a new query, we need to be sure to throw away
        * any previous occlusion query results.
        */
-      drm_intel_bo_unreference(query->bo);
+      drm_bacon_bo_unreference(query->bo);
       query->bo = NULL;
       query->last_index = -1;
 
@@ -402,10 +402,10 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
     * not ready yet on the first time it is queried. This ensures that
     * the async query will return true in finite time.
     */
-   if (query->bo && drm_intel_bo_references(brw->batch.bo, query->bo))
+   if (query->bo && drm_bacon_bo_references(brw->batch.bo, query->bo))
       intel_batchbuffer_flush(brw);
 
-   if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
+   if (query->bo == NULL || !drm_bacon_bo_busy(query->bo)) {
       brw_queryobj_get_results(ctx, query);
       query->Base.Ready = true;
    }
@@ -434,7 +434,7 @@ ensure_bo_has_space(struct gl_context *ctx, struct brw_query_object *query)
          brw_queryobj_get_results(ctx, query);
       }
 
-      query->bo = drm_intel_bo_alloc(brw->bufmgr, "query", 4096, 1);
+      query->bo = drm_bacon_bo_alloc(brw->bufmgr, "query", 4096, 1);
       query->last_index = 0;
    }
 }
@@ -519,8 +519,8 @@ brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
 
    assert(q->Target == GL_TIMESTAMP);
 
-   drm_intel_bo_unreference(query->bo);
-   query->bo = drm_intel_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
+   drm_bacon_bo_unreference(query->bo);
+   query->bo = drm_bacon_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
    brw_write_timestamp(brw, query->bo, 0);
 
    query->flushed = false;
@@ -539,14 +539,14 @@ brw_get_timestamp(struct gl_context *ctx)
 
    switch (brw->screen->hw_has_timestamp) {
    case 3: /* New kernel, always full 36bit accuracy */
-      drm_intel_reg_read(brw->bufmgr, TIMESTAMP | 1, &result);
+      drm_bacon_reg_read(brw->bufmgr, TIMESTAMP | 1, &result);
      break;
   case 2: /* 64bit kernel, result is left-shifted by 32bits, losing 4bits */
-      drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &result);
+      drm_bacon_reg_read(brw->bufmgr, TIMESTAMP, &result);
      result = result >> 32;
      break;
   case 1: /* 32bit kernel, result is 36bit wide but may be inaccurate! */
-      drm_intel_reg_read(brw->bufmgr, TIMESTAMP, &result);
+      drm_bacon_reg_read(brw->bufmgr, TIMESTAMP, &result);
      break;
   }
 
@@ -52,7 +52,7 @@ brw_get_graphics_reset_status(struct gl_context *ctx)
    if (brw->reset_count != 0)
       return GL_NO_ERROR;
 
-   err = drm_intel_get_reset_stats(brw->hw_ctx, &reset_count, &active,
+   err = drm_bacon_get_reset_stats(brw->hw_ctx, &reset_count, &active,
                                    &pending);
    if (err)
       return GL_NO_ERROR;
@@ -85,7 +85,7 @@ brw_check_for_reset(struct brw_context *brw)
    uint32_t pending;
    int err;
 
-   err = drm_intel_get_reset_stats(brw->hw_ctx, &reset_count, &active,
+   err = drm_bacon_get_reset_stats(brw->hw_ctx, &reset_count, &active,
                                    &pending);
    if (err)
       return;
@@ -104,7 +104,7 @@ brw_emit_sampler_state(struct brw_context *brw,
    ss[2] = border_color_offset;
    if (brw->gen < 6) {
       ss[2] += brw->batch.bo->offset64; /* reloc */
-      drm_intel_bo_emit_reloc(brw->batch.bo,
+      drm_bacon_bo_emit_reloc(brw->batch.bo,
                               batch_offset_for_sampler_state + 8,
                               brw->batch.bo, border_color_offset,
                               I915_GEM_DOMAIN_SAMPLER, 0);
@@ -133,7 +133,7 @@ static void upload_sf_unit( struct brw_context *brw )
 {
    struct gl_context *ctx = &brw->ctx;
    struct brw_sf_unit_state *sf;
-   drm_intel_bo *bo = brw->batch.bo;
+   drm_bacon_bo *bo = brw->batch.bo;
    int chipset_max_threads;
    bool render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
 
@@ -291,7 +291,7 @@ static void upload_sf_unit( struct brw_context *brw )
    */
 
    /* Emit SF viewport relocation */
-   drm_intel_bo_emit_reloc(bo, (brw->sf.state_offset +
+   drm_bacon_bo_emit_reloc(bo, (brw->sf.state_offset +
                                 offsetof(struct brw_sf_unit_state, sf5)),
                            brw->batch.bo, (brw->sf.vp_offset |
                                            sf->sf5.front_winding |
@@ -274,7 +274,7 @@ int brw_get_texture_swizzle(const struct gl_context *ctx,
 
 void brw_emit_buffer_surface_state(struct brw_context *brw,
                                    uint32_t *out_offset,
-                                   drm_intel_bo *bo,
+                                   drm_bacon_bo *bo,
                                    unsigned buffer_offset,
                                    unsigned surface_format,
                                    unsigned buffer_size,
@@ -57,7 +57,7 @@ struct brw_fence {
    } type;
 
    union {
-      drm_intel_bo *batch_bo;
+      drm_bacon_bo *batch_bo;
 
      /* This struct owns the fd. */
      int sync_fd;
@@ -96,7 +96,7 @@ brw_fence_finish(struct brw_fence *fence)
    switch (fence->type) {
    case BRW_FENCE_TYPE_BO_WAIT:
       if (fence->batch_bo)
-         drm_intel_bo_unreference(fence->batch_bo);
+         drm_bacon_bo_unreference(fence->batch_bo);
       break;
    case BRW_FENCE_TYPE_SYNC_FD:
       if (fence->sync_fd != -1)
@@ -118,10 +118,10 @@ brw_fence_insert_locked(struct brw_context *brw, struct brw_fence *fence)
    assert(!fence->signalled);
 
    fence->batch_bo = brw->batch.bo;
-   drm_intel_bo_reference(fence->batch_bo);
+   drm_bacon_bo_reference(fence->batch_bo);
 
    if (intel_batchbuffer_flush(brw) < 0) {
-      drm_intel_bo_unreference(fence->batch_bo);
+      drm_bacon_bo_unreference(fence->batch_bo);
       fence->batch_bo = NULL;
       return false;
    }
@@ -179,10 +179,10 @@ brw_fence_has_completed_locked(struct brw_fence *fence)
       return false;
    }
 
-   if (drm_intel_bo_busy(fence->batch_bo))
+   if (drm_bacon_bo_busy(fence->batch_bo))
      return false;
 
-   drm_intel_bo_unreference(fence->batch_bo);
+   drm_bacon_bo_unreference(fence->batch_bo);
    fence->batch_bo = NULL;
    fence->signalled = true;
 
@@ -238,11 +238,11 @@ brw_fence_client_wait_locked(struct brw_context *brw, struct brw_fence *fence,
    if (timeout > INT64_MAX)
       timeout = INT64_MAX;
 
-   if (drm_intel_gem_bo_wait(fence->batch_bo, timeout) != 0)
+   if (drm_bacon_gem_bo_wait(fence->batch_bo, timeout) != 0)
      return false;
 
   fence->signalled = true;
-   drm_intel_bo_unreference(fence->batch_bo);
+   drm_bacon_bo_unreference(fence->batch_bo);
    fence->batch_bo = NULL;
 
    return true;
@@ -237,7 +237,7 @@ brw_codegen_tcs_prog(struct brw_context *brw, struct brw_program *tcp,
       st_index = brw_get_shader_time_index(brw, &tep->program, ST_TCS, true);
 
    if (unlikely(brw->perf_debug)) {
-      start_busy = brw->batch.last_bo && drm_intel_bo_busy(brw->batch.last_bo);
+      start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
       start_time = get_time();
    }
 
@@ -267,7 +267,7 @@ brw_codegen_tcs_prog(struct brw_context *brw, struct brw_program *tcp,
       tcp->compiled_once = true;
    }
 
-   if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
+   if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
      perf_debug("TCS compile took %.03f ms and stalled the GPU\n",
                 (get_time() - start_time) * 1000);
   }
@@ -108,7 +108,7 @@ brw_codegen_tes_prog(struct brw_context *brw,
       st_index = brw_get_shader_time_index(brw, &tep->program, ST_TES, true);
 
    if (unlikely(brw->perf_debug)) {
-      start_busy = brw->batch.last_bo && drm_intel_bo_busy(brw->batch.last_bo);
+      start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
      start_time = get_time();
   }
 
@@ -137,7 +137,7 @@ brw_codegen_tes_prog(struct brw_context *brw,
    if (tep->compiled_once) {
       brw_tes_debug_recompile(brw, &tep->program, key);
    }
-   if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
+   if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
      perf_debug("TES compile took %.03f ms and stalled the GPU\n",
                 (get_time() - start_time) * 1000);
   }
@@ -227,7 +227,7 @@ brw_codegen_vs_prog(struct brw_context *brw,
 
    if (unlikely(brw->perf_debug)) {
       start_busy = (brw->batch.last_bo &&
-                    drm_intel_bo_busy(brw->batch.last_bo));
+                    drm_bacon_bo_busy(brw->batch.last_bo));
       start_time = get_time();
    }
 
@@ -266,7 +266,7 @@ brw_codegen_vs_prog(struct brw_context *brw,
       if (vp->compiled_once) {
          brw_vs_debug_recompile(brw, &vp->program, key);
       }
-      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
+      if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
         perf_debug("VS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
@@ -159,7 +159,7 @@ brw_upload_vs_unit(struct brw_context *brw)
       /* BRW_NEW_SAMPLER_STATE_TABLE - reloc */
       vs->vs5.sampler_state_pointer =
          (brw->batch.bo->offset64 + stage_state->sampler_offset) >> 5;
-      drm_intel_bo_emit_reloc(brw->batch.bo,
+      drm_bacon_bo_emit_reloc(brw->batch.bo,
                               stage_state->state_offset +
                               offsetof(struct brw_vs_unit_state, vs5),
                               brw->batch.bo,
@@ -170,7 +170,7 @@ brw_upload_vs_unit(struct brw_context *brw)
 
    /* Emit scratch space relocation */
    if (prog_data->total_scratch != 0) {
-      drm_intel_bo_emit_reloc(brw->batch.bo,
+      drm_bacon_bo_emit_reloc(brw->batch.bo,
                               stage_state->state_offset +
                               offsetof(struct brw_vs_unit_state, thread2),
                               stage_state->scratch_bo,
@@ -74,7 +74,7 @@ brw_upload_pull_constants(struct brw_context *brw,
 
    /* BRW_NEW_*_PROG_DATA | _NEW_PROGRAM_CONSTANTS */
    uint32_t size = prog_data->nr_pull_params * 4;
-   drm_intel_bo *const_bo = NULL;
+   drm_bacon_bo *const_bo = NULL;
    uint32_t const_offset;
    gl_constant_value *constants = intel_upload_space(brw, size, 64,
                                                      &const_bo, &const_offset);
@@ -95,7 +95,7 @@ brw_upload_pull_constants(struct brw_context *brw,
 
    brw_create_constant_surface(brw, const_bo, const_offset, size,
                                &stage_state->surf_offset[surf_index]);
-   drm_intel_bo_unreference(const_bo);
+   drm_bacon_bo_unreference(const_bo);
 
    brw->ctx.NewDriverState |= brw_new_constbuf;
 }
@@ -172,7 +172,7 @@ brw_codegen_wm_prog(struct brw_context *brw,
 
    if (unlikely(brw->perf_debug)) {
       start_busy = (brw->batch.last_bo &&
-                    drm_intel_bo_busy(brw->batch.last_bo));
+                    drm_bacon_bo_busy(brw->batch.last_bo));
       start_time = get_time();
    }
 
@@ -208,7 +208,7 @@ brw_codegen_wm_prog(struct brw_context *brw,
          brw_wm_debug_recompile(brw, &fp->program, key);
       fp->compiled_once = true;
 
-      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
+      if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
         perf_debug("FS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
@@ -222,7 +222,7 @@ brw_upload_wm_unit(struct brw_context *brw)
 
    /* Emit scratch space relocation */
    if (prog_data->base.total_scratch != 0) {
-      drm_intel_bo_emit_reloc(brw->batch.bo,
+      drm_bacon_bo_emit_reloc(brw->batch.bo,
                               brw->wm.base.state_offset +
                               offsetof(struct brw_wm_unit_state, thread2),
                               brw->wm.base.scratch_bo,
@ -232,7 +232,7 @@ brw_upload_wm_unit(struct brw_context *brw)
|
|||
|
||||
/* Emit sampler state relocation */
|
||||
if (brw->wm.base.sampler_count != 0) {
|
||||
drm_intel_bo_emit_reloc(brw->batch.bo,
|
||||
drm_bacon_bo_emit_reloc(brw->batch.bo,
|
||||
brw->wm.base.state_offset +
|
||||
offsetof(struct brw_wm_unit_state, wm4),
|
||||
brw->batch.bo, (brw->wm.base.sampler_offset |
|
||||
|
|
|
|||
|
|
@ -133,7 +133,7 @@ brw_emit_surface_state(struct brw_context *brw,
|
|||
|
||||
union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
|
||||
|
||||
drm_intel_bo *aux_bo;
|
||||
drm_bacon_bo *aux_bo;
|
||||
struct isl_surf *aux_surf = NULL, aux_surf_s;
|
||||
uint64_t aux_offset = 0;
|
||||
enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
|
||||
|
|
@ -168,7 +168,7 @@ brw_emit_surface_state(struct brw_context *brw,
|
|||
.mocs = mocs, .clear_color = clear_color,
|
||||
.x_offset_sa = tile_x, .y_offset_sa = tile_y);
|
||||
|
||||
drm_intel_bo_emit_reloc(brw->batch.bo,
|
||||
drm_bacon_bo_emit_reloc(brw->batch.bo,
|
||||
*surf_offset + brw->isl_dev.ss.addr_offset,
|
||||
mt->bo, offset,
|
||||
read_domains, write_domains);
|
||||
|
|
@ -182,7 +182,7 @@ brw_emit_surface_state(struct brw_context *brw,
|
|||
*/
|
||||
assert((aux_offset & 0xfff) == 0);
|
||||
uint32_t *aux_addr = state + brw->isl_dev.ss.aux_addr_offset;
|
||||
drm_intel_bo_emit_reloc(brw->batch.bo,
|
||||
drm_bacon_bo_emit_reloc(brw->batch.bo,
|
||||
*surf_offset + brw->isl_dev.ss.aux_addr_offset,
|
||||
aux_bo, *aux_addr - aux_bo->offset64,
|
||||
read_domains, write_domains);
|
||||
|
|
@ -647,7 +647,7 @@ brw_update_texture_surface(struct gl_context *ctx,
|
|||
void
|
||||
brw_emit_buffer_surface_state(struct brw_context *brw,
|
||||
uint32_t *out_offset,
|
||||
drm_intel_bo *bo,
|
||||
drm_bacon_bo *bo,
|
||||
unsigned buffer_offset,
|
||||
unsigned surface_format,
|
||||
unsigned buffer_size,
|
||||
|
|
@ -667,7 +667,7 @@ brw_emit_buffer_surface_state(struct brw_context *brw,
|
|||
.mocs = tex_mocs[brw->gen]);
|
||||
|
||||
if (bo) {
|
||||
drm_intel_bo_emit_reloc(brw->batch.bo,
|
||||
drm_bacon_bo_emit_reloc(brw->batch.bo,
|
||||
*out_offset + brw->isl_dev.ss.addr_offset,
|
||||
bo, buffer_offset,
|
||||
I915_GEM_DOMAIN_SAMPLER,
|
||||
|
|
@ -685,7 +685,7 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
|
|||
struct intel_buffer_object *intel_obj =
|
||||
intel_buffer_object(tObj->BufferObject);
|
||||
uint32_t size = tObj->BufferSize;
|
||||
drm_intel_bo *bo = NULL;
|
||||
drm_bacon_bo *bo = NULL;
|
||||
mesa_format format = tObj->_BufferObjectFormat;
|
||||
uint32_t brw_format = brw_isl_format_for_mesa_format(format);
|
||||
int texel_size = _mesa_get_format_bytes(format);
|
||||
|
|
@ -732,7 +732,7 @@ brw_update_buffer_texture_surface(struct gl_context *ctx,
|
|||
*/
|
||||
void
|
||||
brw_create_constant_surface(struct brw_context *brw,
|
||||
drm_intel_bo *bo,
|
||||
drm_bacon_bo *bo,
|
||||
uint32_t offset,
|
||||
uint32_t size,
|
||||
uint32_t *out_offset)
|
||||
|
|
@ -749,7 +749,7 @@ brw_create_constant_surface(struct brw_context *brw,
|
|||
*/
|
||||
void
|
||||
brw_create_buffer_surface(struct brw_context *brw,
|
||||
drm_intel_bo *bo,
|
||||
drm_bacon_bo *bo,
|
||||
uint32_t offset,
|
||||
uint32_t size,
|
||||
uint32_t *out_offset)
|
||||
|
|
@ -778,7 +778,7 @@ brw_update_sol_surface(struct brw_context *brw,
|
|||
{
|
||||
struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
|
||||
uint32_t offset_bytes = 4 * offset_dwords;
|
||||
drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
|
||||
drm_bacon_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
|
||||
offset_bytes,
|
||||
buffer_obj->Size - offset_bytes);
|
||||
uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
|
||||
|
|
@ -842,7 +842,7 @@ brw_update_sol_surface(struct brw_context *brw,
|
|||
surf[5] = 0;
|
||||
|
||||
/* Emit relocation to surface contents. */
|
||||
drm_intel_bo_emit_reloc(brw->batch.bo,
|
||||
drm_bacon_bo_emit_reloc(brw->batch.bo,
|
||||
*out_offset + 4,
|
||||
bo, offset_bytes,
|
||||
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
|
||||
|
|
@ -914,7 +914,7 @@ brw_emit_null_surface_state(struct brw_context *brw,
|
|||
* - Surface Format must be R8G8B8A8_UNORM.
|
||||
*/
|
||||
unsigned surface_type = BRW_SURFACE_NULL;
|
||||
drm_intel_bo *bo = NULL;
|
||||
drm_bacon_bo *bo = NULL;
|
||||
unsigned pitch_minus_1 = 0;
|
||||
uint32_t multisampling_state = 0;
|
||||
uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
|
||||
|
|
@ -968,7 +968,7 @@ brw_emit_null_surface_state(struct brw_context *brw,
|
|||
surf[5] = 0;
|
||||
|
||||
if (bo) {
|
||||
drm_intel_bo_emit_reloc(brw->batch.bo,
|
||||
drm_bacon_bo_emit_reloc(brw->batch.bo,
|
||||
*out_offset + 4,
|
||||
bo, 0,
|
||||
I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
|
||||
|
|
@ -1071,7 +1071,7 @@ gen4_update_renderbuffer_surface(struct brw_context *brw,
|
|||
}
|
||||
}
|
||||
|
||||
drm_intel_bo_emit_reloc(brw->batch.bo,
|
||||
drm_bacon_bo_emit_reloc(brw->batch.bo,
|
||||
offset + 4,
|
||||
mt->bo,
|
||||
surf[1] - mt->bo->offset64,
|
||||
|
|
@ -1410,7 +1410,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
|
|||
GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
|
||||
if (!binding->AutomaticSize)
|
||||
size = MIN2(size, binding->Size);
|
||||
drm_intel_bo *bo =
|
||||
drm_bacon_bo *bo =
|
||||
intel_bufferobj_buffer(brw, intel_bo,
|
||||
binding->Offset,
|
||||
size);
|
||||
|
|
@ -1435,7 +1435,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw, struct gl_program *prog,
|
|||
GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
|
||||
if (!binding->AutomaticSize)
|
||||
size = MIN2(size, binding->Size);
|
||||
drm_intel_bo *bo =
|
||||
drm_bacon_bo *bo =
|
||||
intel_bufferobj_buffer(brw, intel_bo,
|
||||
binding->Offset,
|
||||
size);
|
||||
|
|
@ -1510,7 +1510,7 @@ brw_upload_abo_surfaces(struct brw_context *brw,
|
|||
&ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
|
||||
struct intel_buffer_object *intel_bo =
|
||||
intel_buffer_object(binding->BufferObject);
|
||||
drm_intel_bo *bo = intel_bufferobj_buffer(
|
||||
drm_bacon_bo *bo = intel_bufferobj_buffer(
|
||||
brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
|
||||
|
||||
brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
|
||||
|
|
@ -1865,7 +1865,7 @@ brw_upload_cs_work_groups_surface(struct brw_context *brw)
|
|||
const unsigned surf_idx =
|
||||
cs_prog_data->binding_table.work_groups_start;
|
||||
uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
|
||||
drm_intel_bo *bo;
|
||||
drm_bacon_bo *bo;
|
||||
uint32_t bo_offset;
|
||||
|
||||
if (brw->compute.num_work_groups_bo == NULL) {
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ set_query_availability(struct brw_context *brw, struct brw_query_object *query,
|
|||
|
||||
static void
|
||||
write_primitives_generated(struct brw_context *brw,
|
||||
drm_intel_bo *query_bo, int stream, int idx)
|
||||
drm_bacon_bo *query_bo, int stream, int idx)
|
||||
{
|
||||
brw_emit_mi_flush(brw);
|
||||
|
||||
|
|
@ -85,7 +85,7 @@ write_primitives_generated(struct brw_context *brw,
|
|||
|
||||
static void
|
||||
write_xfb_primitives_written(struct brw_context *brw,
|
||||
drm_intel_bo *bo, int stream, int idx)
|
||||
drm_bacon_bo *bo, int stream, int idx)
|
||||
{
|
||||
brw_emit_mi_flush(brw);
|
||||
|
||||
|
|
@ -100,7 +100,7 @@ write_xfb_primitives_written(struct brw_context *brw,
|
|||
|
||||
static void
|
||||
write_xfb_overflow_streams(struct gl_context *ctx,
|
||||
drm_intel_bo *bo, int stream, int count,
|
||||
drm_bacon_bo *bo, int stream, int count,
|
||||
int idx)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
|
|
@ -156,7 +156,7 @@ pipeline_target_to_index(int target)
|
|||
}
|
||||
|
||||
static void
|
||||
emit_pipeline_stat(struct brw_context *brw, drm_intel_bo *bo,
|
||||
emit_pipeline_stat(struct brw_context *brw, drm_bacon_bo *bo,
|
||||
int stream, int target, int idx)
|
||||
{
|
||||
/* One source of confusion is the tessellation shader statistics. The
|
||||
|
|
@ -288,12 +288,12 @@ gen6_queryobj_get_results(struct gl_context *ctx,
|
|||
default:
|
||||
unreachable("Unrecognized query target in brw_queryobj_get_results()");
|
||||
}
|
||||
drm_intel_bo_unmap(query->bo);
|
||||
drm_bacon_bo_unmap(query->bo);
|
||||
|
||||
/* Now that we've processed the data stored in the query's buffer object,
|
||||
* we can release it.
|
||||
*/
|
||||
drm_intel_bo_unreference(query->bo);
|
||||
drm_bacon_bo_unreference(query->bo);
|
||||
query->bo = NULL;
|
||||
|
||||
query->Base.Ready = true;
|
||||
|
|
@ -312,8 +312,8 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
|
|||
struct brw_query_object *query = (struct brw_query_object *)q;
|
||||
|
||||
/* Since we're starting a new query, we need to throw away old results. */
|
||||
drm_intel_bo_unreference(query->bo);
|
||||
query->bo = drm_intel_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
|
||||
drm_bacon_bo_unreference(query->bo);
|
||||
query->bo = drm_bacon_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
|
||||
|
||||
/* For ARB_query_buffer_object: The result is not available */
|
||||
set_query_availability(brw, query, false);
|
||||
|
|
@ -467,7 +467,7 @@ flush_batch_if_needed(struct brw_context *brw, struct brw_query_object *query)
|
|||
* (for example, due to being full). Record that it's been flushed.
|
||||
*/
|
||||
query->flushed = query->flushed ||
|
||||
!drm_intel_bo_references(brw->batch.bo, query->bo);
|
||||
!drm_bacon_bo_references(brw->batch.bo, query->bo);
|
||||
|
||||
if (!query->flushed)
|
||||
intel_batchbuffer_flush(brw);
|
||||
|
|
@ -519,7 +519,7 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
|
|||
*/
|
||||
flush_batch_if_needed(brw, query);
|
||||
|
||||
if (!drm_intel_bo_busy(query->bo)) {
|
||||
if (!drm_bacon_bo_busy(query->bo)) {
|
||||
gen6_queryobj_get_results(ctx, query);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
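
The query hunks above all walk one buffer lifecycle: allocate a fresh results BO at BeginQuery, poll it with the busy check, then map, read, and release it. A sketch of the readback step under the same assumption that the drm_bacon_* calls keep their drm_intel_* signatures; read_and_release_query_bo() and the 64-bit read at offset 0 are illustrative, not from the patch:

static uint64_t
read_and_release_query_bo(struct brw_query_object *query)
{
   uint64_t result;

   drm_bacon_bo_map(query->bo, false);        /* read-only mapping */
   result = *(uint64_t *) query->bo->virtual; /* illustrative layout */
   drm_bacon_bo_unmap(query->bo);

   /* The results BO is single-use: drop it once the value is extracted. */
   drm_bacon_bo_unreference(query->bo);
   query->bo = NULL;
   return result;
}
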
@ -194,9 +194,9 @@ brw_new_transform_feedback(struct gl_context *ctx, GLuint name)
_mesa_init_transform_feedback_object(&brw_obj->base, name);
brw_obj->offset_bo =
drm_intel_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
drm_bacon_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
brw_obj->prim_count_bo =
drm_intel_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);
drm_bacon_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);
return &brw_obj->base;
}
@ -212,8 +212,8 @@ brw_delete_transform_feedback(struct gl_context *ctx,
_mesa_reference_buffer_object(ctx, &obj->Buffers[i], NULL);
}
drm_intel_bo_unreference(brw_obj->offset_bo);
drm_intel_bo_unreference(brw_obj->prim_count_bo);
drm_bacon_bo_unreference(brw_obj->offset_bo);
drm_bacon_bo_unreference(brw_obj->prim_count_bo);
free(brw_obj);
}
@ -241,13 +241,13 @@ tally_prims_generated(struct brw_context *brw,
/* If the current batch is still contributing to the number of primitives
* generated, flush it now so the results will be present when mapped.
*/
if (drm_intel_bo_references(brw->batch.bo, obj->prim_count_bo))
if (drm_bacon_bo_references(brw->batch.bo, obj->prim_count_bo))
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug && drm_intel_bo_busy(obj->prim_count_bo)))
if (unlikely(brw->perf_debug && drm_bacon_bo_busy(obj->prim_count_bo)))
perf_debug("Stalling for # of transform feedback primitives written.\n");
drm_intel_bo_map(obj->prim_count_bo, false);
drm_bacon_bo_map(obj->prim_count_bo, false);
uint64_t *prim_counts = obj->prim_count_bo->virtual;
assert(obj->prim_count_buffer_index % (2 * streams) == 0);
@ -260,7 +260,7 @@ tally_prims_generated(struct brw_context *brw,
prim_counts += 2 * streams; /* move to the next pair */
}
drm_intel_bo_unmap(obj->prim_count_bo);
drm_bacon_bo_unmap(obj->prim_count_bo);
/* We've already gathered up the old data; we can safely overwrite it now. */
obj->prim_count_buffer_index = 0;

@ -52,7 +52,7 @@ upload_3dstate_so_buffers(struct brw_context *brw)
for (i = 0; i < 4; i++) {
struct intel_buffer_object *bufferobj =
intel_buffer_object(xfb_obj->Buffers[i]);
drm_intel_bo *bo;
drm_bacon_bo *bo;
uint32_t start, end;
uint32_t stride;

@ -70,7 +70,7 @@ gen8_upload_3dstate_so_buffers(struct brw_context *brw)
uint32_t start = xfb_obj->Offset[i];
assert(start % 4 == 0);
uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
drm_intel_bo *bo =
drm_bacon_bo *bo =
intel_bufferobj_buffer(brw, bufferobj, start, end - start);
assert(end <= bo->size);

@ -67,9 +67,9 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
{
assert(batch->blorp->driver_ctx == batch->driver_batch);
struct brw_context *brw = batch->driver_batch;
drm_intel_bo *bo = address.buffer;
drm_bacon_bo *bo = address.buffer;
drm_intel_bo_emit_reloc(brw->batch.bo, ss_offset,
drm_bacon_bo_emit_reloc(brw->batch.bo, ss_offset,
bo, address.offset + delta,
address.read_domains, address.write_domain);
@ -200,7 +200,7 @@ genX(blorp_exec)(struct blorp_batch *batch,
retry:
intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
intel_batchbuffer_save_state(brw);
drm_intel_bo *saved_bo = brw->batch.bo;
drm_bacon_bo *saved_bo = brw->batch.bo;
uint32_t saved_used = USED_BATCH(brw->batch);
uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
@ -244,7 +244,7 @@ retry:
* map all the BOs into the GPU at batch exec time later. If so, flush the
* batch and try again with nothing else in the batch.
*/
if (drm_intel_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (drm_bacon_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (!check_aperture_failed_once) {
check_aperture_failed_once = true;
intel_batchbuffer_reset_to_saved(brw);
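
The genX(blorp_exec) hunks above touch the driver's save/retry idiom around aperture checks. Sketched in isolation, assuming drm_bacon_bufmgr_check_aperture_space() keeps the libdrm semantics (non-zero return means the referenced BOs will not all fit); emit_blorp_commands() is a hypothetical placeholder:

static void
emit_with_aperture_retry(struct brw_context *brw)
{
   bool failed_once = false;

   intel_batchbuffer_save_state(brw);
retry:
   emit_blorp_commands(brw); /* hypothetical placeholder */

   if (drm_bacon_bufmgr_check_aperture_space(&brw->batch.bo, 1) &&
       !failed_once) {
      /* Roll back what we emitted, flush everything batched so far,
       * and try again into an empty batch. */
      failed_once = true;
      intel_batchbuffer_reset_to_saved(brw);
      intel_batchbuffer_flush(brw);
      goto retry;
   }
}
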
@ -393,7 +393,7 @@ hsw_result_to_gpr0(struct gl_context *ctx, struct brw_query_object *query,
* Store immediate data into the user buffer using the requested size.
*/
static void
store_query_result_imm(struct brw_context *brw, drm_intel_bo *bo,
store_query_result_imm(struct brw_context *brw, drm_bacon_bo *bo,
uint32_t offset, GLenum ptype, uint64_t imm)
{
switch (ptype) {
@ -411,7 +411,7 @@ store_query_result_imm(struct brw_context *brw, drm_intel_bo *bo,
}
static void
set_predicate(struct brw_context *brw, drm_intel_bo *query_bo)
set_predicate(struct brw_context *brw, drm_bacon_bo *query_bo)
{
brw_load_register_imm64(brw, MI_PREDICATE_SRC1, 0ull);
@ -435,7 +435,7 @@ set_predicate(struct brw_context *brw, drm_intel_bo *query_bo)
* query has not finished yet.
*/
static void
store_query_result_reg(struct brw_context *brw, drm_intel_bo *bo,
store_query_result_reg(struct brw_context *brw, drm_bacon_bo *bo,
uint32_t offset, GLenum ptype, uint32_t reg,
const bool pipelined)
{

@ -25,7 +25,7 @@
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_bufmgr.h"
#include "brw_bufmgr.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "brw_context.h"
@ -40,7 +40,7 @@
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
drm_intel_bufmgr *bufmgr,
drm_bacon_bufmgr *bufmgr,
bool has_llc);
static bool
@ -57,7 +57,7 @@ uint_key_hash(const void *key)
void
intel_batchbuffer_init(struct intel_batchbuffer *batch,
drm_intel_bufmgr *bufmgr,
drm_bacon_bufmgr *bufmgr,
bool has_llc)
{
intel_batchbuffer_reset(batch, bufmgr, has_llc);
@ -76,18 +76,18 @@ intel_batchbuffer_init(struct intel_batchbuffer *batch,
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
drm_intel_bufmgr *bufmgr,
drm_bacon_bufmgr *bufmgr,
bool has_llc)
{
if (batch->last_bo != NULL) {
drm_intel_bo_unreference(batch->last_bo);
drm_bacon_bo_unreference(batch->last_bo);
batch->last_bo = NULL;
}
batch->last_bo = batch->bo;
batch->bo = drm_intel_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
batch->bo = drm_bacon_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
if (has_llc) {
drm_intel_bo_map(batch->bo, true);
drm_bacon_bo_map(batch->bo, true);
batch->map = batch->bo->virtual;
}
batch->map_next = batch->map;
@ -118,13 +118,13 @@ intel_batchbuffer_save_state(struct brw_context *brw)
{
brw->batch.saved.map_next = brw->batch.map_next;
brw->batch.saved.reloc_count =
drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
drm_bacon_gem_bo_get_reloc_count(brw->batch.bo);
}
void
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
drm_bacon_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
brw->batch.map_next = brw->batch.saved.map_next;
if (USED_BATCH(brw->batch) == 0)
@ -135,8 +135,8 @@ void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
free(batch->cpu_map);
drm_intel_bo_unreference(batch->last_bo);
drm_intel_bo_unreference(batch->bo);
drm_bacon_bo_unreference(batch->last_bo);
drm_bacon_bo_unreference(batch->bo);
if (batch->state_batch_sizes)
_mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
}
@ -210,7 +210,7 @@ do_batch_dump(struct brw_context *brw)
if (batch->ring != RENDER_RING)
return;
int ret = drm_intel_bo_map(batch->bo, false);
int ret = drm_bacon_bo_map(batch->bo, false);
if (ret != 0) {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
@ -321,7 +321,7 @@ do_batch_dump(struct brw_context *brw)
}
if (ret == 0) {
drm_intel_bo_unmap(batch->bo);
drm_bacon_bo_unmap(batch->bo);
}
}
#else
@ -335,7 +335,7 @@ static void
brw_new_batch(struct brw_context *brw)
{
/* Create a new batchbuffer and reset the associated state: */
drm_intel_gem_bo_clear_relocs(brw->batch.bo, 0);
drm_bacon_gem_bo_clear_relocs(brw->batch.bo, 0);
intel_batchbuffer_reset_and_clear_render_cache(brw);
/* If the kernel supports hardware contexts, then most hardware state is
@ -435,8 +435,8 @@ throttle(struct brw_context *brw)
if (brw->need_swap_throttle && brw->throttle_batch[0]) {
if (brw->throttle_batch[1]) {
if (!brw->disable_throttling)
drm_intel_bo_wait_rendering(brw->throttle_batch[1]);
drm_intel_bo_unreference(brw->throttle_batch[1]);
drm_bacon_bo_wait_rendering(brw->throttle_batch[1]);
drm_bacon_bo_unreference(brw->throttle_batch[1]);
}
brw->throttle_batch[1] = brw->throttle_batch[0];
brw->throttle_batch[0] = NULL;
@ -461,11 +461,11 @@ do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
int ret = 0;
if (brw->has_llc) {
drm_intel_bo_unmap(batch->bo);
drm_bacon_bo_unmap(batch->bo);
} else {
ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
ret = drm_bacon_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
ret = drm_intel_bo_subdata(batch->bo,
ret = drm_bacon_bo_subdata(batch->bo,
batch->state_batch_offset,
batch->bo->size - batch->state_batch_offset,
(char *)batch->map + batch->state_batch_offset);
@ -487,10 +487,10 @@ do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
assert(in_fence_fd == -1);
assert(out_fence_fd == NULL);
ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
ret = drm_bacon_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
NULL, 0, 0, flags);
} else {
ret = drm_intel_gem_bo_fence_exec(batch->bo, brw->hw_ctx,
ret = drm_bacon_gem_bo_fence_exec(batch->bo, brw->hw_ctx,
4 * USED_BATCH(*batch),
in_fence_fd, out_fence_fd,
flags);
@ -533,7 +533,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
if (brw->throttle_batch[0] == NULL) {
brw->throttle_batch[0] = brw->batch.bo;
drm_intel_bo_reference(brw->throttle_batch[0]);
drm_bacon_bo_reference(brw->throttle_batch[0]);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
@ -567,7 +567,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
drm_intel_bo_wait_rendering(brw->batch.bo);
drm_bacon_bo_wait_rendering(brw->batch.bo);
}
/* Start a new batch buffer. */
@ -581,13 +581,13 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
*/
uint64_t
intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
drm_intel_bo *buffer, uint32_t offset,
drm_bacon_bo *buffer, uint32_t offset,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta)
{
int ret;
ret = drm_intel_bo_emit_reloc(batch->bo, offset,
ret = drm_bacon_bo_emit_reloc(batch->bo, offset,
buffer, delta,
read_domains, write_domain);
assert(ret == 0);
@ -613,7 +613,7 @@ intel_batchbuffer_data(struct brw_context *brw,
static void
load_sized_register_mem(struct brw_context *brw,
uint32_t reg,
drm_intel_bo *bo,
drm_bacon_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset,
int size)
@ -645,7 +645,7 @@ load_sized_register_mem(struct brw_context *brw,
void
brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
drm_intel_bo *bo,
drm_bacon_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
@ -655,7 +655,7 @@ brw_load_register_mem(struct brw_context *brw,
void
brw_load_register_mem64(struct brw_context *brw,
uint32_t reg,
drm_intel_bo *bo,
drm_bacon_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
@ -667,7 +667,7 @@ brw_load_register_mem64(struct brw_context *brw,
*/
void
brw_store_register_mem32(struct brw_context *brw,
drm_intel_bo *bo, uint32_t reg, uint32_t offset)
drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
@ -693,7 +693,7 @@ brw_store_register_mem32(struct brw_context *brw,
*/
void
brw_store_register_mem64(struct brw_context *brw,
drm_intel_bo *bo, uint32_t reg, uint32_t offset)
drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
@ -794,7 +794,7 @@ brw_load_register_reg64(struct brw_context *brw, uint32_t src, uint32_t dest)
* Write 32-bits of immediate data to a GPU memory buffer.
*/
void
brw_store_data_imm32(struct brw_context *brw, drm_intel_bo *bo,
brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
uint32_t offset, uint32_t imm)
{
assert(brw->gen >= 6);
@ -817,7 +817,7 @@ brw_store_data_imm32(struct brw_context *brw, drm_intel_bo *bo,
* Write 64-bits of immediate data to a GPU memory buffer.
*/
void
brw_store_data_imm64(struct brw_context *brw, drm_intel_bo *bo,
brw_store_data_imm64(struct brw_context *brw, drm_bacon_bo *bo,
uint32_t offset, uint64_t imm)
{
assert(brw->gen >= 6);
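
Most of the map/unmap churn in the batchbuffer hunks above serves one split: on LLC parts the batch is built through a persistent BO map, elsewhere through a malloc'd shadow pushed in with subdata. A condensed sketch of the upload step from do_flush_locked(), again assuming the renamed calls mirror their drm_intel_* counterparts:

static int
upload_batch_contents(struct intel_batchbuffer *batch, bool has_llc)
{
   if (has_llc) {
      /* CPU and GPU share the LLC, so the mapping is coherent:
       * the commands are already in place and we only unmap. */
      drm_bacon_bo_unmap(batch->bo);
      return 0;
   }

   /* Non-LLC: copy the CPU shadow of the batch into the BO. */
   return drm_bacon_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch),
                               batch->map);
}
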
@ -4,7 +4,7 @@
#include "main/mtypes.h"
#include "brw_context.h"
#include "intel_bufmgr.h"
#include "brw_bufmgr.h"
#ifdef __cplusplus
extern "C" {
@ -39,7 +39,7 @@ extern "C" {
struct intel_batchbuffer;
void intel_batchbuffer_init(struct intel_batchbuffer *batch,
drm_intel_bufmgr *bufmgr,
drm_bacon_bufmgr *bufmgr,
bool has_llc);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_save_state(struct brw_context *brw);
@ -66,7 +66,7 @@ void intel_batchbuffer_data(struct brw_context *brw,
enum brw_gpu_ring ring);
uint64_t intel_batchbuffer_reloc(struct intel_batchbuffer *batch,
drm_intel_bo *buffer,
drm_bacon_bo *buffer,
uint32_t offset,
uint32_t read_domains,
uint32_t write_domain,

@ -486,11 +486,11 @@ bool
intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
int32_t src_pitch,
drm_intel_bo *src_buffer,
drm_bacon_bo *src_buffer,
GLuint src_offset,
uint32_t src_tiling,
int32_t dst_pitch,
drm_intel_bo *dst_buffer,
drm_bacon_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort src_x, GLshort src_y,
@ -501,7 +501,7 @@ intelEmitCopyBlit(struct brw_context *brw,
GLuint CMD, BR13, pass = 0;
int dst_y2 = dst_y + h;
int dst_x2 = dst_x + w;
drm_intel_bo *aper_array[3];
drm_bacon_bo *aper_array[3];
bool dst_y_tiled = dst_tiling == I915_TILING_Y;
bool src_y_tiled = src_tiling == I915_TILING_Y;
uint32_t src_tile_w, src_tile_h;
@ -516,7 +516,7 @@ intelEmitCopyBlit(struct brw_context *brw,
aper_array[1] = dst_buffer;
aper_array[2] = src_buffer;
if (drm_intel_bufmgr_check_aperture_space(aper_array, 3) != 0) {
if (drm_bacon_bufmgr_check_aperture_space(aper_array, 3) != 0) {
intel_batchbuffer_flush(brw);
pass++;
} else
@ -633,7 +633,7 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
drm_intel_bo *dst_buffer,
drm_bacon_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
@ -717,9 +717,9 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
*/
void
intel_emit_linear_blit(struct brw_context *brw,
drm_intel_bo *dst_bo,
drm_bacon_bo *dst_bo,
unsigned int dst_offset,
drm_intel_bo *src_bo,
drm_bacon_bo *src_bo,
unsigned int src_offset,
unsigned int size)
{
@ -780,7 +780,7 @@ intel_miptree_set_alpha_to_one(struct brw_context *brw,
{
uint32_t BR13, CMD;
int pitch, cpp;
drm_intel_bo *aper_array[2];
drm_bacon_bo *aper_array[2];
pitch = mt->pitch;
cpp = mt->cpp;
@ -802,7 +802,7 @@ intel_miptree_set_alpha_to_one(struct brw_context *brw,
aper_array[0] = brw->batch.bo;
aper_array[1] = mt->bo;
if (drm_intel_bufmgr_check_aperture_space(aper_array,
if (drm_bacon_bufmgr_check_aperture_space(aper_array,
ARRAY_SIZE(aper_array)) != 0) {
intel_batchbuffer_flush(brw);
}

@ -32,11 +32,11 @@ bool
intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
int32_t src_pitch,
drm_intel_bo *src_buffer,
drm_bacon_bo *src_buffer,
GLuint src_offset,
uint32_t src_tiling,
int32_t dst_pitch,
drm_intel_bo *dst_buffer,
drm_bacon_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort srcx, GLshort srcy,
@ -71,16 +71,16 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
drm_intel_bo *dst_buffer,
drm_bacon_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
GLshort w, GLshort h,
GLenum logic_op);
void intel_emit_linear_blit(struct brw_context *brw,
drm_intel_bo *dst_bo,
drm_bacon_bo *dst_bo,
unsigned int dst_offset,
drm_intel_bo *src_bo,
drm_bacon_bo *src_bo,
unsigned int src_offset,
unsigned int size);

@ -42,20 +42,20 @@
/**
* Map a buffer object; issue performance warnings if mapping causes stalls.
*
* This matches the drm_intel_bo_map API, but takes an additional human-readable
* This matches the drm_bacon_bo_map API, but takes an additional human-readable
* name for the buffer object to use in the performance debug message.
*/
int
brw_bo_map(struct brw_context *brw,
drm_intel_bo *bo, int write_enable,
drm_bacon_bo *bo, int write_enable,
const char *bo_name)
{
if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
return drm_intel_bo_map(bo, write_enable);
if (likely(!brw->perf_debug) || !drm_bacon_bo_busy(bo))
return drm_bacon_bo_map(bo, write_enable);
double start_time = get_time();
int ret = drm_intel_bo_map(bo, write_enable);
int ret = drm_bacon_bo_map(bo, write_enable);
perf_debug("CPU mapping a busy %s BO stalled and took %.03f ms.\n",
bo_name, (get_time() - start_time) * 1000);
@ -64,14 +64,14 @@ brw_bo_map(struct brw_context *brw,
}
int
brw_bo_map_gtt(struct brw_context *brw, drm_intel_bo *bo, const char *bo_name)
brw_bo_map_gtt(struct brw_context *brw, drm_bacon_bo *bo, const char *bo_name)
{
if (likely(!brw->perf_debug) || !drm_intel_bo_busy(bo))
return drm_intel_gem_bo_map_gtt(bo);
if (likely(!brw->perf_debug) || !drm_bacon_bo_busy(bo))
return drm_bacon_gem_bo_map_gtt(bo);
double start_time = get_time();
int ret = drm_intel_gem_bo_map_gtt(bo);
int ret = drm_bacon_gem_bo_map_gtt(bo);
perf_debug("GTT mapping a busy %s BO stalled and took %.03f ms.\n",
bo_name, (get_time() - start_time) * 1000);
@ -94,12 +94,12 @@ mark_buffer_inactive(struct intel_buffer_object *intel_obj)
intel_obj->gpu_active_end = 0;
}
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
/** Allocates a new drm_bacon_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
struct intel_buffer_object *intel_obj)
{
intel_obj->buffer = drm_intel_bo_alloc(brw->bufmgr, "bufferobj",
intel_obj->buffer = drm_bacon_bo_alloc(brw->bufmgr, "bufferobj",
intel_obj->Base.Size, 64);
/* the buffer might be bound as a uniform buffer, need to update it
@ -119,7 +119,7 @@ alloc_buffer_object(struct brw_context *brw,
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
drm_intel_bo_unreference(intel_obj->buffer);
drm_bacon_bo_unreference(intel_obj->buffer);
intel_obj->buffer = NULL;
}
@ -166,7 +166,7 @@ brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
*/
_mesa_buffer_unmap_all_mappings(ctx, obj);
drm_intel_bo_unreference(intel_obj->buffer);
drm_bacon_bo_unreference(intel_obj->buffer);
_mesa_delete_buffer_object(ctx, obj);
}
@ -213,7 +213,7 @@ brw_buffer_data(struct gl_context *ctx,
return false;
if (data != NULL)
drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
drm_bacon_bo_subdata(intel_obj->buffer, 0, size, data);
}
return true;
@ -257,9 +257,9 @@ brw_buffer_subdata(struct gl_context *ctx,
if (offset + size <= intel_obj->gpu_active_start ||
intel_obj->gpu_active_end <= offset) {
if (brw->has_llc) {
drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
drm_bacon_gem_bo_map_unsynchronized(intel_obj->buffer);
memcpy(intel_obj->buffer->virtual + offset, data, size);
drm_intel_bo_unmap(intel_obj->buffer);
drm_bacon_bo_unmap(intel_obj->buffer);
if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
intel_obj->prefer_stall_to_blit = true;
@ -270,13 +270,13 @@ brw_buffer_subdata(struct gl_context *ctx,
}
busy =
drm_intel_bo_busy(intel_obj->buffer) ||
drm_intel_bo_references(brw->batch.bo, intel_obj->buffer);
drm_bacon_bo_busy(intel_obj->buffer) ||
drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer);
if (busy) {
if (size == intel_obj->Base.Size) {
/* Replace the current busy bo so the subdata doesn't stall. */
drm_intel_bo_unreference(intel_obj->buffer);
drm_bacon_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
} else if (!intel_obj->prefer_stall_to_blit) {
perf_debug("Using a blit copy to avoid stalling on "
@ -285,17 +285,17 @@ brw_buffer_subdata(struct gl_context *ctx,
(long)offset, (long)offset + size, (long)(size/1024),
intel_obj->gpu_active_start,
intel_obj->gpu_active_end);
drm_intel_bo *temp_bo =
drm_intel_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
drm_bacon_bo *temp_bo =
drm_bacon_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
drm_intel_bo_subdata(temp_bo, 0, size, data);
drm_bacon_bo_subdata(temp_bo, 0, size, data);
intel_emit_linear_blit(brw,
intel_obj->buffer, offset,
temp_bo, 0,
size);
drm_intel_bo_unreference(temp_bo);
drm_bacon_bo_unreference(temp_bo);
return;
} else {
perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
@ -308,7 +308,7 @@ brw_buffer_subdata(struct gl_context *ctx,
}
}
drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
drm_bacon_bo_subdata(intel_obj->buffer, offset, size, data);
mark_buffer_inactive(intel_obj);
}
@ -330,10 +330,10 @@ brw_get_buffer_subdata(struct gl_context *ctx,
struct brw_context *brw = brw_context(ctx);
assert(intel_obj);
if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
if (drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer)) {
intel_batchbuffer_flush(brw);
}
drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
drm_bacon_bo_get_subdata(intel_obj->buffer, offset, size, data);
mark_buffer_inactive(intel_obj);
}
@ -389,18 +389,18 @@ brw_map_buffer_range(struct gl_context *ctx,
* achieve the required synchronization.
*/
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
if (drm_intel_bo_references(brw->batch.bo, intel_obj->buffer)) {
if (drm_bacon_bo_references(brw->batch.bo, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
drm_intel_bo_unreference(intel_obj->buffer);
drm_bacon_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
} else {
perf_debug("Stalling on the GPU for mapping a busy buffer "
"object\n");
intel_batchbuffer_flush(brw);
}
} else if (drm_intel_bo_busy(intel_obj->buffer) &&
} else if (drm_bacon_bo_busy(intel_obj->buffer) &&
(access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
drm_intel_bo_unreference(intel_obj->buffer);
drm_bacon_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
}
}
@ -416,14 +416,14 @@ brw_map_buffer_range(struct gl_context *ctx,
*/
if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
(access & GL_MAP_INVALIDATE_RANGE_BIT) &&
drm_intel_bo_busy(intel_obj->buffer)) {
drm_bacon_bo_busy(intel_obj->buffer)) {
/* Ensure that the base alignment of the allocation meets the alignment
* guarantees the driver has advertised to the application.
*/
const unsigned alignment = ctx->Const.MinMapBufferAlignment;
intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
intel_obj->range_map_bo[index] = drm_intel_bo_alloc(brw->bufmgr,
intel_obj->range_map_bo[index] = drm_bacon_bo_alloc(brw->bufmgr,
"BO blit temp",
length +
intel_obj->map_extra[index],
@ -432,7 +432,7 @@ brw_map_buffer_range(struct gl_context *ctx,
brw_bo_map(brw, intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0, "range-map");
} else {
drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
drm_bacon_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
}
obj->Mappings[index].Pointer =
intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
@ -441,13 +441,13 @@ brw_map_buffer_range(struct gl_context *ctx,
if (access & GL_MAP_UNSYNCHRONIZED_BIT) {
if (!brw->has_llc && brw->perf_debug &&
drm_intel_bo_busy(intel_obj->buffer)) {
drm_bacon_bo_busy(intel_obj->buffer)) {
perf_debug("MapBufferRange with GL_MAP_UNSYNCHRONIZED_BIT stalling (it's actually synchronized on non-LLC platforms)\n");
}
drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
drm_bacon_gem_bo_map_unsynchronized(intel_obj->buffer);
} else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
(access & GL_MAP_PERSISTENT_BIT))) {
drm_intel_gem_bo_map_gtt(intel_obj->buffer);
drm_bacon_gem_bo_map_gtt(intel_obj->buffer);
mark_buffer_inactive(intel_obj);
} else {
brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0,
@ -543,7 +543,7 @@ brw_unmap_buffer(struct gl_context *ctx,
assert(intel_obj);
assert(obj->Mappings[index].Pointer);
if (intel_obj->range_map_bo[index] != NULL) {
drm_intel_bo_unmap(intel_obj->range_map_bo[index]);
drm_bacon_bo_unmap(intel_obj->range_map_bo[index]);
if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
intel_emit_linear_blit(brw,
@ -562,10 +562,10 @@ brw_unmap_buffer(struct gl_context *ctx,
*/
brw_emit_mi_flush(brw);
drm_intel_bo_unreference(intel_obj->range_map_bo[index]);
drm_bacon_bo_unreference(intel_obj->range_map_bo[index]);
intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
drm_intel_bo_unmap(intel_obj->buffer);
drm_bacon_bo_unmap(intel_obj->buffer);
}
obj->Mappings[index].Pointer = NULL;
obj->Mappings[index].Offset = 0;
@ -581,7 +581,7 @@ brw_unmap_buffer(struct gl_context *ctx,
* Anywhere that uses buffer objects in the pipeline should be using this to
* mark the range of the buffer that is being accessed by the pipeline.
*/
drm_intel_bo *
drm_bacon_bo *
intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *intel_obj,
uint32_t offset, uint32_t size)
@ -615,7 +615,7 @@ brw_copy_buffer_subdata(struct gl_context *ctx,
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_src = intel_buffer_object(src);
struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
drm_intel_bo *src_bo, *dst_bo;
drm_bacon_bo *src_bo, *dst_bo;
if (size == 0)
return;
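
From a caller's point of view, the brw_bo_map() wrapper above behaves exactly like drm_bacon_bo_map(), except that any stall is attributed to the named buffer in the perf_debug output. A usage sketch; upload_vertices() and the "vertex upload" name are illustrative, not from the patch:

static void
upload_vertices(struct brw_context *brw, drm_bacon_bo *bo,
                const void *data, size_t size)
{
   /* A stall here is reported as "CPU mapping a busy vertex upload BO ...". */
   if (brw_bo_map(brw, bo, 1 /* write_enable */, "vertex upload") == 0) {
      memcpy(bo->virtual, data, size);
      drm_bacon_bo_unmap(bo);
   }
}
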
@ -38,9 +38,9 @@ struct gl_buffer_object;
struct intel_buffer_object
{
struct gl_buffer_object Base;
drm_intel_bo *buffer; /* the low-level buffer manager's buffer handle */
drm_bacon_bo *buffer; /* the low-level buffer manager's buffer handle */
drm_intel_bo *range_map_bo[MAP_COUNT];
drm_bacon_bo *range_map_bo[MAP_COUNT];
/**
* Alignment offset from the range_map_bo temporary mapping to the returned
@ -82,7 +82,7 @@ struct intel_buffer_object
/* Get the bm buffer associated with a GL bufferobject:
*/
drm_intel_bo *intel_bufferobj_buffer(struct brw_context *brw,
drm_bacon_bo *intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *obj,
uint32_t offset,
uint32_t size);
@ -91,13 +91,13 @@ void intel_upload_data(struct brw_context *brw,
const void *data,
uint32_t size,
uint32_t alignment,
drm_intel_bo **out_bo,
drm_bacon_bo **out_bo,
uint32_t *out_offset);
void *intel_upload_space(struct brw_context *brw,
uint32_t size,
uint32_t alignment,
drm_intel_bo **out_bo,
drm_bacon_bo **out_bo,
uint32_t *out_offset);
void intel_upload_finish(struct brw_context *brw);

@ -37,7 +37,7 @@
#include <drm.h>
#include <i915_drm.h>
#include "libdrm_macros.h"
#include "intel_bufmgr.h"
#include "brw_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "xf86drm.h"

@ -58,7 +58,7 @@
#endif
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "brw_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"

@ -1058,7 +1058,7 @@ brw_render_cache_set_clear(struct brw_context *brw)
}
void
brw_render_cache_set_add_bo(struct brw_context *brw, drm_intel_bo *bo)
brw_render_cache_set_add_bo(struct brw_context *brw, drm_bacon_bo *bo)
{
_mesa_set_add(brw->render_cache, bo);
}
@ -1076,7 +1076,7 @@ brw_render_cache_set_add_bo(struct brw_context *brw, drm_intel_bo *bo)
* different caches within a batchbuffer, it's all our responsibility.
*/
void
brw_render_cache_set_check_flush(struct brw_context *brw, drm_intel_bo *bo)
brw_render_cache_set_check_flush(struct brw_context *brw, drm_bacon_bo *bo)
{
if (!_mesa_set_search(brw->render_cache, bo))
return;

@ -236,8 +236,8 @@ intel_renderbuffer_upsample(struct brw_context *brw,
struct intel_renderbuffer *irb);
void brw_render_cache_set_clear(struct brw_context *brw);
void brw_render_cache_set_add_bo(struct brw_context *brw, drm_intel_bo *bo);
void brw_render_cache_set_check_flush(struct brw_context *brw, drm_intel_bo *bo);
void brw_render_cache_set_add_bo(struct brw_context *brw, drm_bacon_bo *bo);
void brw_render_cache_set_check_flush(struct brw_context *brw, drm_bacon_bo *bo);
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples);

@ -40,7 +40,7 @@
#include <xf86drm.h>
#include "main/mtypes.h"
#include "intel_bufmgr.h"
#include "brw_bufmgr.h"
#include <GL/internal/dri_interface.h>
#ifdef __cplusplus
@ -66,7 +66,7 @@ struct intel_image_format {
struct __DRIimageRec {
struct intel_screen *screen;
drm_intel_bo *bo;
drm_bacon_bo *bo;
uint32_t pitch; /**< in bytes */
GLenum internal_format;
uint32_t dri_format;

@ -616,13 +616,13 @@ miptree_create(struct brw_context *brw,
if (format == MESA_FORMAT_S_UINT8) {
/* Align to size of W tile, 64x64. */
mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
ALIGN(mt->total_width, 64),
ALIGN(mt->total_height, 64),
mt->cpp, &mt->tiling, &pitch,
alloc_flags);
} else {
mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
mt->total_width, mt->total_height,
mt->cpp, &mt->tiling, &pitch,
alloc_flags);
@ -665,8 +665,8 @@ intel_miptree_create(struct brw_context *brw,
mt->total_width, mt->total_height);
mt->tiling = I915_TILING_X;
drm_intel_bo_unreference(mt->bo);
mt->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "miptree",
drm_bacon_bo_unreference(mt->bo);
mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
mt->total_width, mt->total_height, mt->cpp,
&mt->tiling, &pitch, alloc_flags);
mt->pitch = pitch;
@ -719,7 +719,7 @@ intel_miptree_create(struct brw_context *brw,
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
drm_intel_bo *bo,
drm_bacon_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
@ -732,7 +732,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
uint32_t tiling, swizzle;
GLenum target;
drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
drm_bacon_bo_get_tiling(bo, &tiling, &swizzle);
/* Nothing will be able to use this miptree with the BO if the offset isn't
* aligned.
@ -761,7 +761,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
if (!mt)
return NULL;
drm_intel_bo_reference(bo);
drm_bacon_bo_reference(bo);
mt->bo = bo;
mt->pitch = pitch;
mt->offset = offset;
@ -783,7 +783,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
struct intel_renderbuffer *irb,
drm_intel_bo *bo,
drm_bacon_bo *bo,
uint32_t width, uint32_t height,
uint32_t pitch)
{
@ -914,7 +914,7 @@ intel_miptree_hiz_buffer_free(struct intel_miptree_hiz_buffer *hiz_buf)
if (hiz_buf->mt)
intel_miptree_release(&hiz_buf->mt);
else
drm_intel_bo_unreference(hiz_buf->aux_base.bo);
drm_bacon_bo_unreference(hiz_buf->aux_base.bo);
free(hiz_buf);
}
@ -931,12 +931,12 @@ intel_miptree_release(struct intel_mipmap_tree **mt)
DBG("%s deleting %p\n", __func__, *mt);
drm_intel_bo_unreference((*mt)->bo);
drm_bacon_bo_unreference((*mt)->bo);
intel_miptree_release(&(*mt)->stencil_mt);
intel_miptree_release(&(*mt)->r8stencil_mt);
intel_miptree_hiz_buffer_free((*mt)->hiz_buf);
if ((*mt)->mcs_buf) {
drm_intel_bo_unreference((*mt)->mcs_buf->bo);
drm_bacon_bo_unreference((*mt)->mcs_buf->bo);
free((*mt)->mcs_buf);
}
intel_resolve_map_clear(&(*mt)->hiz_map);
@ -1389,13 +1389,13 @@ intel_miptree_init_mcs(struct brw_context *brw,
const int ret = brw_bo_map_gtt(brw, mt->mcs_buf->bo, "miptree");
if (unlikely(ret)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
drm_intel_bo_unreference(mt->mcs_buf->bo);
drm_bacon_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
return;
}
void *data = mt->mcs_buf->bo->virtual;
memset(data, init_value, mt->mcs_buf->size);
drm_intel_bo_unmap(mt->mcs_buf->bo);
drm_bacon_bo_unmap(mt->mcs_buf->bo);
}
static struct intel_miptree_aux_buffer *
@ -1442,7 +1442,7 @@ intel_mcs_miptree_buf_create(struct brw_context *brw,
* structure should go away. We use miptree create simply as a means to make
* sure all the constraints for the buffer are satisfied.
*/
drm_intel_bo_reference(temp_mt->bo);
drm_bacon_bo_reference(temp_mt->bo);
intel_miptree_release(&temp_mt);
return buf;
@ -1551,7 +1551,7 @@ intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
* Therefore one can pass the ISL dimensions in terms of bytes instead of
* trying to recalculate based on different format block sizes.
*/
buf->bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
buf->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
buf->pitch, buf->size / buf->pitch,
1, &tiling, &pitch, alloc_flags);
if (buf->bo) {
@ -1688,7 +1688,7 @@ intel_gen7_hiz_buf_create(struct brw_context *brw,
unsigned long pitch;
uint32_t tiling = I915_TILING_Y;
buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
buf->aux_base.bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "hiz",
hz_width, hz_height, 1,
&tiling, &pitch,
BO_ALLOC_FOR_RENDER);
@ -1696,7 +1696,7 @@ intel_gen7_hiz_buf_create(struct brw_context *brw,
free(buf);
return NULL;
} else if (tiling != I915_TILING_Y) {
drm_intel_bo_unreference(buf->aux_base.bo);
drm_bacon_bo_unreference(buf->aux_base.bo);
free(buf);
return NULL;
}
@ -1785,7 +1785,7 @@ intel_gen8_hiz_buf_create(struct brw_context *brw,
unsigned long pitch;
uint32_t tiling = I915_TILING_Y;
buf->aux_base.bo = drm_intel_bo_alloc_tiled(brw->bufmgr, "hiz",
buf->aux_base.bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "hiz",
hz_width, hz_height, 1,
&tiling, &pitch,
BO_ALLOC_FOR_RENDER);
@ -1793,7 +1793,7 @@ intel_gen8_hiz_buf_create(struct brw_context *brw,
free(buf);
return NULL;
} else if (tiling != I915_TILING_Y) {
drm_intel_bo_unreference(buf->aux_base.bo);
drm_bacon_bo_unreference(buf->aux_base.bo);
free(buf);
return NULL;
}
@ -2272,7 +2272,7 @@ intel_miptree_make_shareable(struct brw_context *brw,
if (mt->mcs_buf) {
intel_miptree_all_slices_resolve_color(brw, mt, 0);
mt->aux_disable |= (INTEL_AUX_DISABLE_CCS | INTEL_AUX_DISABLE_MCS);
drm_intel_bo_unreference(mt->mcs_buf->bo);
drm_bacon_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
mt->mcs_buf = NULL;
@ -2455,9 +2455,9 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
*/
intel_miptree_all_slices_resolve_color(brw, mt, 0);
drm_intel_bo *bo = mt->bo;
drm_bacon_bo *bo = mt->bo;
if (drm_intel_bo_references(brw->batch.bo, bo))
if (drm_bacon_bo_references(brw->batch.bo, bo))
intel_batchbuffer_flush(brw);
/* brw_bo_map() uses a WB mmaping of the buffer's backing storage. It
@ -2483,7 +2483,7 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
static void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
drm_intel_bo_unmap(mt->bo);
drm_bacon_bo_unmap(mt->bo);
}
static void

@ -31,7 +31,7 @@
* The hardware has a fixed layout of a texture depending on parameters such
* as the target/type (2D, 3D, CUBE), width, height, pitch, and number of
* mipmap levels. The individual level/layer slices are each 2D rectangles of
* pixels at some x/y offset from the start of the drm_intel_bo.
* pixels at some x/y offset from the start of the drm_bacon_bo.
*
* Original OpenGL allowed texture miplevels to be specified in arbitrary
* order, and a texture may change size over time. Thus, each
@ -48,7 +48,7 @@
#include "main/mtypes.h"
#include "isl/isl.h"
#include "intel_bufmgr.h"
#include "brw_bufmgr.h"
#include "intel_resolve_map.h"
#include <GL/internal/dri_interface.h>
@ -279,7 +279,7 @@ struct intel_miptree_aux_buffer
* @see RENDER_SURFACE_STATE.AuxiliarySurfaceBaseAddress
* @see 3DSTATE_HIER_DEPTH_BUFFER.AuxiliarySurfaceBaseAddress
*/
drm_intel_bo *bo;
drm_bacon_bo *bo;
/**
* Offset into bo where the surface starts.
@ -345,7 +345,7 @@ struct intel_mipmap_tree
* @see 3DSTATE_HIER_DEPTH_BUFFER.SurfaceBaseAddress
* @see 3DSTATE_STENCIL_BUFFER.SurfaceBaseAddress
*/
drm_intel_bo *bo;
drm_bacon_bo *bo;
/**
* Pitch in bytes.
@ -698,7 +698,7 @@ struct intel_mipmap_tree *intel_miptree_create(struct brw_context *brw,
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
drm_intel_bo *bo,
drm_bacon_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
@ -710,7 +710,7 @@ intel_miptree_create_for_bo(struct brw_context *brw,
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
struct intel_renderbuffer *irb,
drm_intel_bo *bo,
drm_bacon_bo *bo,
uint32_t width, uint32_t height,
uint32_t pitch);
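
The intel_miptree_create_for_bo() hunks above reduce to a small protocol for adopting an externally allocated BO: ask the kernel for the tiling, reject unusably unaligned offsets, and take a reference of the miptree's own. A sketch under the same assumption that the drm_bacon_* calls mirror drm_intel_*; miptree_adopt_bo() itself is illustrative only:

static bool
miptree_adopt_bo(struct intel_mipmap_tree *mt, drm_bacon_bo *bo,
                 uint32_t offset, uint32_t pitch)
{
   uint32_t tiling, swizzle;

   /* The kernel, not the caller, is authoritative about tiling. */
   drm_bacon_bo_get_tiling(bo, &tiling, &swizzle);

   /* Mirrors the alignment concern noted in the hunk above: a tiled
    * surface starting off a tile boundary is unusable. */
   if (tiling != I915_TILING_NONE && (offset & 0xfff) != 0)
      return false;

   drm_bacon_bo_reference(bo); /* the miptree holds its own reference */
   mt->bo = bo;
   mt->offset = offset;
   mt->pitch = pitch;
   return true;
}
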
@ -57,7 +57,7 @@ do_blit_drawpixels(struct gl_context * ctx,
|
|||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
|
||||
GLuint src_offset;
|
||||
drm_intel_bo *src_buffer;
|
||||
drm_bacon_bo *src_buffer;
|
||||
|
||||
DBG("%s\n", __func__);
|
||||
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
|
|||
int dst_pitch;
|
||||
|
||||
/* The miptree's buffer. */
|
||||
drm_intel_bo *bo;
|
||||
drm_bacon_bo *bo;
|
||||
|
||||
int error = 0;
|
||||
|
||||
|
|
@ -142,7 +142,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
|
|||
|
||||
bo = irb->mt->bo;
|
||||
|
||||
if (drm_intel_bo_references(brw->batch.bo, bo)) {
|
||||
if (drm_bacon_bo_references(brw->batch.bo, bo)) {
|
||||
perf_debug("Flushing before mapping a referenced bo.\n");
|
||||
intel_batchbuffer_flush(brw);
|
||||
}
|
||||
|
|
@ -195,7 +195,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
|
|||
mem_copy
|
||||
);
|
||||
|
||||
drm_intel_bo_unmap(bo);
|
||||
drm_bacon_bo_unmap(bo);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|

@ -105,7 +105,7 @@ DRI_CONF_END

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
-#include "intel_bufmgr.h"
+#include "brw_bufmgr.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_screen.h"

@ -294,7 +294,7 @@ static void
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
   uint32_t tiling, swizzle;
-   drm_intel_bo_get_tiling(image->bo, &tiling, &swizzle);
+   drm_bacon_bo_get_tiling(image->bo, &tiling, &swizzle);

   if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
      _mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",

@ -375,9 +375,9 @@ intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
                                   &image->tile_x,
                                   &image->tile_y);

-   drm_intel_bo_unreference(image->bo);
+   drm_bacon_bo_unreference(image->bo);
   image->bo = mt->bo;
-   drm_intel_bo_reference(mt->bo);
+   drm_bacon_bo_reference(mt->bo);
}

static __DRIimage *

@ -401,7 +401,7 @@ intel_create_image_from_name(__DRIscreen *dri_screen,
   image->width = width;
   image->height = height;
   image->pitch = pitch * cpp;
-   image->bo = drm_intel_bo_gem_create_from_name(screen->bufmgr, "image",
+   image->bo = drm_bacon_bo_gem_create_from_name(screen->bufmgr, "image",
                                                  name);
   if (!image->bo) {
      free(image);

@ -437,9 +437,9 @@ intel_create_image_from_renderbuffer(__DRIcontext *context,
   image->format = rb->Format;
   image->offset = 0;
   image->data = loaderPrivate;
-   drm_intel_bo_unreference(image->bo);
+   drm_bacon_bo_unreference(image->bo);
   image->bo = irb->mt->bo;
-   drm_intel_bo_reference(irb->mt->bo);
+   drm_bacon_bo_reference(irb->mt->bo);
   image->width = rb->Width;
   image->height = rb->Height;
   image->pitch = irb->mt->pitch;

@ -513,7 +513,7 @@ intel_create_image_from_texture(__DRIcontext *context, int target,
static void
intel_destroy_image(__DRIimage *image)
{
-   drm_intel_bo_unreference(image->bo);
+   drm_bacon_bo_unreference(image->bo);
   free(image);
}

@ -613,7 +613,7 @@ intel_create_image_common(__DRIscreen *dri_screen,
      return NULL;

   cpp = _mesa_get_format_bytes(image->format);
-   image->bo = drm_intel_bo_alloc_tiled(screen->bufmgr, "image",
+   image->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr, "image",
                                        width, height, cpp, &tiling,
                                        &pitch, 0);
   if (image->bo == NULL) {

@ -660,7 +660,7 @@ intel_query_image(__DRIimage *image, int attrib, int *value)
      *value = image->bo->handle;
      return true;
   case __DRI_IMAGE_ATTRIB_NAME:
-      return !drm_intel_bo_flink(image->bo, (uint32_t *) value);
+      return !drm_bacon_bo_flink(image->bo, (uint32_t *) value);
   case __DRI_IMAGE_ATTRIB_FORMAT:
      *value = image->dri_format;
      return true;

@ -676,7 +676,7 @@ intel_query_image(__DRIimage *image, int attrib, int *value)
      *value = image->planar_format->components;
      return true;
   case __DRI_IMAGE_ATTRIB_FD:
-      return !drm_intel_bo_gem_export_to_prime(image->bo, value);
+      return !drm_bacon_bo_gem_export_to_prime(image->bo, value);
   case __DRI_IMAGE_ATTRIB_FOURCC:
      return intel_lookup_fourcc(image->dri_format, value);
   case __DRI_IMAGE_ATTRIB_NUM_PLANES:
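
As the two intel_query_image() hunks show, a __DRIimage bo can be exported two ways: a global GEM "flink" name (the legacy path) or a dma-buf file descriptor via prime. A small usage sketch, assuming the drm_bacon_* calls mirror their libdrm drm_intel_* signatures (both return 0 on success):

/* Export the same image bo as a flink name and as a prime fd. */
uint32_t name;  /* global GEM name; visible system-wide      */
int fd;         /* dma-buf fd; scoped to this process        */

if (drm_bacon_bo_flink(image->bo, &name) == 0)
   printf("flink name: %u\n", name);
if (drm_bacon_bo_gem_export_to_prime(image->bo, &fd) == 0)
   printf("prime fd: %d\n", fd);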

@ -706,7 +706,7 @@ intel_dup_image(__DRIimage *orig_image, void *loaderPrivate)
   if (image == NULL)
      return NULL;

-   drm_intel_bo_reference(orig_image->bo);
+   drm_bacon_bo_reference(orig_image->bo);
   image->bo = orig_image->bo;
   image->internal_format = orig_image->internal_format;
   image->planar_format = orig_image->planar_format;

@ -824,7 +824,7 @@ intel_create_image_from_fds(__DRIscreen *dri_screen,
      size = end;
   }

-   image->bo = drm_intel_bo_gem_create_from_prime(screen->bufmgr,
+   image->bo = drm_bacon_bo_gem_create_from_prime(screen->bufmgr,
                                                   fds[0], size);
   if (image->bo == NULL) {
      free(image);

@ -916,7 +916,7 @@ intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
   }

   image->bo = parent->bo;
-   drm_intel_bo_reference(parent->bo);
+   drm_bacon_bo_reference(parent->bo);

   image->width = width;
   image->height = height;

@ -1111,7 +1111,7 @@ intelDestroyScreen(__DRIscreen * sPriv)
{
   struct intel_screen *screen = sPriv->driverPrivate;

-   drm_intel_bufmgr_destroy(screen->bufmgr);
+   drm_bacon_bufmgr_destroy(screen->bufmgr);
   driDestroyOptionInfo(&screen->optionCache);

   ralloc_free(screen);

@ -1274,7 +1274,7 @@ intel_init_bufmgr(struct intel_screen *screen)

   screen->no_hw = getenv("INTEL_NO_HW") != NULL;

-   screen->bufmgr = drm_intel_bufmgr_gem_init(dri_screen->fd, BATCH_SZ);
+   screen->bufmgr = drm_bacon_bufmgr_gem_init(dri_screen->fd, BATCH_SZ);
   if (screen->bufmgr == NULL) {
      fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
              __func__, __LINE__);
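
Taken together with the intelInitScreen2() and intelDestroyScreen() hunks elsewhere in this diff, the bufmgr's screen-level lifecycle after this commit looks as follows. This is a consolidated sketch of calls shown in the diff, not new code:

/* Setup: one bufmgr per screen, created from the DRM fd. */
screen->bufmgr = drm_bacon_bufmgr_gem_init(dri_screen->fd, BATCH_SZ);
if (screen->bufmgr == NULL)
   return false;

screen->deviceID = drm_bacon_bufmgr_gem_get_devid(screen->bufmgr);

if (INTEL_DEBUG & DEBUG_BUFMGR)
   drm_bacon_bufmgr_set_debug(screen->bufmgr, true);

/* Teardown, from intelDestroyScreen(): */
drm_bacon_bufmgr_destroy(screen->bufmgr);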

@ -1292,20 +1292,20 @@ intel_init_bufmgr(struct intel_screen *screen)
static bool
intel_detect_swizzling(struct intel_screen *screen)
{
-   drm_intel_bo *buffer;
+   drm_bacon_bo *buffer;
   unsigned long flags = 0;
   unsigned long aligned_pitch;
   uint32_t tiling = I915_TILING_X;
   uint32_t swizzle_mode = 0;

-   buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "swizzle test",
+   buffer = drm_bacon_bo_alloc_tiled(screen->bufmgr, "swizzle test",
                                     64, 64, 4,
                                     &tiling, &aligned_pitch, flags);
   if (buffer == NULL)
      return false;

-   drm_intel_bo_get_tiling(buffer, &tiling, &swizzle_mode);
-   drm_intel_bo_unreference(buffer);
+   drm_bacon_bo_get_tiling(buffer, &tiling, &swizzle_mode);
+   drm_bacon_bo_unreference(buffer);

   if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
      return false;
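
intel_detect_swizzling() probes whether the memory controller applies bit-6 swizzling to tiled surfaces: allocate a small X-tiled bo, ask the kernel what swizzle mode it actually got, and free it. When a swizzle mode is reported, CPU access to tiled memory has to compensate. As an illustration, in the common I915_BIT_6_SWIZZLE_9_10 mode bit 6 of a byte address is XORed with bits 9 and 10, so a de-swizzling helper would look like this (a sketch of the standard formula, not code from this commit):

/* Fold bits 9 and 10 into bit 6 of a tiled-buffer byte address. */
static inline uint32_t
deswizzle_bit6_9_10(uint32_t addr)
{
   /* (addr >> 3) brings bit 9 to bit 6; (addr >> 4) brings bit 10. */
   return addr ^ (((addr >> 3) ^ (addr >> 4)) & 64);
}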

@ -1325,13 +1325,13 @@ intel_detect_timestamp(struct intel_screen *screen)
    * More recent kernels offer an interface to read the full 36bits
    * everywhere.
    */
-   if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
+   if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP | 1, &dummy) == 0)
      return 3;

   /* Determine if we have a 32bit or 64bit kernel by inspecting the
    * upper 32bits for a rapidly changing timestamp.
    */
-   if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &last))
+   if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &last))
      return 0;

   upper = lower = 0;

@ -1339,7 +1339,7 @@ intel_detect_timestamp(struct intel_screen *screen)
   /* The TIMESTAMP should change every 80ns, so several round trips
    * through the kernel should be enough to advance it.
    */
-   if (drm_intel_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
+   if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
      return 0;

   upper += (dummy >> 32) != (last >> 32);
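
Only fragments of intel_detect_timestamp() survive in the hunks above, so for orientation: the function classifies how the kernel exposes the 36-bit TIMESTAMP register. If the kernel can read the full register (the TIMESTAMP | 1 probe), it returns immediately; otherwise it samples the register repeatedly and watches which 32-bit half of the 64-bit result advances. A sketch of that sampling loop; the ten-trip count and the return-value mapping are assumptions drawn from the surrounding code, not quoted from this diff:

uint64_t dummy = 0, last = 0;
int upper = 0, lower = 0;

if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &last))
   return 0;

for (int loop = 0; loop < 10; loop++) {
   if (drm_bacon_reg_read(screen->bufmgr, TIMESTAMP, &dummy))
      return 0;

   upper += (dummy >> 32) != (last >> 32);        /* upper half ticking */
   lower += (uint32_t)dummy != (uint32_t)last;    /* lower half ticking */
   last = dummy;
}

return upper > lower ? 2 : (lower ? 1 : 0);      /* illustrative mapping */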

@ -1368,21 +1368,21 @@ static bool
intel_detect_pipelined_register(struct intel_screen *screen,
                                int reg, uint32_t expected_value, bool reset)
{
-   drm_intel_bo *results, *bo;
+   drm_bacon_bo *results, *bo;
   uint32_t *batch;
   uint32_t offset = 0;
   bool success = false;

   /* Create a zero'ed temporary buffer for reading our results */
-   results = drm_intel_bo_alloc(screen->bufmgr, "registers", 4096, 0);
+   results = drm_bacon_bo_alloc(screen->bufmgr, "registers", 4096, 0);
   if (results == NULL)
      goto err;

-   bo = drm_intel_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
+   bo = drm_bacon_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
   if (bo == NULL)
      goto err_results;

-   if (drm_intel_bo_map(bo, 1))
+   if (drm_bacon_bo_map(bo, 1))
      goto err_batch;

   batch = bo->virtual;

@ -1395,7 +1395,7 @@ intel_detect_pipelined_register(struct intel_screen *screen,
   /* Save the register's value back to the buffer. */
   *batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
   *batch++ = reg;
-   drm_intel_bo_emit_reloc(bo, (char *)batch - (char *)bo->virtual,
+   drm_bacon_bo_emit_reloc(bo, (char *)batch - (char *)bo->virtual,
                           results, offset*sizeof(uint32_t),
                           I915_GEM_DOMAIN_INSTRUCTION,
                           I915_GEM_DOMAIN_INSTRUCTION);

@ -1410,20 +1410,20 @@ intel_detect_pipelined_register(struct intel_screen *screen,

   *batch++ = MI_BATCH_BUFFER_END;

-   drm_intel_bo_mrb_exec(bo, ALIGN((char *)batch - (char *)bo->virtual, 8),
+   drm_bacon_bo_mrb_exec(bo, ALIGN((char *)batch - (char *)bo->virtual, 8),
                         NULL, 0, 0,
                         I915_EXEC_RENDER);

   /* Check whether the value got written. */
-   if (drm_intel_bo_map(results, false) == 0) {
+   if (drm_bacon_bo_map(results, false) == 0) {
      success = *((uint32_t *)results->virtual + offset) == expected_value;
-      drm_intel_bo_unmap(results);
+      drm_bacon_bo_unmap(results);
   }

err_batch:
-   drm_intel_bo_unreference(bo);
+   drm_bacon_bo_unreference(bo);
err_results:
-   drm_intel_bo_unreference(results);
+   drm_bacon_bo_unreference(results);
err:
   return success;
}
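
intel_detect_pipelined_register() answers the question "can userspace program this register from a batch?": it hand-rolls a tiny batch, executes it, and reads the result back. The register-write step (MI_LOAD_REGISTER_IMM) falls in an elided hunk above, so here is a condensed sketch of the batch body; it paraphrases the surrounding code under the same signature assumptions rather than quoting this diff:

/* Write the register... */
*batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
*batch++ = reg;
*batch++ = expected_value;

/* ...store it into 'results' at dword 'offset'; the reloc patches
 * the address dword at execbuf time... */
*batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
*batch++ = reg;
drm_bacon_bo_emit_reloc(bo, (char *)batch - (char *)bo->virtual,
                        results, offset * sizeof(uint32_t),
                        I915_GEM_DOMAIN_INSTRUCTION,
                        I915_GEM_DOMAIN_INSTRUCTION);
*batch++ = results->offset64 + offset * sizeof(uint32_t);

/* ...and terminate the batch. */
*batch++ = MI_BATCH_BUFFER_END;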

@ -1763,7 +1763,7 @@ __DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
   if (!intel_init_bufmgr(screen))
      return NULL;

-   screen->deviceID = drm_intel_bufmgr_gem_get_devid(screen->bufmgr);
+   screen->deviceID = drm_bacon_bufmgr_gem_get_devid(screen->bufmgr);
   if (!gen_get_device_info(screen->deviceID, &screen->devinfo))
      return NULL;

@ -1772,7 +1772,7 @@ __DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
   brw_process_intel_debug_variable();

   if (INTEL_DEBUG & DEBUG_BUFMGR)
-      drm_intel_bufmgr_set_debug(screen->bufmgr, true);
+      drm_bacon_bufmgr_set_debug(screen->bufmgr, true);

   if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && devinfo->gen < 7) {
      fprintf(stderr,

@ -1786,7 +1786,7 @@ __DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)
    * Currently the entire (global) address space for all GTT maps is
    * limited to 64bits. That is all objects on the system that are
    * setup for GTT mmapping must fit within 64bits. An attempt to use
-    * one that exceeds the limit will fail in drm_intel_bo_map_gtt().
+    * one that exceeds the limit will fail in drm_bacon_bo_map_gtt().
    *
    * Long before we hit that limit, we will be practically limited by
    * that any single object must fit in physical memory (RAM). The upper

@ -2005,7 +2005,7 @@ __DRIconfig **intelInitScreen2(__DRIscreen *dri_screen)

struct intel_buffer {
   __DRIbuffer base;
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;
};

static __DRIbuffer *

@ -2029,7 +2029,7 @@ intelAllocateBuffer(__DRIscreen *dri_screen,
   uint32_t tiling = I915_TILING_X;
   unsigned long pitch;
   int cpp = format / 8;
-   intelBuffer->bo = drm_intel_bo_alloc_tiled(screen->bufmgr,
+   intelBuffer->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr,
                                              "intelAllocateBuffer",
                                              width,
                                              height,

@ -2042,7 +2042,7 @@ intelAllocateBuffer(__DRIscreen *dri_screen,
      return NULL;
   }

-   drm_intel_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
+   drm_bacon_bo_flink(intelBuffer->bo, &intelBuffer->base.name);

   intelBuffer->base.attachment = attachment;
   intelBuffer->base.cpp = cpp;

@ -2056,7 +2056,7 @@ intelReleaseBuffer(__DRIscreen *dri_screen, __DRIbuffer *buffer)
{
   struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;

-   drm_intel_bo_unreference(intelBuffer->bo);
+   drm_bacon_bo_unreference(intelBuffer->bo);
   free(intelBuffer);
}

@ -32,7 +32,7 @@
#include <GL/internal/dri_interface.h>

#include "dri_util.h"
-#include "intel_bufmgr.h"
+#include "brw_bufmgr.h"
#include "common/gen_device_info.h"
#include "i915_drm.h"
#include "xmlconfig.h"

@ -72,7 +72,7 @@ struct intel_screen
#define KERNEL_ALLOWS_HSW_SCRATCH1_AND_ROW_CHICKEN3 (1<<3)
#define KERNEL_ALLOWS_COMPUTE_DISPATCH (1<<4)

-   drm_intel_bufmgr *bufmgr;
+   drm_bacon_bufmgr *bufmgr;

   /**
    * A unique ID for shader programs.

@ -333,7 +333,7 @@ intel_set_texture_storage_for_buffer_object(struct gl_context *ctx,

   assert(intel_texobj->mt == NULL);

-   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
+   drm_bacon_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
                                              buffer_offset,
                                              row_stride * image->Height);
   intel_texobj->mt =

@ -128,7 +128,7 @@ intelTexImage(struct gl_context * ctx,
   struct intel_texture_image *intelImage = intel_texture_image(texImage);
   bool ok;

-   bool tex_busy = intelImage->mt && drm_intel_bo_busy(intelImage->mt->bo);
+   bool tex_busy = intelImage->mt && drm_bacon_bo_busy(intelImage->mt->bo);

   DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
       __func__, _mesa_get_format_name(texImage->TexFormat),

@ -467,7 +467,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
   int dst_pitch;

   /* The miptree's buffer. */
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;

   int error = 0;

@ -527,7 +527,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
   bo = image->mt->bo;

-   if (drm_intel_bo_references(brw->batch.bo, bo)) {
+   if (drm_bacon_bo_references(brw->batch.bo, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(brw);
   }

@ -565,7 +565,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
      mem_copy
   );

-   drm_intel_bo_unmap(bo);
+   drm_bacon_bo_unmap(bo);
   return true;
}

@ -84,7 +84,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
   int src_pitch;

   /* The miptree's buffer. */
-   drm_intel_bo *bo;
+   drm_bacon_bo *bo;

   int error = 0;

@ -143,7 +143,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
   bo = image->mt->bo;

-   if (drm_intel_bo_references(brw->batch.bo, bo)) {
+   if (drm_bacon_bo_references(brw->batch.bo, bo)) {
      perf_debug("Flushing before mapping a referenced bo.\n");
      intel_batchbuffer_flush(brw);
   }

@ -185,7 +185,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
      mem_copy
   );

-   drm_intel_bo_unmap(bo);
+   drm_bacon_bo_unmap(bo);
   return true;
}

@ -202,7 +202,7 @@ intelTexSubImage(struct gl_context * ctx,
   struct intel_mipmap_tree *mt = intel_texture_image(texImage)->mt;
   bool ok;

-   bool tex_busy = mt && drm_intel_bo_busy(mt->bo);
+   bool tex_busy = mt && drm_bacon_bo_busy(mt->bo);

   if (mt && mt->format == MESA_FORMAT_S_UINT8)
      mt->r8stencil_needs_update = true;
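
The tex_busy checks here and in intelTexImage() feed the upload-path choice: drm_bacon_bo_busy() (same contract as the old drm_intel_bo_busy(), per the rename) reports whether the GPU is still using the bo, so the driver can pick an upload path that will not stall. A hedged illustration of the idiom; the staging comment describes the general strategy, not this commit's code:

/* Prefer a non-stalling path while the destination bo is in flight. */
bool tex_busy = mt && drm_bacon_bo_busy(mt->bo);

if (tex_busy) {
   /* e.g. stage the pixels through a fresh bo and blit, instead of
    * mapping mt->bo directly and waiting for the GPU to finish */
}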

@ -50,8 +50,8 @@ intel_upload_finish(struct brw_context *brw)
   if (!brw->upload.bo)
      return;

-   drm_intel_bo_unmap(brw->upload.bo);
-   drm_intel_bo_unreference(brw->upload.bo);
+   drm_bacon_bo_unmap(brw->upload.bo);
+   drm_bacon_bo_unreference(brw->upload.bo);
   brw->upload.bo = NULL;
   brw->upload.next_offset = 0;
}

@ -83,7 +83,7 @@ void *
intel_upload_space(struct brw_context *brw,
                   uint32_t size,
                   uint32_t alignment,
-                  drm_intel_bo **out_bo,
+                  drm_bacon_bo **out_bo,
                   uint32_t *out_offset)
{
   uint32_t offset;

@ -95,21 +95,21 @@ intel_upload_space(struct brw_context *brw,
   }

   if (!brw->upload.bo) {
-      brw->upload.bo = drm_intel_bo_alloc(brw->bufmgr, "streamed data",
+      brw->upload.bo = drm_bacon_bo_alloc(brw->bufmgr, "streamed data",
                                          MAX2(INTEL_UPLOAD_SIZE, size), 4096);
      if (brw->has_llc)
-         drm_intel_bo_map(brw->upload.bo, true);
+         drm_bacon_bo_map(brw->upload.bo, true);
      else
-         drm_intel_gem_bo_map_gtt(brw->upload.bo);
+         drm_bacon_gem_bo_map_gtt(brw->upload.bo);
   }

   brw->upload.next_offset = offset + size;

   *out_offset = offset;
   if (*out_bo != brw->upload.bo) {
-      drm_intel_bo_unreference(*out_bo);
+      drm_bacon_bo_unreference(*out_bo);
      *out_bo = brw->upload.bo;
-      drm_intel_bo_reference(brw->upload.bo);
+      drm_bacon_bo_reference(brw->upload.bo);
   }

   return brw->upload.bo->virtual + offset;
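
intel_upload_space() is the driver's streamed-data allocator: it suballocates from a long-lived, persistently mapped bo (CPU-mapped on LLC systems, GTT-mapped otherwise) and hands back a pointer plus a referenced bo/offset pair; intel_upload_data(), whose signature follows in the next hunk, wraps it with a memcpy. A usage sketch under the same signature assumptions; upload_vertices is a hypothetical caller, not part of this commit:

/* Stream some vertex data and remember where it landed. */
static void
upload_vertices(struct brw_context *brw,
                const float *verts, uint32_t size)
{
   drm_bacon_bo *bo = NULL;   /* must start NULL: the helper swaps refs */
   uint32_t offset;

   /* intel_upload_data() copies for you; intel_upload_space()
    * would instead return a pointer to fill in place. */
   intel_upload_data(brw, verts, size, 64, &bo, &offset);

   /* 'bo' now holds a reference; emit state pointing at bo/offset,
    * then drop the reference when done. */
   drm_bacon_bo_unreference(bo);
}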

@ -125,7 +125,7 @@ intel_upload_data(struct brw_context *brw,
                  const void *data,
                  uint32_t size,
                  uint32_t alignment,
-                  drm_intel_bo **out_bo,
+                  drm_bacon_bo **out_bo,
                  uint32_t *out_offset)
{
   void *dst = intel_upload_space(brw, size, alignment, out_bo, out_offset);

@ -38,8 +38,6 @@
#include "config.h"
#endif

-#if HAVE_LIBDRM_ATOMIC_PRIMITIVES
-
#define HAS_ATOMIC_OPS 1

typedef struct {

@ -113,5 +111,3 @@ static inline int atomic_add_unless(atomic_t *v, int add, int unless)
      c = old;
   return c == unless;
}
-
-#endif
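
The final hunk trims the tail of atomic_add_unless(); only its last lines are visible above. For context, the helper is the usual compare-and-swap retry loop: keep trying to add `add` while the current value is not `unless`, and report whether the excluded value was hit. A hedged reconstruction from the visible fragment and the upstream libdrm xf86atomic.h pattern:

static inline int atomic_add_unless(atomic_t *v, int add, int unless)
{
   int c, old;
   c = atomic_read(v);
   /* Retry until the cmpxchg lands or the value becomes 'unless'. */
   while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}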