ilo: remove intel_bo_get_virtual()

Make the map functions return the pointer directly.
Chia-I Wu 2014-03-08 16:22:06 +08:00
parent 90786613e9
commit 790c32ec75
6 changed files with 141 additions and 114 deletions
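
The shape of the change, as an editorial sketch (not part of the commit): the map functions used to return an int error code, with the actual pointer fetched by a separate intel_bo_get_virtual() call; after this commit the map call itself returns the pointer, or NULL on failure. The helpers below are hypothetical and merely mirror process_query_for_timestamp() to show a caller before and after:

/* assumes "intel_winsys.h" for the intel_bo_* declarations */
#include <stdint.h>

/* before: error code and pointer come from two different calls */
static uint64_t
read_timestamp_old(struct intel_bo *bo)
{
   uint64_t ts = 0;

   if (intel_bo_map(bo, false) == 0) {   /* int: 0 means success */
      const uint64_t *vals = intel_bo_get_virtual(bo);
      ts = vals[0];
      intel_bo_unmap(bo);
   }

   return ts;
}

/* after: one call; NULL signals failure */
static uint64_t
read_timestamp_new(struct intel_bo *bo)
{
   const uint64_t *vals = intel_bo_map(bo, false);
   uint64_t ts = 0;

   if (vals) {
      ts = vals[0];
      intel_bo_unmap(bo);
   }

   return ts;
}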


@@ -47,8 +47,7 @@ process_query_for_occlusion_counter(struct ilo_3d *hw3d,
    /* in pairs */
    assert(q->reg_read % 2 == 0);
 
-   intel_bo_map(q->bo, false);
-   vals = intel_bo_get_virtual(q->bo);
+   vals = intel_bo_map(q->bo, false);
    for (i = 1; i < q->reg_read; i += 2)
       depth_count += vals[i] - vals[i - 1];
    intel_bo_unmap(q->bo);
@@ -72,8 +71,7 @@ process_query_for_timestamp(struct ilo_3d *hw3d, struct ilo_query *q)
    assert(q->reg_read == 1);
 
-   intel_bo_map(q->bo, false);
-   vals = intel_bo_get_virtual(q->bo);
+   vals = intel_bo_map(q->bo, false);
    timestamp = vals[0];
    intel_bo_unmap(q->bo);
@@ -90,8 +88,7 @@ process_query_for_time_elapsed(struct ilo_3d *hw3d, struct ilo_query *q)
    /* in pairs */
    assert(q->reg_read % 2 == 0);
 
-   intel_bo_map(q->bo, false);
-   vals = intel_bo_get_virtual(q->bo);
+   vals = intel_bo_map(q->bo, false);
    for (i = 1; i < q->reg_read; i += 2)
       elapsed += vals[i] - vals[i - 1];


@@ -517,12 +517,17 @@ static void dump_binding_table(struct brw_context *brw, uint32_t offset,
    }
 }
 
-static void
+static bool
 init_brw(struct brw_context *brw, struct ilo_3d_pipeline *p)
 {
    brw->intel.gen = ILO_GEN_GET_MAJOR(p->dev->gen);
-   brw->intel.batch.bo_dst.virtual = intel_bo_get_virtual(p->cp->bo);
+   brw->intel.batch.bo_dst.virtual = intel_bo_map(p->cp->bo, false);
+   if (!brw->intel.batch.bo_dst.virtual)
+      return false;
+
    brw->intel.batch.bo = &brw->intel.batch.bo_dst;
+
+   return true;
 }
 
 static void
@@ -531,7 +536,8 @@ dump_3d_state(struct ilo_3d_pipeline *p)
    struct brw_context brw;
    int num_states, i;
 
-   init_brw(&brw, p);
+   if (!init_brw(&brw, p))
+      return;
 
    if (brw.intel.gen >= 7) {
       dump_cc_viewport_state(&brw, p->state.CC_VIEWPORT);
@@ -627,6 +633,8 @@ dump_3d_state(struct ilo_3d_pipeline *p)
    (void) dump_sf_state;
    (void) dump_wm_state;
    (void) dump_cc_state_gen4;
+
+   intel_bo_unmap(p->cp->bo);
 }
 
 /**
@@ -635,13 +643,6 @@ dump_3d_state(struct ilo_3d_pipeline *p)
 void
 ilo_3d_pipeline_dump(struct ilo_3d_pipeline *p)
 {
-   int err;
-
    ilo_cp_dump(p->cp);
-
-   err = intel_bo_map(p->cp->bo, false);
-   if (!err) {
-      dump_3d_state(p);
-      intel_bo_unmap(p->cp->bo);
-   }
+   dump_3d_state(p);
 }


@@ -171,10 +171,8 @@ ilo_cp_realloc_bo(struct ilo_cp *cp)
       intel_bo_unreference(cp->bo);
    cp->bo = bo;
 
-   if (!cp->sys) {
-      intel_bo_map(cp->bo, true);
-      cp->ptr = intel_bo_get_virtual(cp->bo);
-   }
+   if (!cp->sys)
+      cp->ptr = intel_bo_map(cp->bo, true);
 }
 
 /**


@@ -50,29 +50,29 @@ is_bo_busy(struct ilo_context *ilo, struct intel_bo *bo, bool *need_flush)
    return intel_bo_is_busy(bo);
 }
 
-static bool
+static void *
 map_bo_for_transfer(struct ilo_context *ilo, struct intel_bo *bo,
                     const struct ilo_transfer *xfer)
 {
-   int err;
+   void *ptr;
 
    switch (xfer->method) {
    case ILO_TRANSFER_MAP_CPU:
-      err = intel_bo_map(bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
+      ptr = intel_bo_map(bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
      break;
    case ILO_TRANSFER_MAP_GTT:
-      err = intel_bo_map_gtt(bo);
+      ptr = intel_bo_map_gtt(bo);
      break;
    case ILO_TRANSFER_MAP_UNSYNC:
-      err = intel_bo_map_unsynchronized(bo);
+      ptr = intel_bo_map_unsynchronized(bo);
      break;
    default:
      assert(!"unknown mapping method");
-      err = -1;
+      ptr = NULL;
      break;
    }
 
-   return !err;
+   return ptr;
 }
 
 /**
@@ -406,29 +406,62 @@ tex_tile_choose_offset_func(const struct ilo_texture *tex,
    }
 }
 
-static void
+static void *
+tex_staging_sys_map_bo(const struct ilo_context *ilo,
+                       struct ilo_texture *tex,
+                       bool for_read_back, bool linear_view)
+{
+   const bool prefer_cpu = (ilo->dev->has_llc || for_read_back);
+   void *ptr;
+
+   if (prefer_cpu && (tex->tiling == INTEL_TILING_NONE || !linear_view))
+      ptr = intel_bo_map(tex->bo, !for_read_back);
+   else
+      ptr = intel_bo_map_gtt(tex->bo);
+
+   return ptr;
+}
+
+static void
+tex_staging_sys_unmap_bo(const struct ilo_context *ilo,
+                         const struct ilo_texture *tex)
+{
+   intel_bo_unmap(tex->bo);
+}
+
+static bool
 tex_staging_sys_zs_read(struct ilo_context *ilo,
                         struct ilo_texture *tex,
                         const struct ilo_transfer *xfer)
 {
    const bool swizzle = ilo->dev->has_address_swizzling;
    const struct pipe_box *box = &xfer->base.box;
-   const uint8_t *src = intel_bo_get_virtual(tex->bo);
+   const uint8_t *src;
    tex_tile_offset_func tile_offset;
    unsigned tiles_per_row;
    int slice;
 
+   src = tex_staging_sys_map_bo(ilo, tex, true, false);
+   if (!src)
+      return false;
+
    tile_offset = tex_tile_choose_offset_func(tex, &tiles_per_row);
 
    assert(tex->block_width == 1 && tex->block_height == 1);
 
    if (tex->separate_s8) {
       struct ilo_texture *s8_tex = tex->separate_s8;
-      const uint8_t *s8_src = intel_bo_get_virtual(s8_tex->bo);
+      const uint8_t *s8_src;
       tex_tile_offset_func s8_tile_offset;
       unsigned s8_tiles_per_row;
       int dst_cpp, dst_s8_pos, src_cpp_used;
 
+      s8_src = tex_staging_sys_map_bo(ilo, s8_tex, true, false);
+      if (!s8_src) {
+         tex_staging_sys_unmap_bo(ilo, tex);
+         return false;
+      }
+
       s8_tile_offset = tex_tile_choose_offset_func(s8_tex, &s8_tiles_per_row);
 
       if (tex->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
@@ -482,6 +515,8 @@ tex_staging_sys_zs_read(struct ilo_context *ilo,
             s8_mem_y++;
          }
       }
+
+      tex_staging_sys_unmap_bo(ilo, s8_tex);
    }
    else {
       assert(tex->bo_format == PIPE_FORMAT_S8_UINT);
@@ -515,31 +550,45 @@ tex_staging_sys_zs_read(struct ilo_context *ilo,
          }
       }
    }
+
+   tex_staging_sys_unmap_bo(ilo, tex);
+
+   return true;
 }
 
-static void
+static bool
 tex_staging_sys_zs_write(struct ilo_context *ilo,
                          struct ilo_texture *tex,
                          const struct ilo_transfer *xfer)
 {
    const bool swizzle = ilo->dev->has_address_swizzling;
    const struct pipe_box *box = &xfer->base.box;
-   uint8_t *dst = intel_bo_get_virtual(tex->bo);
+   uint8_t *dst;
    tex_tile_offset_func tile_offset;
    unsigned tiles_per_row;
    int slice;
 
+   dst = tex_staging_sys_map_bo(ilo, tex, false, false);
+   if (!dst)
+      return false;
+
    tile_offset = tex_tile_choose_offset_func(tex, &tiles_per_row);
 
    assert(tex->block_width == 1 && tex->block_height == 1);
 
    if (tex->separate_s8) {
       struct ilo_texture *s8_tex = tex->separate_s8;
-      uint8_t *s8_dst = intel_bo_get_virtual(s8_tex->bo);
+      uint8_t *s8_dst;
       tex_tile_offset_func s8_tile_offset;
       unsigned s8_tiles_per_row;
       int src_cpp, src_s8_pos, dst_cpp_used;
 
+      s8_dst = tex_staging_sys_map_bo(ilo, s8_tex, false, false);
+      if (!s8_dst) {
+         tex_staging_sys_unmap_bo(ilo, tex);
+         return false;
+      }
+
      s8_tile_offset = tex_tile_choose_offset_func(s8_tex, &s8_tiles_per_row);
 
      if (tex->base.format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
@@ -593,6 +642,8 @@ tex_staging_sys_zs_write(struct ilo_context *ilo,
             s8_mem_y++;
          }
       }
+
+      tex_staging_sys_unmap_bo(ilo, s8_tex);
    }
    else {
       assert(tex->bo_format == PIPE_FORMAT_S8_UINT);
@@ -626,9 +677,13 @@ tex_staging_sys_zs_write(struct ilo_context *ilo,
          }
       }
    }
+
+   tex_staging_sys_unmap_bo(ilo, tex);
+
+   return true;
 }
 
-static void
+static bool
 tex_staging_sys_convert_write(struct ilo_context *ilo,
                               struct ilo_texture *tex,
                               const struct ilo_transfer *xfer)
@@ -638,7 +693,10 @@ tex_staging_sys_convert_write(struct ilo_context *ilo,
    void *dst;
    int slice;
 
-   dst = intel_bo_get_virtual(tex->bo);
+   dst = tex_staging_sys_map_bo(ilo, tex, false, true);
+   if (!dst)
+      return false;
+
    dst += tex_get_box_offset(tex, xfer->base.level, box);
 
    /* slice stride is not always available */
@@ -652,7 +710,10 @@ tex_staging_sys_convert_write(struct ilo_context *ilo,
                     0, 0, 0, box->width, box->height, box->depth,
                     xfer->staging_sys, xfer->base.stride, xfer->base.layer_stride,
                     0, 0, 0);
-      return;
+
+      tex_staging_sys_unmap_bo(ilo, tex);
+
+      return true;
    }
 
    switch (tex->base.format) {
@@ -674,39 +735,10 @@ tex_staging_sys_convert_write(struct ilo_context *ilo,
       assert(!"unable to convert the staging data");
       break;
    }
+
+   tex_staging_sys_unmap_bo(ilo, tex);
+
+   return true;
 }
 
-static bool
-tex_staging_sys_map_bo(const struct ilo_context *ilo,
-                       const struct ilo_texture *tex,
-                       bool for_read_back, bool linear_view)
-{
-   const bool prefer_cpu = (ilo->dev->has_llc || for_read_back);
-   int err;
-
-   if (prefer_cpu && (tex->tiling == INTEL_TILING_NONE || !linear_view))
-      err = intel_bo_map(tex->bo, !for_read_back);
-   else
-      err = intel_bo_map_gtt(tex->bo);
-
-   if (!tex->separate_s8)
-      return !err;
-
-   err = intel_bo_map(tex->separate_s8->bo, !for_read_back);
-   if (err)
-      intel_bo_unmap(tex->bo);
-
-   return !err;
-}
-
-static void
-tex_staging_sys_unmap_bo(const struct ilo_context *ilo,
-                         const struct ilo_texture *tex)
-{
-   if (tex->separate_s8)
-      intel_bo_unmap(tex->separate_s8->bo);
-
-   intel_bo_unmap(tex->bo);
-}
-
 static void
@@ -723,18 +755,10 @@ tex_staging_sys_unmap(struct ilo_context *ilo,
    switch (xfer->method) {
    case ILO_TRANSFER_MAP_SW_CONVERT:
-      success = tex_staging_sys_map_bo(ilo, tex, false, true);
-      if (success) {
-         tex_staging_sys_convert_write(ilo, tex, xfer);
-         tex_staging_sys_unmap_bo(ilo, tex);
-      }
+      success = tex_staging_sys_convert_write(ilo, tex, xfer);
      break;
    case ILO_TRANSFER_MAP_SW_ZS:
-      success = tex_staging_sys_map_bo(ilo, tex, false, false);
-      if (success) {
-         tex_staging_sys_zs_write(ilo, tex, xfer);
-         tex_staging_sys_unmap_bo(ilo, tex);
-      }
+      success = tex_staging_sys_zs_write(ilo, tex, xfer);
      break;
    default:
      assert(!"unknown mapping method");
@@ -788,11 +812,7 @@ tex_staging_sys_map(struct ilo_context *ilo,
      success = false;
      break;
    case ILO_TRANSFER_MAP_SW_ZS:
-      success = tex_staging_sys_map_bo(ilo, tex, true, false);
-      if (success) {
-         tex_staging_sys_zs_read(ilo, tex, xfer);
-         tex_staging_sys_unmap_bo(ilo, tex);
-      }
+      success = tex_staging_sys_zs_read(ilo, tex, xfer);
      break;
    default:
      assert(!"unknown mapping method");
@@ -816,9 +836,12 @@ tex_direct_map(struct ilo_context *ilo,
                struct ilo_texture *tex,
                struct ilo_transfer *xfer)
 {
-   if (!map_bo_for_transfer(ilo, tex->bo, xfer))
+   xfer->ptr = map_bo_for_transfer(ilo, tex->bo, xfer);
+   if (!xfer->ptr)
       return false;
 
+   xfer->ptr += tex_get_box_offset(tex, xfer->base.level, &xfer->base.box);
+
    /* note that stride is for a block row, not a texel row */
    xfer->base.stride = tex->bo_stride;
@@ -828,9 +851,6 @@ tex_direct_map(struct ilo_context *ilo,
    else
       xfer->base.layer_stride = 0;
 
-   xfer->ptr = intel_bo_get_virtual(tex->bo);
-   xfer->ptr += tex_get_box_offset(tex, xfer->base.level, &xfer->base.box);
-
    return true;
 }
@@ -891,7 +911,8 @@ buf_map(struct ilo_context *ilo, struct ilo_transfer *xfer)
    if (!choose_transfer_method(ilo, xfer))
      return false;
 
-   if (!map_bo_for_transfer(ilo, buf->bo, xfer))
+   xfer->ptr = map_bo_for_transfer(ilo, buf->bo, xfer);
+   if (!xfer->ptr)
      return false;
 
    assert(xfer->base.level == 0);
@@ -900,12 +921,10 @@ buf_map(struct ilo_context *ilo, struct ilo_transfer *xfer)
    assert(xfer->base.box.height == 1);
    assert(xfer->base.box.depth == 1);
 
+   xfer->ptr += xfer->base.box.x;
    xfer->base.stride = 0;
    xfer->base.layer_stride = 0;
 
-   xfer->ptr = intel_bo_get_virtual(buf->bo);
-   xfer->ptr += xfer->base.box.x;
-
    return true;
 }


@@ -365,7 +365,7 @@ void
 intel_winsys_decode_commands(struct intel_winsys *winsys,
                              struct intel_bo *bo, int used)
 {
-   int err;
+   void *ptr;
 
    if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
@@ -376,8 +376,8 @@ intel_winsys_decode_commands(struct intel_winsys *winsys,
      drm_intel_decode_set_output_file(winsys->decode, stderr);
    }
 
-   err = intel_bo_map(bo, false);
-   if (err) {
+   ptr = intel_bo_map(bo, false);
+   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
    }
@@ -386,7 +386,7 @@ intel_winsys_decode_commands(struct intel_winsys *winsys,
    used /= 4;
 
    drm_intel_decode_set_batch_pointer(winsys->decode,
-                                      gem_bo(bo)->virtual, gem_bo(bo)->offset64, used);
+                                      ptr, gem_bo(bo)->offset64, used);
 
    drm_intel_decode(winsys->decode);
@@ -412,27 +412,45 @@ intel_bo_get_size(const struct intel_bo *bo)
 }
 
 void *
-intel_bo_get_virtual(const struct intel_bo *bo)
+intel_bo_map(struct intel_bo *bo, bool write_enable)
 {
+   int err;
+
+   err = drm_intel_bo_map(gem_bo(bo), write_enable);
+   if (err) {
+      debug_error("failed to map bo");
+      return NULL;
+   }
+
    return gem_bo(bo)->virtual;
 }
 
-int
-intel_bo_map(struct intel_bo *bo, bool write_enable)
-{
-   return drm_intel_bo_map(gem_bo(bo), write_enable);
-}
-
-int
+void *
 intel_bo_map_gtt(struct intel_bo *bo)
 {
-   return drm_intel_gem_bo_map_gtt(gem_bo(bo));
+   int err;
+
+   err = drm_intel_gem_bo_map_gtt(gem_bo(bo));
+   if (err) {
+      debug_error("failed to map bo");
+      return NULL;
+   }
+
+   return gem_bo(bo)->virtual;
 }
 
-int
+void *
 intel_bo_map_unsynchronized(struct intel_bo *bo)
 {
-   return drm_intel_gem_bo_map_unsynchronized(gem_bo(bo));
+   int err;
+
+   err = drm_intel_gem_bo_map_unsynchronized(gem_bo(bo));
+   if (err) {
+      debug_error("failed to map bo");
+      return NULL;
+   }
+
+   return gem_bo(bo)->virtual;
 }
 
 void


@@ -204,12 +204,6 @@ intel_bo_unreference(struct intel_bo *bo);
 unsigned long
 intel_bo_get_size(const struct intel_bo *bo);
 
-/**
- * Return the pointer to the memory area of the mapped \p bo.
- */
-void *
-intel_bo_get_virtual(const struct intel_bo *bo);
-
 /**
  * Map \p bo for CPU access. Recursive mapping is allowed.
  *
@@ -227,13 +221,13 @@ intel_bo_get_virtual(const struct intel_bo *bo);
  * map_unsynchronized() is similar to map_gtt(), except that it does not
  * block.
  */
-int
+void *
 intel_bo_map(struct intel_bo *bo, bool write_enable);
 
-int
+void *
 intel_bo_map_gtt(struct intel_bo *bo);
 
-int
+void *
 intel_bo_map_unsynchronized(struct intel_bo *bo);
 
 /**
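
A closing usage sketch against the header above (editorial, not part of the commit; upload_data() is a hypothetical helper): with the new declarations, a NULL check on the returned pointer replaces the old error-code check, and intel_bo_unmap() still ends the mapping.

#include <stdbool.h>
#include <string.h>
#include "intel_winsys.h"   /* intel_bo_map(), intel_bo_unmap() */

/* copy user data into a mapped bo */
static bool
upload_data(struct intel_bo *bo, const void *data, size_t size)
{
   void *ptr = intel_bo_map(bo, true);   /* write_enable = true: we store */

   if (!ptr)
      return false;                      /* map failed; nothing to unmap */

   memcpy(ptr, data, size);
   intel_bo_unmap(bo);

   return true;
}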