iris: Rename bo->gtt_offset to bo->address

This is the virtual memory address of the buffer object.  Calling it the
BO's address is a lot more obvious than calling it an offset in one of
the now many graphics translation tables.

Reviewed-by: Tapani Pälli <tapani.palli@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12206>
Author: Kenneth Graunke, 2021-07-19 21:23:18 -07:00 (committed by Marge Bot)
Parent commit: c964e5f099
This commit: 2616e15c01
12 changed files with 79 additions and 79 deletions

View file

@ -129,7 +129,7 @@ decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
for (int i = 0; i < batch->exec_count; i++) {
struct iris_bo *bo = batch->exec_bos[i];
/* The decoder zeroes out the top 16 bits, so we need to as well */
uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);
uint64_t bo_address = bo->address & (~0ull >> 16);
if (address >= bo_address && address < bo_address + bo->size) {
return (struct intel_batch_decode_bo) {
@ -163,7 +163,7 @@ decode_batch(struct iris_batch *batch)
{
void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
intel_print_batch(&batch->decoder, map, batch->primary_batch_size,
batch->exec_bos[0]->gtt_offset, false);
batch->exec_bos[0]->address, false);
}
void
@ -341,7 +341,7 @@ iris_use_pinned_bo(struct iris_batch *batch,
batch->validation_list[batch->exec_count] =
(struct drm_i915_gem_exec_object2) {
.handle = bo->gem_handle,
.offset = bo->gtt_offset,
.offset = bo->address,
.flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
};
@ -497,7 +497,7 @@ iris_chain_to_new_batch(struct iris_batch *batch)
/* Emit MI_BATCH_BUFFER_START to chain to another batch. */
*cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
*addr = batch->bo->gtt_offset;
*addr = batch->bo->address;
}
static void
@ -517,7 +517,7 @@ add_aux_map_bos_to_batch(struct iris_batch *batch)
batch->validation_list[batch->exec_count] =
(struct drm_i915_gem_exec_object2) {
.handle = bo->gem_handle,
.offset = bo->gtt_offset,
.offset = bo->address,
.flags = bo->kflags,
};
batch->aperture_space += bo->size;
@ -637,7 +637,7 @@ submit_batch(struct iris_batch *batch)
/* The requirement for using I915_EXEC_NO_RELOC are:
*
* The addresses written in the objects must match the corresponding
* reloc.gtt_offset which in turn must match the corresponding
* reloc.address which in turn must match the corresponding
* execobject.offset.
*
* Any render targets written to in the batch must be flagged with

View file

@ -77,7 +77,7 @@ binder_realloc(struct iris_context *ice)
/* Place the new binder just after the old binder, unless we've hit the
* end of the memory zone...then wrap around to the start again.
*/
next_address = binder->bo->gtt_offset + IRIS_BINDER_SIZE;
next_address = binder->bo->address + IRIS_BINDER_SIZE;
if (next_address >= IRIS_MEMZONE_BINDLESS_START)
next_address = IRIS_MEMZONE_BINDER_START;
@ -87,7 +87,7 @@ binder_realloc(struct iris_context *ice)
binder->bo = iris_bo_alloc(bufmgr, "binder", IRIS_BINDER_SIZE, 1,
IRIS_MEMZONE_BINDER, 0);
binder->bo->gtt_offset = next_address;
binder->bo->address = next_address;
binder->map = iris_bo_map(NULL, binder->bo, MAP_WRITE);
binder->insert_point = INIT_INSERT_POINT;
@ -186,7 +186,7 @@ iris_binder_reserve_3d(struct iris_context *ice)
if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
binder->bt_offset[stage] = sizes[stage] > 0 ? offset : 0;
iris_record_state_size(ice->state.sizes,
binder->bo->gtt_offset + offset, sizes[stage]);
binder->bo->address + offset, sizes[stage]);
offset += sizes[stage];
}
}

View file

@ -62,10 +62,10 @@ stream_state(struct iris_batch *batch,
iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
iris_record_state_size(batch->state_sizes,
bo->gtt_offset + *out_offset, size);
bo->address + *out_offset, size);
/* If the caller has asked for a BO, we leave them the responsibility of
* adding bo->gtt_offset (say, by handing an address to genxml). If not,
* adding bo->address (say, by handing an address to genxml). If not,
* we assume they want the offset from a base address.
*/
if (out_bo)
@ -96,7 +96,7 @@ combine_and_pin_address(struct blorp_batch *blorp_batch,
IRIS_DOMAIN_NONE);
/* Assume this is a general address, not relative to a base. */
return bo->gtt_offset + addr.offset;
return bo->address + addr.offset;
}
static uint64_t
@ -159,7 +159,7 @@ blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
surface_maps[i] = stream_state(batch, ice->state.surface_uploader,
state_size, state_alignment,
&surface_offsets[i], NULL);
bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->gtt_offset;
bt_map[i] = surface_offsets[i] - (uint32_t) binder->bo->address;
}
iris_use_pinned_bo(batch, binder->bo, false, IRIS_DOMAIN_NONE);
@ -207,7 +207,7 @@ blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *blorp_batch,
for (unsigned i = 0; i < num_vbs; i++) {
struct iris_bo *bo = addrs[i].buffer;
uint16_t high_bits = bo->gtt_offset >> 32u;
uint16_t high_bits = bo->address >> 32u;
if (high_bits != ice->state.last_vbo_high_bits[i]) {
need_invalidate = true;

View file

@ -449,7 +449,7 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
continue;
/* Try a little harder to find one that's already in the right memzone */
if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
if (match_zone && memzone != iris_memzone_for_address(cur->address))
continue;
/* If the last BO in the cache is busy, there are no idle BOs. Bail,
@ -483,7 +483,7 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
* removed from the aux-map.
*/
if (bo->bufmgr->aux_map_ctx)
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
bo->size);
bo->aux_map_address = 0;
}
@ -491,10 +491,10 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
/* If the cached BO isn't in the right memory zone, or the alignment
* isn't sufficient, free the old memory and assign it a new address.
*/
if (memzone != iris_memzone_for_address(bo->gtt_offset) ||
bo->gtt_offset % alignment != 0) {
vma_free(bufmgr, bo->gtt_offset, bo->size);
bo->gtt_offset = 0ull;
if (memzone != iris_memzone_for_address(bo->address) ||
bo->address % alignment != 0) {
vma_free(bufmgr, bo->address, bo->size);
bo->address = 0ull;
}
/* Zero the contents if necessary. If this fails, fall back to
@ -639,12 +639,12 @@ iris_bo_alloc(struct iris_bufmgr *bufmgr,
return NULL;
}
if (bo->gtt_offset == 0ull) {
if (bo->address == 0ull) {
simple_mtx_lock(&bufmgr->lock);
bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, alignment);
bo->address = vma_alloc(bufmgr, memzone, bo->size, alignment);
simple_mtx_unlock(&bufmgr->lock);
if (bo->gtt_offset == 0ull)
if (bo->address == 0ull)
goto err_free;
}
@ -725,10 +725,10 @@ iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
simple_mtx_lock(&bufmgr->lock);
bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
bo->address = vma_alloc(bufmgr, memzone, size, 1);
simple_mtx_unlock(&bufmgr->lock);
if (bo->gtt_offset == 0ull)
if (bo->address == 0ull)
goto err_close;
p_atomic_set(&bo->refcount, 1);
@ -801,7 +801,7 @@ iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
bo->imported = true;
bo->mmap_mode = IRIS_MMAP_WC;
bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
bo->address = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);
_mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
_mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
@ -849,12 +849,12 @@ bo_close(struct iris_bo *bo)
}
if (bo->aux_map_address && bo->bufmgr->aux_map_ctx) {
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->gtt_offset,
intel_aux_map_unmap_range(bo->bufmgr->aux_map_ctx, bo->address,
bo->size);
}
/* Return the VMA for reuse */
vma_free(bo->bufmgr, bo->gtt_offset, bo->size);
vma_free(bo->bufmgr, bo->address, bo->size);
free(bo);
}
@ -1313,7 +1313,7 @@ iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
* in case. We always align to 64KB even on platforms where we don't need
* to, because it's a fairly reasonable thing to do anyway.
*/
bo->gtt_offset =
bo->address =
vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 64 * 1024);
bo->gem_handle = handle;
@ -1644,7 +1644,7 @@ intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
IRIS_MEMZONE_OTHER, 0);
buf->driver_bo = bo;
buf->gpu = bo->gtt_offset;
buf->gpu = bo->address;
buf->gpu_end = buf->gpu + bo->size;
buf->map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
return buf;

View file

@ -153,7 +153,7 @@ struct iris_bo {
* Although each hardware context has its own VMA, we assign BO's to the
* same address in all contexts, for simplicity.
*/
uint64_t gtt_offset;
uint64_t address;
/**
* If non-zero, then this bo has an aux-map translation to this address.
@ -425,8 +425,8 @@ iris_bo_offset_from_base_address(struct iris_bo *bo)
/* This only works for buffers in the memory zones corresponding to a
* base address - the top, unbounded memory zone doesn't have a base.
*/
assert(bo->gtt_offset < IRIS_MEMZONE_OTHER_START);
return bo->gtt_offset;
assert(bo->address < IRIS_MEMZONE_OTHER_START);
return bo->address;
}
/**

View file

@ -349,7 +349,7 @@ iris_update_grid_size_resource(struct iris_context *ice,
state_ref->offset +=
iris_bo_offset_from_base_address(iris_resource_bo(state_ref->res));
isl_buffer_fill_state(&screen->isl_dev, surf_map,
.address = grid_ref->offset + grid_bo->gtt_offset,
.address = grid_ref->offset + grid_bo->address,
.size_B = sizeof(grid->grid),
.format = ISL_FORMAT_RAW,
.stride_B = 1,

View file

@ -55,7 +55,7 @@ __gen_combine_address(struct iris_batch *batch, void *location,
iris_use_pinned_bo(batch, addr.bo,
!iris_domain_is_read_only(addr.access), addr.access);
/* Assume this is a general address, not relative to a base. */
result += addr.bo->gtt_offset;
result += addr.bo->address;
}
return result;

View file

@ -209,7 +209,7 @@ iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
const bool dataport = ssbo || !screen->compiler->indirect_ubos_use_sampler;
isl_buffer_fill_state(&screen->isl_dev, map,
.address = res->bo->gtt_offset + res->offset +
.address = res->bo->address + res->offset +
buf->buffer_offset,
.size_B = buf->buffer_size - res->offset,
.format = dataport ? ISL_FORMAT_RAW
@ -2407,7 +2407,7 @@ iris_get_scratch_surf(struct iris_context *ice,
screen->isl_dev.ss.size, 64);
isl_buffer_fill_state(&screen->isl_dev, map,
.address = scratch_bo->gtt_offset,
.address = scratch_bo->address,
.size_B = scratch_bo->size,
.format = ISL_FORMAT_RAW,
.swizzle = ISL_SWIZZLE_IDENTITY,

View file

@ -165,7 +165,7 @@ iris_upload_shader(struct iris_screen *screen,
memcpy(shader->map, assembly, shader->prog_data->program_size);
struct iris_resource *res = (void *) shader->assembly.res;
uint64_t shader_data_addr = res->bo->gtt_offset +
uint64_t shader_data_addr = res->bo->address +
shader->assembly.offset +
shader->prog_data->const_data_offset;

View file

@ -540,10 +540,10 @@ map_aux_addresses(struct iris_screen *screen, struct iris_resource *res,
res->aux.extra_aux.offset : res->aux.offset;
const uint64_t format_bits =
intel_aux_map_format_bits(res->surf.tiling, format, plane);
intel_aux_map_add_mapping(aux_map_ctx, res->bo->gtt_offset + res->offset,
res->aux.bo->gtt_offset + aux_offset,
intel_aux_map_add_mapping(aux_map_ctx, res->bo->address + res->offset,
res->aux.bo->address + aux_offset,
res->surf.size_B, format_bits);
res->bo->aux_map_address = res->aux.bo->gtt_offset;
res->bo->aux_map_address = res->aux.bo->address;
}
}
@ -1605,7 +1605,7 @@ iris_invalidate_resource(struct pipe_context *ctx,
struct iris_bo *old_bo = res->bo;
struct iris_bo *new_bo =
iris_bo_alloc(screen->bufmgr, res->bo->name, resource->width0, 1,
iris_memzone_for_address(old_bo->gtt_offset), 0);
iris_memzone_for_address(old_bo->address), 0);
if (!new_bo)
return;

View file

@ -204,7 +204,7 @@ struct iris_surface_state {
unsigned num_states;
/**
* Address of the resource (res->bo->gtt_offset). Note that "Surface
* Address of the resource (res->bo->address). Note that "Surface
* Base Address" may be offset from this value.
*/
uint64_t bo_address;

View file

@ -342,7 +342,7 @@ stream_state(struct iris_batch *batch,
iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
iris_record_state_size(batch->state_sizes,
bo->gtt_offset + *out_offset, size);
bo->address + *out_offset, size);
*out_offset += iris_bo_offset_from_base_address(bo);
@ -2106,7 +2106,7 @@ iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
struct iris_bo *bo = iris_resource_bo(res);
iris_record_state_size(ice->state.sizes,
bo->gtt_offset + shs->sampler_table.offset, size);
bo->address + shs->sampler_table.offset, size);
shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
@ -2219,7 +2219,7 @@ fill_buffer_surface_state(struct isl_device *isl_dev,
IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
isl_buffer_fill_state(isl_dev, map,
.address = res->bo->gtt_offset + res->offset + offset,
.address = res->bo->address + res->offset + offset,
.size_B = final_size,
.format = format,
.swizzle = swizzle,
@ -2285,7 +2285,7 @@ update_surface_state_addrs(struct u_upload_mgr *mgr,
struct iris_surface_state *surf_state,
struct iris_bo *bo)
{
if (surf_state->bo_address == bo->gtt_offset)
if (surf_state->bo_address == bo->address)
return false;
STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
@ -2297,14 +2297,14 @@ update_surface_state_addrs(struct u_upload_mgr *mgr,
* the QWord containing Surface Base Address.
*/
for (unsigned i = 0; i < surf_state->num_states; i++) {
*ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
*ss_addr = *ss_addr - surf_state->bo_address + bo->address;
ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
}
/* Next, upload the updated copies to a GPU buffer. */
upload_surface_states(mgr, surf_state);
surf_state->bo_address = bo->gtt_offset;
surf_state->bo_address = bo->address;
return true;
}
@ -2324,7 +2324,7 @@ fill_surface_state(struct isl_device *isl_dev,
.surf = surf,
.view = view,
.mocs = iris_mocs(res->bo, isl_dev, view->usage),
.address = res->bo->gtt_offset + res->offset + extra_main_offset,
.address = res->bo->address + res->offset + extra_main_offset,
.x_offset_sa = tile_x_sa,
.y_offset_sa = tile_y_sa,
};
@ -2334,14 +2334,14 @@ fill_surface_state(struct isl_device *isl_dev,
if (aux_usage != ISL_AUX_USAGE_NONE) {
f.aux_surf = &res->aux.surf;
f.aux_usage = aux_usage;
f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
f.aux_address = res->aux.bo->address + res->aux.offset;
struct iris_bo *clear_bo = NULL;
uint64_t clear_offset = 0;
f.clear_color =
iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
if (clear_bo) {
f.clear_address = clear_bo->gtt_offset + clear_offset;
f.clear_address = clear_bo->address + clear_offset;
f.use_clear_address = isl_dev->info->ver > 9;
}
}
@ -2385,7 +2385,7 @@ iris_create_sampler_view(struct pipe_context *ctx,
alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
isv->surface_state.bo_address = isv->res->bo->gtt_offset;
isv->surface_state.bo_address = isv->res->bo->address;
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
@ -2573,11 +2573,11 @@ iris_create_surface(struct pipe_context *ctx,
alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
surf->surface_state.bo_address = res->bo->gtt_offset;
surf->surface_state.bo_address = res->bo->address;
#if GFX_VER == 8
alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
surf->surface_state_read.bo_address = res->bo->gtt_offset;
surf->surface_state_read.bo_address = res->bo->address;
#endif
if (!isl_format_is_compressed(res->surf.format)) {
@ -2640,7 +2640,7 @@ iris_create_surface(struct pipe_context *ctx,
.view = view,
.mocs = iris_mocs(res->bo, &screen->isl_dev,
ISL_SURF_USAGE_RENDER_TARGET_BIT),
.address = res->bo->gtt_offset + offset_B,
.address = res->bo->address + offset_B,
.x_offset_sa = tile_x_el, /* Single-sampled, so el == sa */
.y_offset_sa = tile_y_el, /* Single-sampled, so el == sa */
};
@ -2723,7 +2723,7 @@ iris_set_shader_images(struct pipe_context *ctx,
1 << ISL_AUX_USAGE_NONE;
alloc_surface_states(&iv->surface_state, aux_usages);
iv->surface_state.bo_address = res->bo->gtt_offset;
iv->surface_state.bo_address = res->bo->address;
void *map = iv->surface_state.cpu;
@ -2863,7 +2863,7 @@ iris_set_global_binding(struct pipe_context *ctx,
pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
resources[i]);
struct iris_resource *res = (void *) resources[i];
uint64_t addr = res->bo->gtt_offset;
uint64_t addr = res->bo->address;
memcpy(handles[i], &addr, sizeof(addr));
} else {
pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
@ -3107,7 +3107,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
info.depth_surf = &zres->surf;
info.depth_address = zres->bo->gtt_offset + zres->offset;
info.depth_address = zres->bo->address + zres->offset;
info.mocs = iris_mocs(zres->bo, isl_dev, view.usage);
view.format = zres->surf.format;
@ -3115,7 +3115,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
if (iris_resource_level_has_hiz(zres, view.base_level)) {
info.hiz_usage = zres->aux.usage;
info.hiz_surf = &zres->aux.surf;
info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
info.hiz_address = zres->aux.bo->address + zres->aux.offset;
}
ice->state.hiz_usage = info.hiz_usage;
@ -3125,7 +3125,7 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
info.stencil_aux_usage = stencil_res->aux.usage;
info.stencil_surf = &stencil_res->surf;
info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
info.stencil_address = stencil_res->bo->address + stencil_res->offset;
if (!zres) {
view.format = stencil_res->surf.format;
info.mocs = iris_mocs(stencil_res->bo, isl_dev, view.usage);
@ -3430,7 +3430,7 @@ iris_set_vertex_buffers(struct pipe_context *ctx,
if (res) {
vb.BufferSize = res->base.b.width0 - (int) buffer->buffer_offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
ro_bo(NULL, res->bo->address + (int) buffer->buffer_offset);
vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
#if GFX_VER >= 12
@ -3753,7 +3753,7 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
#endif
sob.SurfaceBaseAddress =
rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset,
rw_bo(NULL, res->bo->address + tgt->base.buffer_offset,
IRIS_DOMAIN_OTHER_WRITE);
sob.SOBufferEnable = true;
sob.StreamOffsetWriteEnable = true;
@ -3762,7 +3762,7 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
sob.StreamOutputBufferOffsetAddress =
rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
rw_bo(NULL, iris_resource_bo(tgt->offset.res)->address +
tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
sob.StreamOffset = 0xFFFFFFFF; /* not offset, see above */
}
@ -4658,7 +4658,7 @@ surf_state_update_clear_value(struct iris_batch *batch,
struct isl_device *isl_dev = &batch->screen->isl_dev;
struct iris_bo *state_bo = iris_resource_bo(state->res);
uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
uint32_t offset_into_bo = real_offset - state_bo->address;
uint32_t clear_offset = offset_into_bo +
isl_dev->ss.clear_value_offset +
surf_state_offset_for_aux(res, aux_modes, aux_usage);
@ -4903,7 +4903,7 @@ iris_populate_binding_table(struct iris_context *ice,
struct iris_binding_table *bt = &shader->bt;
UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
struct iris_shader_state *shs = &ice->state.shaders[stage];
uint32_t binder_addr = binder->bo->gtt_offset;
uint32_t binder_addr = binder->bo->address;
uint32_t *bt_map = binder->map + binder->bt_offset[stage];
int s = 0;
@ -5059,11 +5059,11 @@ pin_scratch_space(struct iris_context *ice,
iris_use_pinned_bo(batch, iris_resource_bo(ref->res),
false, IRIS_DOMAIN_NONE);
scratch_addr = ref->offset +
iris_resource_bo(ref->res)->gtt_offset -
iris_resource_bo(ref->res)->address -
IRIS_MEMZONE_BINDLESS_START;
assert((scratch_addr & 0x3f) == 0 && scratch_addr < (1 << 26));
#else
scratch_addr = scratch_bo->gtt_offset;
scratch_addr = scratch_bo->address;
#endif
}
@ -5268,7 +5268,7 @@ static void
iris_update_surface_base_address(struct iris_batch *batch,
struct iris_binder *binder)
{
if (batch->last_surface_base_address == binder->bo->gtt_offset)
if (batch->last_surface_base_address == binder->bo->address)
return;
struct isl_device *isl_dev = &batch->screen->isl_dev;
@ -5318,7 +5318,7 @@ iris_update_surface_base_address(struct iris_batch *batch,
flush_after_state_base_change(batch);
iris_batch_sync_region_end(batch);
batch->last_surface_base_address = binder->bo->gtt_offset;
batch->last_surface_base_address = binder->bo->address;
}
static inline void
@ -6281,7 +6281,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
vb.BufferPitch = 0;
vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset +
ro_bo(NULL, res->bo->address +
(int) ice->draw.draw_params.offset);
vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
@ -6307,7 +6307,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
vb.BufferSize =
res->bo->size - ice->draw.derived_draw_params.offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset +
ro_bo(NULL, res->bo->address +
(int) ice->draw.derived_draw_params.offset);
vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
@ -6350,7 +6350,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
if (res) {
iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
high_bits = res->bo->gtt_offset >> 32ull;
high_bits = res->bo->address >> 32ull;
if (high_bits != ice->state.last_vbo_high_bits[i]) {
flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
PIPE_CONTROL_CS_STALL;
@ -6598,7 +6598,7 @@ iris_upload_render_state(struct iris_context *ice,
ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev,
ISL_SURF_USAGE_INDEX_BUFFER_BIT);
ib.BufferSize = bo->size - offset;
ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
ib.BufferStartingAddress = ro_bo(NULL, bo->address + offset);
#if GFX_VER >= 12
ib.L3BypassDisable = true;
#endif
@ -6612,7 +6612,7 @@ iris_upload_render_state(struct iris_context *ice,
#if GFX_VER < 11
/* The VF cache key only uses 32-bits, see vertex buffer comment above */
uint16_t high_bits = bo->gtt_offset >> 32ull;
uint16_t high_bits = bo->address >> 32ull;
if (high_bits != ice->state.last_index_bo_high_bits) {
iris_emit_pipe_control_flush(batch,
"workaround: VF cache 32-bit key [IB]",
@ -7162,8 +7162,8 @@ iris_rebind_buffer(struct iris_context *ice,
uint64_t *addr = (uint64_t *) &state->state[1];
struct iris_bo *bo = iris_resource_bo(state->resource);
if (*addr != bo->gtt_offset + state->offset) {
*addr = bo->gtt_offset + state->offset;
if (*addr != bo->address + state->offset) {
*addr = bo->address + state->offset;
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
}
}
@ -7190,8 +7190,8 @@ iris_rebind_buffer(struct iris_context *ice,
struct pipe_stream_output_target *tgt = ice->state.so_target[i];
if (tgt) {
struct iris_bo *bo = iris_resource_bo(tgt->buffer);
if (*addr != bo->gtt_offset + tgt->buffer_offset) {
*addr = bo->gtt_offset + tgt->buffer_offset;
if (*addr != bo->address + tgt->buffer_offset) {
*addr = bo->address + tgt->buffer_offset;
ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
}
}