gallium/radeon: don't allocate HTILE in a separate buffer

Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Author: Marek Olšák
Date: 2017-06-06 23:54:23 +02:00
Parent: c6451b1209
Commit: 6940361796
8 changed files with 41 additions and 59 deletions
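In short, the HTILE metadata no longer lives in its own r600_resource: it is appended to the tail of the texture's main buffer, and DB_HTILE_DATA_BASE is programmed from the resource's GPU address plus an offset. The stand-alone C sketch below illustrates that layout math with made-up sizes and a hypothetical align64() helper; it is not code from this commit, only an illustration of the addressing change.

/* Stand-alone illustration only; sizes and the align64() helper are made up. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Round value up to the next multiple of a power-of-two alignment. */
static uint64_t align64(uint64_t value, uint64_t alignment)
{
	return (value + alignment - 1) & ~(alignment - 1);
}

int main(void)
{
	uint64_t tex_size        = 4u * 1024 * 1024; /* bytes used by the mip chain  */
	uint64_t htile_size      = 32u * 1024;       /* bytes of HTILE metadata      */
	uint64_t htile_alignment = 32u * 1024;       /* required alignment for HTILE */
	uint64_t gpu_address     = 0x100000000ull;   /* base VA of the single buffer */

	/* New scheme: carve HTILE out of the tail of the texture's own buffer. */
	uint64_t htile_offset = align64(tex_size, htile_alignment);
	uint64_t total_size   = htile_offset + htile_size;

	/* DB_HTILE_DATA_BASE takes the HTILE virtual address shifted right by 8,
	 * now derived from the resource VA plus the offset instead of a separate
	 * buffer's VA. */
	uint64_t db_htile_data_base = (gpu_address + htile_offset) >> 8;

	printf("htile_offset=%" PRIu64 " total_size=%" PRIu64
	       " DB_HTILE_DATA_BASE=0x%" PRIx64 "\n",
	       htile_offset, total_size, db_htile_data_base);
	return 0;
}

Because the metadata shares one buffer object with the depth surface, a single relocation of &rtex->resource now covers both, which is what the buffer-list changes in the hunks below rely on.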


@@ -1393,8 +1393,8 @@ static void evergreen_init_depth_surface(struct r600_context *rctx,
 	}
 
 	/* use htile only for first level */
-	if (rtex->htile_buffer && !level) {
-		uint64_t va = rtex->htile_buffer->gpu_address;
+	if (rtex->htile_offset && !level) {
+		uint64_t va = rtex->resource.gpu_address + rtex->htile_offset;
 		surf->db_htile_data_base = va >> 8;
 		surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) |
 					 S_028ABC_HTILE_HEIGHT(1) |
@@ -1876,7 +1876,7 @@ static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom
 		radeon_set_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
 		radeon_set_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control);
 		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
-		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
+		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
 						      RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
 		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
 		radeon_emit(cs, reloc_idx);


@@ -444,7 +444,7 @@ static void r600_clear(struct pipe_context *ctx, unsigned buffers,
 		 * disable fast clear for texture array.
 		 */
 		/* Only use htile for first level */
-		if (rtex->htile_buffer && !level &&
+		if (rtex->htile_offset && !level &&
 		    fb->zsbuf->u.tex.first_layer == 0 &&
 		    fb->zsbuf->u.tex.last_layer == util_max_layer(&rtex->resource.b.b, level)) {
 			if (rtex->depth_clear_value != depth) {


@@ -1061,8 +1061,8 @@ static void r600_init_depth_surface(struct r600_context *rctx,
 	surf->db_prefetch_limit = (rtex->surface.u.legacy.level[level].nblk_y / 8) - 1;
 
 	/* use htile only for first level */
-	if (rtex->htile_buffer && !level) {
-		surf->db_htile_data_base = 0;
+	if (rtex->htile_offset && !level) {
+		surf->db_htile_data_base = rtex->htile_offset >> 8;
 		surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) |
 					 S_028D24_HTILE_HEIGHT(1) |
 					 S_028D24_FULL_CACHE(1);
@@ -1543,7 +1543,7 @@ static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom
 		radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
 		radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
 		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
-		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
+		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource,
 						      RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
 		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
 		radeon_emit(cs, reloc_idx);


@@ -232,7 +232,7 @@ struct r600_texture {
 	unsigned			last_msaa_resolve_target_micro_mode;
 
 	/* Depth buffer compression and fast clear. */
-	struct r600_resource		*htile_buffer;
+	uint64_t			htile_offset;
 	bool				tc_compatible_htile;
 	bool				depth_cleared; /* if it was cleared at least once */
 	float				depth_clear_value;


@@ -509,7 +509,7 @@ static void r600_degrade_tile_mode_to_linear(struct r600_common_context *rctx,
 	rtex->cb_color_info = new_tex->cb_color_info;
 	rtex->cmask = new_tex->cmask; /* needed even without CMASK */
 
-	assert(!rtex->htile_buffer);
+	assert(!rtex->htile_offset);
 	assert(!rtex->cmask.size);
 	assert(!rtex->fmask.size);
 	assert(!rtex->dcc_offset);
@@ -612,7 +612,6 @@ static void r600_texture_destroy(struct pipe_screen *screen,
 	r600_texture_reference(&rtex->flushed_depth_texture, NULL);
 
-	r600_resource_reference(&rtex->htile_buffer, NULL);
 	if (rtex->cmask_buffer != &rtex->resource) {
 		r600_resource_reference(&rtex->cmask_buffer, NULL);
 	}
 
@@ -929,33 +928,14 @@ static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
 static void r600_texture_allocate_htile(struct r600_common_screen *rscreen,
 					struct r600_texture *rtex)
 {
-	uint32_t clear_value;
-
-	if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile) {
-		clear_value = 0x0000030F;
-	} else {
+	if (rscreen->chip_class <= VI && !rtex->tc_compatible_htile)
 		r600_texture_get_htile_size(rscreen, rtex);
-		clear_value = 0;
-	}
 
 	if (!rtex->surface.htile_size)
 		return;
 
-	rtex->htile_buffer = (struct r600_resource*)
-		r600_aligned_buffer_create(&rscreen->b,
-					   R600_RESOURCE_FLAG_UNMAPPABLE,
-					   PIPE_USAGE_DEFAULT,
-					   rtex->surface.htile_size,
-					   rtex->surface.htile_alignment);
-	if (rtex->htile_buffer == NULL) {
-		/* this is not a fatal error as we can still keep rendering
-		 * without htile buffer */
-		R600_ERR("Failed to create buffer object for htile buffer.\n");
-	} else {
-		r600_screen_clear_buffer(rscreen, &rtex->htile_buffer->b.b,
-					 0, rtex->surface.htile_size,
-					 clear_value);
-	}
+	rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment);
+	rtex->size = rtex->htile_offset + rtex->surface.htile_size;
 }
 
 void r600_print_texture_info(struct r600_common_screen *rscreen,
@@ -1004,11 +984,12 @@ void r600_print_texture_info(struct r600_common_screen *rscreen,
 			rtex->surface.u.gfx9.cmask.pipe_aligned);
 		}
 
-		if (rtex->htile_buffer) {
-			fprintf(f, "  HTile: size=%u, alignment=%u, "
+		if (rtex->htile_offset) {
+			fprintf(f, "  HTile: offset=%"PRIu64", size=%"PRIu64", alignment=%u, "
 				"rb_aligned=%u, pipe_aligned=%u\n",
-				rtex->htile_buffer->b.b.width0,
-				rtex->htile_buffer->buf->alignment,
+				rtex->htile_offset,
+				rtex->surface.htile_size,
+				rtex->surface.htile_alignment,
 				rtex->surface.u.gfx9.htile.rb_aligned,
 				rtex->surface.u.gfx9.htile.pipe_aligned);
 		}
@@ -1051,10 +1032,11 @@ void r600_print_texture_info(struct r600_common_screen *rscreen,
 		rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment,
 		rtex->cmask.slice_tile_max);
 
-	if (rtex->htile_buffer)
-		fprintf(f, "  HTile: size=%u, alignment=%u, TC_compatible = %u\n",
-			rtex->htile_buffer->b.b.width0,
-			rtex->htile_buffer->buf->alignment,
+	if (rtex->htile_offset)
+		fprintf(f, "  HTile: offset=%"PRIu64", size=%"PRIu64", "
+			"alignment=%u, TC_compatible = %u\n",
+			rtex->htile_offset, rtex->surface.htile_size,
+			rtex->surface.htile_alignment,
 			rtex->tc_compatible_htile);
 
 	if (rtex->dcc_offset) {
@@ -1242,6 +1224,17 @@ r600_texture_create_object(struct pipe_screen *screen,
 					 rtex->cmask.offset, rtex->cmask.size,
 					 0xCCCCCCCC);
 	}
+	if (rtex->htile_offset) {
+		uint32_t clear_value = 0;
+
+		if (rscreen->chip_class >= GFX9 || rtex->tc_compatible_htile)
+			clear_value = 0x0000030F;
+
+		r600_screen_clear_buffer(rscreen, &rtex->resource.b.b,
+					 rtex->htile_offset,
+					 rtex->surface.htile_size,
+					 clear_value);
+	}
 
 	/* Initialize DCC only if the texture is not being imported. */
 	if (!buf && rtex->dcc_offset) {


@@ -726,7 +726,7 @@ static void si_clear(struct pipe_context *ctx, unsigned buffers,
 		}
 	}
 
-	if (zstex && zstex->htile_buffer &&
+	if (zstex && zstex->htile_offset &&
 	    zsbuf->u.tex.level == 0 &&
 	    zsbuf->u.tex.first_layer == 0 &&
 	    zsbuf->u.tex.last_layer == util_max_layer(&zstex->resource.b.b, 0)) {


@@ -337,13 +337,6 @@ static void si_sampler_view_add_buffer(struct si_context *sctx,
 						    rtex->dcc_separate_buffer, usage,
 						    RADEON_PRIO_DCC, check_mem);
 	}
-
-	if (rtex->htile_buffer &&
-	    rtex->tc_compatible_htile) {
-		radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
-						    rtex->htile_buffer, usage,
-						    RADEON_PRIO_HTILE, check_mem);
-	}
 }
 
 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
@@ -424,7 +417,7 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
 		if (sscreen->b.chip_class <= VI)
 			meta_va += base_level_info->dcc_offset;
 	} else if (tex->tc_compatible_htile) {
-		meta_va = tex->htile_buffer->gpu_address;
+		meta_va = tex->resource.gpu_address + tex->htile_offset;
 	}
 
 	if (meta_va) {


@@ -2316,7 +2316,7 @@ static void si_init_depth_surface(struct si_context *sctx,
 				      S_02801C_Y_MAX(rtex->resource.b.b.height0 - 1);
 
 		/* Only use HTILE for the first level. */
-		if (rtex->htile_buffer && !level) {
+		if (rtex->htile_offset && !level) {
 			z_info |= S_028038_TILE_SURFACE_ENABLE(1) |
 				  S_028038_ALLOW_EXPCLEAR(1);
 
@@ -2342,7 +2342,8 @@ static void si_init_depth_surface(struct si_context *sctx,
 				s_info |= S_02803C_TILE_STENCIL_DISABLE(1);
 			}
 
-			surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+			surf->db_htile_data_base = (rtex->resource.gpu_address +
+						    rtex->htile_offset) >> 8;
 			surf->db_htile_surface = S_028ABC_FULL_CACHE(1) |
 						 S_028ABC_PIPE_ALIGNED(rtex->surface.u.gfx9.htile.pipe_aligned) |
 						 S_028ABC_RB_ALIGNED(rtex->surface.u.gfx9.htile.rb_aligned);
@@ -2394,7 +2395,7 @@ static void si_init_depth_surface(struct si_context *sctx,
 				      levelinfo->nblk_y) / 64 - 1);
 
 		/* Only use HTILE for the first level. */
-		if (rtex->htile_buffer && !level) {
+		if (rtex->htile_offset && !level) {
 			z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
 				  S_028040_ALLOW_EXPCLEAR(1);
 
@@ -2420,7 +2421,8 @@ static void si_init_depth_surface(struct si_context *sctx,
 				s_info |= S_028044_TILE_STENCIL_DISABLE(1);
 			}
 
-			surf->db_htile_data_base = rtex->htile_buffer->gpu_address >> 8;
+			surf->db_htile_data_base = (rtex->resource.gpu_address +
+						    rtex->htile_offset) >> 8;
 			surf->db_htile_surface = S_028ABC_FULL_CACHE(1);
 
 			if (rtex->tc_compatible_htile) {
@@ -2815,12 +2817,6 @@ static void si_emit_framebuffer_state(struct si_context *sctx, struct r600_atom
 					      RADEON_PRIO_DEPTH_BUFFER_MSAA :
 					      RADEON_PRIO_DEPTH_BUFFER);
 
-		if (zb->db_htile_data_base) {
-			radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-						  rtex->htile_buffer, RADEON_USAGE_READWRITE,
-						  RADEON_PRIO_HTILE);
-		}
-
 		if (sctx->b.chip_class >= GFX9) {
 			radeon_set_context_reg_seq(cs, R_028014_DB_HTILE_DATA_BASE, 3);
 			radeon_emit(cs, zb->db_htile_data_base); /* DB_HTILE_DATA_BASE */