radeonsi: rename r600 -> si in some places

Acked-by: Timothy Arceri <tarceri@itsqueeze.com>

Author: Marek Olšák
Date: 2018-04-01 17:52:55 -04:00
Commit: 90d12f1d77 (parent: 50c7aa6756)
13 changed files with 574 additions and 574 deletions


@@ -244,8 +244,8 @@ bool si_alloc_resource(struct si_screen *sscreen,
return true;
}
static void r600_buffer_destroy(struct pipe_screen *screen,
struct pipe_resource *buf)
static void si_buffer_destroy(struct pipe_screen *screen,
struct pipe_resource *buf)
{
struct r600_resource *rbuffer = r600_resource(buf);
@@ -331,13 +331,13 @@ static void si_invalidate_resource(struct pipe_context *ctx,
(void)si_invalidate_buffer(sctx, rbuffer);
}
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer,
void *data, struct r600_resource *staging,
unsigned offset)
static void *si_buffer_get_transfer(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer,
void *data, struct r600_resource *staging,
unsigned offset)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *transfer;
@@ -361,12 +361,12 @@ static void *r600_buffer_get_transfer(struct pipe_context *ctx,
return data;
}
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned level,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer)
static void *si_buffer_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned level,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_resource *rbuffer = r600_resource(resource);
@@ -459,7 +459,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
if (staging) {
data += box->x % R600_MAP_BUFFER_ALIGNMENT;
return r600_buffer_get_transfer(ctx, resource, usage, box,
return si_buffer_get_transfer(ctx, resource, usage, box,
ptransfer, data, staging, offset);
} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
return NULL;
@@ -495,7 +495,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
}
data += box->x % R600_MAP_BUFFER_ALIGNMENT;
return r600_buffer_get_transfer(ctx, resource, usage, box,
return si_buffer_get_transfer(ctx, resource, usage, box,
ptransfer, data, staging, 0);
} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
return NULL;
@@ -508,13 +508,13 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
}
data += box->x;
return r600_buffer_get_transfer(ctx, resource, usage, box,
return si_buffer_get_transfer(ctx, resource, usage, box,
ptransfer, data, NULL, 0);
}
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
const struct pipe_box *box)
static void si_buffer_do_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
const struct pipe_box *box)
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct r600_resource *rbuffer = r600_resource(transfer->resource);
@@ -538,9 +538,9 @@ static void r600_buffer_do_flush_region(struct pipe_context *ctx,
box->x + box->width);
}
static void r600_buffer_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
const struct pipe_box *rel_box)
static void si_buffer_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
const struct pipe_box *rel_box)
{
unsigned required_usage = PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_FLUSH_EXPLICIT;
@@ -549,19 +549,19 @@ static void r600_buffer_flush_region(struct pipe_context *ctx,
struct pipe_box box;
u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
r600_buffer_do_flush_region(ctx, transfer, &box);
si_buffer_do_flush_region(ctx, transfer, &box);
}
}
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer *transfer)
static void si_buffer_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer *transfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
if (transfer->usage & PIPE_TRANSFER_WRITE &&
!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
si_buffer_do_flush_region(ctx, transfer, &transfer->box);
r600_resource_reference(&rtransfer->staging, NULL);
assert(rtransfer->b.staging == NULL); /* for threaded context only */
@@ -582,7 +582,7 @@ static void si_buffer_subdata(struct pipe_context *ctx,
uint8_t *map = NULL;
u_box_1d(offset, size, &box);
map = r600_buffer_transfer_map(ctx, buffer, 0,
map = si_buffer_transfer_map(ctx, buffer, 0,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE |
usage,
@@ -591,21 +591,21 @@ static void si_buffer_subdata(struct pipe_context *ctx,
return;
memcpy(map, data, size);
r600_buffer_transfer_unmap(ctx, transfer);
si_buffer_transfer_unmap(ctx, transfer);
}
static const struct u_resource_vtbl r600_buffer_vtbl =
static const struct u_resource_vtbl si_buffer_vtbl =
{
NULL, /* get_handle */
r600_buffer_destroy, /* resource_destroy */
r600_buffer_transfer_map, /* transfer_map */
r600_buffer_flush_region, /* transfer_flush_region */
r600_buffer_transfer_unmap, /* transfer_unmap */
si_buffer_destroy, /* resource_destroy */
si_buffer_transfer_map, /* transfer_map */
si_buffer_flush_region, /* transfer_flush_region */
si_buffer_transfer_unmap, /* transfer_unmap */
};
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
const struct pipe_resource *templ)
si_alloc_buffer_struct(struct pipe_screen *screen,
const struct pipe_resource *templ)
{
struct r600_resource *rbuffer;
@@ -616,7 +616,7 @@ r600_alloc_buffer_struct(struct pipe_screen *screen,
pipe_reference_init(&rbuffer->b.b.reference, 1);
rbuffer->b.b.screen = screen;
rbuffer->b.vtbl = &r600_buffer_vtbl;
rbuffer->b.vtbl = &si_buffer_vtbl;
threaded_resource_init(&rbuffer->b.b);
rbuffer->buf = NULL;
@@ -631,7 +631,7 @@ static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
unsigned alignment)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
rbuffer->b.b.flags |= R600_RESOURCE_FLAG_UNMAPPABLE;
@@ -676,7 +676,7 @@ si_buffer_from_user_memory(struct pipe_screen *screen,
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct radeon_winsys *ws = sscreen->ws;
struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
struct r600_resource *rbuffer = si_alloc_buffer_struct(screen, templ);
rbuffer->domains = RADEON_DOMAIN_GTT;
rbuffer->flags = 0;
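
The buffer path above keeps Gallium's standard upload shape: si_buffer_subdata maps a range for write with PIPE_TRANSFER_DISCARD_RANGE, memcpys the data, and unmaps. A minimal sketch of that shape against the generic pipe_context hooks, assuming the usual Gallium headers (pipe/p_context.h, util/u_box.h for u_box_1d); example_upload itself is illustrative, not part of this commit:

/* Sketch: drive the transfer hooks shown above through the generic
 * pipe_context interface. */
static void example_upload(struct pipe_context *ctx,
                           struct pipe_resource *buf,
                           unsigned offset, unsigned size,
                           const void *data)
{
        struct pipe_transfer *transfer;
        struct pipe_box box;
        uint8_t *map;

        u_box_1d(offset, size, &box);
        map = ctx->transfer_map(ctx, buf, 0,
                                PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_DISCARD_RANGE,
                                &box, &transfer);
        if (!map)
                return;
        memcpy(map, data, size);
        ctx->transfer_unmap(ctx, transfer);
}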


@@ -84,10 +84,10 @@ static inline void radeon_add_to_buffer_list(struct si_context *sctx,
*
* When this SHOULD NOT be used:
*
* - if r600_context_add_resource_size has been called for the buffer
* - if si_context_add_resource_size has been called for the buffer
* followed by *_need_cs_space for checking the memory usage
*
* - if r600_need_dma_space has been called for the buffer
* - if si_need_dma_space has been called for the buffer
*
* - when emitting state packets and draw packets (because preceding packets
* can't be re-emitted at that point)


@@ -77,8 +77,8 @@
p_atomic_inc(&counters->named.field.idle); \
} while (0)
static void r600_update_mmio_counters(struct si_screen *sscreen,
union r600_mmio_counters *counters)
static void si_update_mmio_counters(struct si_screen *sscreen,
union si_mmio_counters *counters)
{
uint32_t value = 0;
bool gui_busy, sdma_busy = false;
@@ -129,7 +129,7 @@ static void r600_update_mmio_counters(struct si_screen *sscreen,
#undef UPDATE_COUNTER
static int
r600_gpu_load_thread(void *param)
si_gpu_load_thread(void *param)
{
struct si_screen *sscreen = (struct si_screen*)param;
const int period_us = 1000000 / SAMPLES_PER_SEC;
@@ -154,7 +154,7 @@ r600_gpu_load_thread(void *param)
last_time = cur_time;
/* Update the counters. */
r600_update_mmio_counters(sscreen, &sscreen->mmio_counters);
si_update_mmio_counters(sscreen, &sscreen->mmio_counters);
}
p_atomic_dec(&sscreen->gpu_load_stop_thread);
return 0;
@@ -170,8 +170,8 @@ void si_gpu_load_kill_thread(struct si_screen *sscreen)
sscreen->gpu_load_thread = 0;
}
static uint64_t r600_read_mmio_counter(struct si_screen *sscreen,
unsigned busy_index)
static uint64_t si_read_mmio_counter(struct si_screen *sscreen,
unsigned busy_index)
{
/* Start the thread if needed. */
if (!sscreen->gpu_load_thread) {
@@ -179,7 +179,7 @@ static uint64_t r600_read_mmio_counter(struct si_screen *sscreen,
/* Check again inside the mutex. */
if (!sscreen->gpu_load_thread)
sscreen->gpu_load_thread =
u_thread_create(r600_gpu_load_thread, sscreen);
u_thread_create(si_gpu_load_thread, sscreen);
mtx_unlock(&sscreen->gpu_load_mutex);
}
@@ -189,10 +189,10 @@ static uint64_t r600_read_mmio_counter(struct si_screen *sscreen,
return busy | ((uint64_t)idle << 32);
}
static unsigned r600_end_mmio_counter(struct si_screen *sscreen,
uint64_t begin, unsigned busy_index)
static unsigned si_end_mmio_counter(struct si_screen *sscreen,
uint64_t begin, unsigned busy_index)
{
uint64_t end = r600_read_mmio_counter(sscreen, busy_index);
uint64_t end = si_read_mmio_counter(sscreen, busy_index);
unsigned busy = (end & 0xffffffff) - (begin & 0xffffffff);
unsigned idle = (end >> 32) - (begin >> 32);
@@ -205,10 +205,10 @@ static unsigned r600_end_mmio_counter(struct si_screen *sscreen,
if (idle || busy) {
return busy*100 / (busy + idle);
} else {
union r600_mmio_counters counters;
union si_mmio_counters counters;
memset(&counters, 0, sizeof(counters));
r600_update_mmio_counters(sscreen, &counters);
si_update_mmio_counters(sscreen, &counters);
return counters.array[busy_index] ? 100 : 0;
}
}
@@ -270,12 +270,12 @@ static unsigned busy_index_from_type(struct si_screen *sscreen,
uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type)
{
unsigned busy_index = busy_index_from_type(sscreen, type);
return r600_read_mmio_counter(sscreen, busy_index);
return si_read_mmio_counter(sscreen, busy_index);
}
unsigned si_end_counter(struct si_screen *sscreen, unsigned type,
uint64_t begin)
{
unsigned busy_index = busy_index_from_type(sscreen, type);
return r600_end_mmio_counter(sscreen, begin, busy_index);
return si_end_mmio_counter(sscreen, begin, busy_index);
}


@@ -30,11 +30,11 @@
/* Max counters per HW block */
#define R600_QUERY_MAX_COUNTERS 16
static struct r600_perfcounter_block *
lookup_counter(struct r600_perfcounters *pc, unsigned index,
static struct si_perfcounter_block *
lookup_counter(struct si_perfcounters *pc, unsigned index,
unsigned *base_gid, unsigned *sub_index)
{
struct r600_perfcounter_block *block = pc->blocks;
struct si_perfcounter_block *block = pc->blocks;
unsigned bid;
*base_gid = 0;
@@ -53,11 +53,11 @@ lookup_counter(struct r600_perfcounters *pc, unsigned index,
return NULL;
}
static struct r600_perfcounter_block *
lookup_group(struct r600_perfcounters *pc, unsigned *index)
static struct si_perfcounter_block *
lookup_group(struct si_perfcounters *pc, unsigned *index)
{
unsigned bid;
struct r600_perfcounter_block *block = pc->blocks;
struct si_perfcounter_block *block = pc->blocks;
for (bid = 0; bid < pc->num_blocks; ++bid, ++block) {
if (*index < block->num_groups)
@@ -68,9 +68,9 @@ lookup_group(struct r600_perfcounters *pc, unsigned *index)
return NULL;
}
struct r600_pc_group {
struct r600_pc_group *next;
struct r600_perfcounter_block *block;
struct si_pc_group {
struct si_pc_group *next;
struct si_perfcounter_block *block;
unsigned sub_gid; /* only used during init */
unsigned result_base; /* only used during init */
int se;
@@ -79,30 +79,30 @@ struct r600_pc_group {
unsigned selectors[R600_QUERY_MAX_COUNTERS];
};
struct r600_pc_counter {
struct si_pc_counter {
unsigned base;
unsigned qwords;
unsigned stride; /* in uint64s */
};
#define R600_PC_SHADERS_WINDOWING (1 << 31)
#define SI_PC_SHADERS_WINDOWING (1 << 31)
struct r600_query_pc {
struct r600_query_hw b;
struct si_query_pc {
struct si_query_hw b;
unsigned shaders;
unsigned num_counters;
struct r600_pc_counter *counters;
struct r600_pc_group *groups;
struct si_pc_counter *counters;
struct si_pc_group *groups;
};
static void r600_pc_query_destroy(struct si_screen *sscreen,
struct r600_query *rquery)
static void si_pc_query_destroy(struct si_screen *sscreen,
struct si_query *rquery)
{
struct r600_query_pc *query = (struct r600_query_pc *)rquery;
struct si_query_pc *query = (struct si_query_pc *)rquery;
while (query->groups) {
struct r600_pc_group *group = query->groups;
struct si_pc_group *group = query->groups;
query->groups = group->next;
FREE(group);
}
@@ -112,21 +112,21 @@ static void r600_pc_query_destroy(struct si_screen *sscreen,
si_query_hw_destroy(sscreen, rquery);
}
static bool r600_pc_query_prepare_buffer(struct si_screen *screen,
struct r600_query_hw *hwquery,
struct r600_resource *buffer)
static bool si_pc_query_prepare_buffer(struct si_screen *screen,
struct si_query_hw *hwquery,
struct r600_resource *buffer)
{
/* no-op */
return true;
}
static void r600_pc_query_emit_start(struct si_context *sctx,
struct r600_query_hw *hwquery,
struct r600_resource *buffer, uint64_t va)
static void si_pc_query_emit_start(struct si_context *sctx,
struct si_query_hw *hwquery,
struct r600_resource *buffer, uint64_t va)
{
struct r600_perfcounters *pc = sctx->screen->perfcounters;
struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
struct r600_pc_group *group;
struct si_perfcounters *pc = sctx->screen->perfcounters;
struct si_query_pc *query = (struct si_query_pc *)hwquery;
struct si_pc_group *group;
int current_se = -1;
int current_instance = -1;
@@ -134,7 +134,7 @@ static void r600_pc_query_emit_start(struct si_context *sctx,
pc->emit_shaders(sctx, query->shaders);
for (group = query->groups; group; group = group->next) {
struct r600_perfcounter_block *block = group->block;
struct si_perfcounter_block *block = group->block;
if (group->se != current_se || group->instance != current_instance) {
current_se = group->se;
@@ -151,18 +151,18 @@ static void r600_pc_query_emit_start(struct si_context *sctx,
pc->emit_start(sctx, buffer, va);
}
static void r600_pc_query_emit_stop(struct si_context *sctx,
struct r600_query_hw *hwquery,
struct r600_resource *buffer, uint64_t va)
static void si_pc_query_emit_stop(struct si_context *sctx,
struct si_query_hw *hwquery,
struct r600_resource *buffer, uint64_t va)
{
struct r600_perfcounters *pc = sctx->screen->perfcounters;
struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
struct r600_pc_group *group;
struct si_perfcounters *pc = sctx->screen->perfcounters;
struct si_query_pc *query = (struct si_query_pc *)hwquery;
struct si_pc_group *group;
pc->emit_stop(sctx, buffer, va);
for (group = query->groups; group; group = group->next) {
struct r600_perfcounter_block *block = group->block;
struct si_perfcounter_block *block = group->block;
unsigned se = group->se >= 0 ? group->se : 0;
unsigned se_end = se + 1;
@@ -185,25 +185,25 @@ static void r600_pc_query_emit_stop(struct si_context *sctx,
pc->emit_instance(sctx, -1, -1);
}
static void r600_pc_query_clear_result(struct r600_query_hw *hwquery,
union pipe_query_result *result)
static void si_pc_query_clear_result(struct si_query_hw *hwquery,
union pipe_query_result *result)
{
struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
struct si_query_pc *query = (struct si_query_pc *)hwquery;
memset(result, 0, sizeof(result->batch[0]) * query->num_counters);
}
static void r600_pc_query_add_result(struct si_screen *sscreen,
struct r600_query_hw *hwquery,
void *buffer,
union pipe_query_result *result)
static void si_pc_query_add_result(struct si_screen *sscreen,
struct si_query_hw *hwquery,
void *buffer,
union pipe_query_result *result)
{
struct r600_query_pc *query = (struct r600_query_pc *)hwquery;
struct si_query_pc *query = (struct si_query_pc *)hwquery;
uint64_t *results = buffer;
unsigned i, j;
for (i = 0; i < query->num_counters; ++i) {
struct r600_pc_counter *counter = &query->counters[i];
struct si_pc_counter *counter = &query->counters[i];
for (j = 0; j < counter->qwords; ++j) {
uint32_t value = results[counter->base + j * counter->stride];
@@ -212,27 +212,27 @@ static void r600_pc_query_add_result(struct si_screen *sscreen,
}
}
static struct r600_query_ops batch_query_ops = {
.destroy = r600_pc_query_destroy,
static struct si_query_ops batch_query_ops = {
.destroy = si_pc_query_destroy,
.begin = si_query_hw_begin,
.end = si_query_hw_end,
.get_result = si_query_hw_get_result
};
static struct r600_query_hw_ops batch_query_hw_ops = {
.prepare_buffer = r600_pc_query_prepare_buffer,
.emit_start = r600_pc_query_emit_start,
.emit_stop = r600_pc_query_emit_stop,
.clear_result = r600_pc_query_clear_result,
.add_result = r600_pc_query_add_result,
static struct si_query_hw_ops batch_query_hw_ops = {
.prepare_buffer = si_pc_query_prepare_buffer,
.emit_start = si_pc_query_emit_start,
.emit_stop = si_pc_query_emit_stop,
.clear_result = si_pc_query_clear_result,
.add_result = si_pc_query_add_result,
};
static struct r600_pc_group *get_group_state(struct si_screen *screen,
struct r600_query_pc *query,
struct r600_perfcounter_block *block,
static struct si_pc_group *get_group_state(struct si_screen *screen,
struct si_query_pc *query,
struct si_perfcounter_block *block,
unsigned sub_gid)
{
struct r600_pc_group *group = query->groups;
struct si_pc_group *group = query->groups;
while (group) {
if (group->block == block && group->sub_gid == sub_gid)
@@ -240,7 +240,7 @@ static struct r600_pc_group *get_group_state(struct si_screen *screen,
group = group->next;
}
group = CALLOC_STRUCT(r600_pc_group);
group = CALLOC_STRUCT(si_pc_group);
if (!group)
return NULL;
@@ -260,9 +260,9 @@ static struct r600_pc_group *get_group_state(struct si_screen *screen,
shaders = screen->perfcounters->shader_type_bits[shader_id];
query_shaders = query->shaders & ~R600_PC_SHADERS_WINDOWING;
query_shaders = query->shaders & ~SI_PC_SHADERS_WINDOWING;
if (query_shaders && query_shaders != shaders) {
fprintf(stderr, "r600_perfcounter: incompatible shader groups\n");
fprintf(stderr, "si_perfcounter: incompatible shader groups\n");
FREE(group);
return NULL;
}
@@ -272,7 +272,7 @@ static struct r600_pc_group *get_group_state(struct si_screen *screen,
if (block->flags & R600_PC_BLOCK_SHADER_WINDOWED && !query->shaders) {
// A non-zero value in query->shaders ensures that the shader
// masking is reset unless the user explicitly requests one.
query->shaders = R600_PC_SHADERS_WINDOWING;
query->shaders = SI_PC_SHADERS_WINDOWING;
}
if (block->flags & R600_PC_BLOCK_SE_GROUPS) {
@@ -300,17 +300,17 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
{
struct si_screen *screen =
(struct si_screen *)ctx->screen;
struct r600_perfcounters *pc = screen->perfcounters;
struct r600_perfcounter_block *block;
struct r600_pc_group *group;
struct r600_query_pc *query;
struct si_perfcounters *pc = screen->perfcounters;
struct si_perfcounter_block *block;
struct si_pc_group *group;
struct si_query_pc *query;
unsigned base_gid, sub_gid, sub_index;
unsigned i, j;
if (!pc)
return NULL;
query = CALLOC_STRUCT(r600_query_pc);
query = CALLOC_STRUCT(si_query_pc);
if (!query)
return NULL;
@@ -354,7 +354,7 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
i = 0;
for (group = query->groups; group; group = group->next) {
struct r600_perfcounter_block *block = group->block;
struct si_perfcounter_block *block = group->block;
unsigned read_dw;
unsigned instances = 1;
@@ -373,15 +373,15 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
}
if (query->shaders) {
if (query->shaders == R600_PC_SHADERS_WINDOWING)
if (query->shaders == SI_PC_SHADERS_WINDOWING)
query->shaders = 0xffffffff;
}
/* Map user-supplied query array to result indices */
query->counters = CALLOC(num_queries, sizeof(*query->counters));
for (i = 0; i < num_queries; ++i) {
struct r600_pc_counter *counter = &query->counters[i];
struct r600_perfcounter_block *block;
struct si_pc_counter *counter = &query->counters[i];
struct si_perfcounter_block *block;
block = lookup_counter(pc, query_types[i] - R600_QUERY_FIRST_PERFCOUNTER,
&base_gid, &sub_index);
@@ -413,12 +413,12 @@ struct pipe_query *si_create_batch_query(struct pipe_context *ctx,
return (struct pipe_query *)query;
error:
r600_pc_query_destroy(screen, &query->b.b);
si_pc_query_destroy(screen, &query->b.b);
return NULL;
}
static bool r600_init_block_names(struct si_screen *screen,
struct r600_perfcounter_block *block)
static bool si_init_block_names(struct si_screen *screen,
struct si_perfcounter_block *block)
{
unsigned i, j, k;
unsigned groups_shader = 1, groups_se = 1, groups_instance = 1;
@@ -505,8 +505,8 @@ int si_get_perfcounter_info(struct si_screen *screen,
unsigned index,
struct pipe_driver_query_info *info)
{
struct r600_perfcounters *pc = screen->perfcounters;
struct r600_perfcounter_block *block;
struct si_perfcounters *pc = screen->perfcounters;
struct si_perfcounter_block *block;
unsigned base_gid, sub;
if (!pc)
@@ -528,7 +528,7 @@ int si_get_perfcounter_info(struct si_screen *screen,
return 0;
if (!block->selector_names) {
if (!r600_init_block_names(screen, block))
if (!si_init_block_names(screen, block))
return 0;
}
info->name = block->selector_names + sub * block->selector_name_stride;
@@ -547,8 +547,8 @@ int si_get_perfcounter_group_info(struct si_screen *screen,
unsigned index,
struct pipe_driver_query_group_info *info)
{
struct r600_perfcounters *pc = screen->perfcounters;
struct r600_perfcounter_block *block;
struct si_perfcounters *pc = screen->perfcounters;
struct si_perfcounter_block *block;
if (!pc)
return 0;
@@ -561,7 +561,7 @@ int si_get_perfcounter_group_info(struct si_screen *screen,
return 0;
if (!block->group_names) {
if (!r600_init_block_names(screen, block))
if (!si_init_block_names(screen, block))
return 0;
}
info->name = block->group_names + index * block->group_name_stride;
@@ -576,10 +576,10 @@ void si_perfcounters_destroy(struct si_screen *sscreen)
sscreen->perfcounters->cleanup(sscreen);
}
bool si_perfcounters_init(struct r600_perfcounters *pc,
bool si_perfcounters_init(struct si_perfcounters *pc,
unsigned num_blocks)
{
pc->blocks = CALLOC(num_blocks, sizeof(struct r600_perfcounter_block));
pc->blocks = CALLOC(num_blocks, sizeof(struct si_perfcounter_block));
if (!pc->blocks)
return false;
@@ -590,12 +590,12 @@ bool si_perfcounters_init(struct r600_perfcounters *pc,
}
void si_perfcounters_add_block(struct si_screen *sscreen,
struct r600_perfcounters *pc,
struct si_perfcounters *pc,
const char *name, unsigned flags,
unsigned counters, unsigned selectors,
unsigned instances, void *data)
{
struct r600_perfcounter_block *block = &pc->blocks[pc->num_blocks];
struct si_perfcounter_block *block = &pc->blocks[pc->num_blocks];
assert(counters <= R600_QUERY_MAX_COUNTERS);
@@ -626,7 +626,7 @@ void si_perfcounters_add_block(struct si_screen *sscreen,
pc->num_groups += block->num_groups;
}
void si_perfcounters_do_destroy(struct r600_perfcounters *pc)
void si_perfcounters_do_destroy(struct si_perfcounters *pc)
{
unsigned i;
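
Both lookup helpers above use the same flat-index walk: subtract each block's count from the index until it falls inside a block. A standalone sketch of that resolution, with illustrative type and function names:

/* Resolve a flat group index to (block, index within block),
 * mirroring lookup_group above. */
struct example_block { unsigned num_groups; };

static struct example_block *
example_lookup(struct example_block *blocks, unsigned num_blocks,
               unsigned *index)
{
        for (unsigned bid = 0; bid < num_blocks; ++bid) {
                if (*index < blocks[bid].num_groups)
                        return &blocks[bid];
                *index -= blocks[bid].num_groups;
        }
        return NULL; /* index out of range */
}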


@@ -32,7 +32,7 @@
* pipe_context
*/
static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
static enum pipe_reset_status si_get_reset_status(struct pipe_context *ctx)
{
struct si_context *sctx = (struct si_context *)ctx;
unsigned latest = sctx->b.ws->query_value(sctx->b.ws,
@@ -45,7 +45,7 @@ static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
return PIPE_UNKNOWN_CONTEXT_RESET;
}
static void r600_set_device_reset_callback(struct pipe_context *ctx,
static void si_set_device_reset_callback(struct pipe_context *ctx,
const struct pipe_device_reset_callback *cb)
{
struct si_context *sctx = (struct si_context *)ctx;
@@ -75,10 +75,10 @@ bool si_check_device_reset(struct si_context *sctx)
return true;
}
static bool r600_resource_commit(struct pipe_context *pctx,
struct pipe_resource *resource,
unsigned level, struct pipe_box *box,
bool commit)
static bool si_resource_commit(struct pipe_context *pctx,
struct pipe_resource *resource,
unsigned level, struct pipe_box *box,
bool commit)
{
struct si_context *ctx = (struct si_context *)pctx;
struct r600_resource *res = r600_resource(resource);
@@ -122,16 +122,16 @@ bool si_common_context_init(struct si_context *sctx,
sctx->b.family = sscreen->info.family;
sctx->b.chip_class = sscreen->info.chip_class;
sctx->b.b.resource_commit = r600_resource_commit;
sctx->b.b.resource_commit = si_resource_commit;
if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 43) {
sctx->b.b.get_device_reset_status = r600_get_reset_status;
sctx->b.b.get_device_reset_status = si_get_reset_status;
sctx->b.gpu_reset_counter =
sctx->b.ws->query_value(sctx->b.ws,
RADEON_GPU_RESET_COUNTER);
}
sctx->b.b.set_device_reset_callback = r600_set_device_reset_callback;
sctx->b.b.set_device_reset_callback = si_set_device_reset_callback;
si_init_context_texture_functions(sctx);
si_init_query_functions(sctx);
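
The reset-status hook is a simple counter comparison: the context records the winsys' RADEON_GPU_RESET_COUNTER value at creation, and the hunk above re-queries it. The middle of si_get_reset_status is elided by the diff; a hedged reconstruction using only the names visible in the hunks, so the comparison logic here is an assumption:

/* Hedged reconstruction of the elided body, based on the visible
 * query_value() call and PIPE_UNKNOWN_CONTEXT_RESET return above. */
static enum pipe_reset_status example_get_reset_status(struct si_context *sctx)
{
        unsigned latest = sctx->b.ws->query_value(sctx->b.ws,
                                                  RADEON_GPU_RESET_COUNTER);
        if (sctx->b.gpu_reset_counter == latest)
                return PIPE_NO_RESET;
        sctx->b.gpu_reset_counter = latest; /* assumption: counter is re-armed */
        return PIPE_UNKNOWN_CONTEXT_RESET;
}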


@@ -127,9 +127,9 @@ enum {
#define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024
struct r600_perfcounters;
struct si_perfcounters;
struct tgsi_shader_info;
struct r600_qbo_state;
struct si_qbo_state;
/* Only 32-bit buffer allocations are supported, gallium doesn't support more
* at the moment.
@@ -314,42 +314,42 @@ struct r600_surface {
unsigned db_htile_surface;
};
struct r600_mmio_counter {
struct si_mmio_counter {
unsigned busy;
unsigned idle;
};
union r600_mmio_counters {
union si_mmio_counters {
struct {
/* For global GPU load including SDMA. */
struct r600_mmio_counter gpu;
struct si_mmio_counter gpu;
/* GRBM_STATUS */
struct r600_mmio_counter spi;
struct r600_mmio_counter gui;
struct r600_mmio_counter ta;
struct r600_mmio_counter gds;
struct r600_mmio_counter vgt;
struct r600_mmio_counter ia;
struct r600_mmio_counter sx;
struct r600_mmio_counter wd;
struct r600_mmio_counter bci;
struct r600_mmio_counter sc;
struct r600_mmio_counter pa;
struct r600_mmio_counter db;
struct r600_mmio_counter cp;
struct r600_mmio_counter cb;
struct si_mmio_counter spi;
struct si_mmio_counter gui;
struct si_mmio_counter ta;
struct si_mmio_counter gds;
struct si_mmio_counter vgt;
struct si_mmio_counter ia;
struct si_mmio_counter sx;
struct si_mmio_counter wd;
struct si_mmio_counter bci;
struct si_mmio_counter sc;
struct si_mmio_counter pa;
struct si_mmio_counter db;
struct si_mmio_counter cp;
struct si_mmio_counter cb;
/* SRBM_STATUS2 */
struct r600_mmio_counter sdma;
struct si_mmio_counter sdma;
/* CP_STAT */
struct r600_mmio_counter pfp;
struct r600_mmio_counter meq;
struct r600_mmio_counter me;
struct r600_mmio_counter surf_sync;
struct r600_mmio_counter cp_dma;
struct r600_mmio_counter scratch_ram;
struct si_mmio_counter pfp;
struct si_mmio_counter meq;
struct si_mmio_counter me;
struct si_mmio_counter surf_sync;
struct si_mmio_counter cp_dma;
struct si_mmio_counter scratch_ram;
} named;
unsigned array[0];
};
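
The union above lets each counter be reached two ways: by name (counters->named.spi.busy) from the MMIO update macros, or by flat index (counters.array[busy_index]) from the query code. A self-contained sketch of the same aliasing idiom; it relies on the same GNU C zero-length-array extension the driver uses, and the names are illustrative:

#include <stdio.h>

union example_counters {
        struct {
                struct { unsigned busy, idle; } gpu;
                struct { unsigned busy, idle; } spi;
        } named;
        unsigned array[0]; /* GNU C: flat view of the struct above */
};

int main(void)
{
        union example_counters c = {0};

        c.named.spi.busy = 42;
        /* index 2 = spi.busy: gpu.busy and gpu.idle precede it */
        printf("%u\n", c.array[2]); /* prints 42 */
        return 0;
}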


@@ -33,9 +33,9 @@
#include "tgsi/tgsi_text.h"
#include "amd/common/sid.h"
#define R600_MAX_STREAMS 4
#define SI_MAX_STREAMS 4
struct r600_hw_query_params {
struct si_hw_query_params {
unsigned start_offset;
unsigned end_offset;
unsigned fence_offset;
@@ -44,8 +44,8 @@ struct r600_hw_query_params {
};
/* Queries without buffer handling or suspend/resume. */
struct r600_query_sw {
struct r600_query b;
struct si_query_sw {
struct si_query b;
uint64_t begin_result;
uint64_t end_result;
@@ -57,10 +57,10 @@ struct r600_query_sw {
struct pipe_fence_handle *fence;
};
static void r600_query_sw_destroy(struct si_screen *sscreen,
struct r600_query *rquery)
static void si_query_sw_destroy(struct si_screen *sscreen,
struct si_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
struct si_query_sw *query = (struct si_query_sw *)rquery;
sscreen->b.fence_reference(&sscreen->b, &query->fence, NULL);
FREE(query);
@@ -93,10 +93,10 @@ static enum radeon_value_id winsys_id_from_type(unsigned type)
}
}
static bool r600_query_sw_begin(struct si_context *sctx,
struct r600_query *rquery)
static bool si_query_sw_begin(struct si_context *sctx,
struct si_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
struct si_query_sw *query = (struct si_query_sw *)rquery;
enum radeon_value_id ws_id;
switch(query->b.type) {
@@ -245,16 +245,16 @@ static bool r600_query_sw_begin(struct si_context *sctx,
case R600_QUERY_GPIN_NUM_SE:
break;
default:
unreachable("r600_query_sw_begin: bad query type");
unreachable("si_query_sw_begin: bad query type");
}
return true;
}
static bool r600_query_sw_end(struct si_context *sctx,
struct r600_query *rquery)
static bool si_query_sw_end(struct si_context *sctx,
struct si_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
struct si_query_sw *query = (struct si_query_sw *)rquery;
enum radeon_value_id ws_id;
switch(query->b.type) {
@@ -407,18 +407,18 @@ static bool r600_query_sw_end(struct si_context *sctx,
case R600_QUERY_GPIN_NUM_SE:
break;
default:
unreachable("r600_query_sw_end: bad query type");
unreachable("si_query_sw_end: bad query type");
}
return true;
}
static bool r600_query_sw_get_result(struct si_context *sctx,
struct r600_query *rquery,
bool wait,
union pipe_query_result *result)
static bool si_query_sw_get_result(struct si_context *sctx,
struct si_query *rquery,
bool wait,
union pipe_query_result *result)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
struct si_query_sw *query = (struct si_query_sw *)rquery;
switch (query->b.type) {
case PIPE_QUERY_TIMESTAMP_DISJOINT:
@@ -479,19 +479,19 @@ static bool r600_query_sw_get_result(struct si_context *sctx,
}
static struct r600_query_ops sw_query_ops = {
.destroy = r600_query_sw_destroy,
.begin = r600_query_sw_begin,
.end = r600_query_sw_end,
.get_result = r600_query_sw_get_result,
static struct si_query_ops sw_query_ops = {
.destroy = si_query_sw_destroy,
.begin = si_query_sw_begin,
.end = si_query_sw_end,
.get_result = si_query_sw_get_result,
.get_result_resource = NULL
};
static struct pipe_query *r600_query_sw_create(unsigned query_type)
static struct pipe_query *si_query_sw_create(unsigned query_type)
{
struct r600_query_sw *query;
struct si_query_sw *query;
query = CALLOC_STRUCT(r600_query_sw);
query = CALLOC_STRUCT(si_query_sw);
if (!query)
return NULL;
@@ -502,14 +502,14 @@ static struct pipe_query *r600_query_sw_create(unsigned query_type)
}
void si_query_hw_destroy(struct si_screen *sscreen,
struct r600_query *rquery)
struct si_query *rquery)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *prev = query->buffer.previous;
struct si_query_hw *query = (struct si_query_hw *)rquery;
struct si_query_buffer *prev = query->buffer.previous;
/* Release all query buffers. */
while (prev) {
struct r600_query_buffer *qbuf = prev;
struct si_query_buffer *qbuf = prev;
prev = prev->previous;
r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
@@ -520,8 +520,8 @@ void si_query_hw_destroy(struct si_screen *sscreen,
FREE(rquery);
}
static struct r600_resource *r600_new_query_buffer(struct si_screen *sscreen,
struct r600_query_hw *query)
static struct r600_resource *si_new_query_buffer(struct si_screen *sscreen,
struct si_query_hw *query)
{
unsigned buf_size = MAX2(query->result_size,
sscreen->info.min_alloc_size);
@@ -544,9 +544,9 @@ static struct r600_resource *r600_new_query_buffer(struct si_screen *sscreen,
return buf;
}
static bool r600_query_hw_prepare_buffer(struct si_screen *sscreen,
struct r600_query_hw *query,
struct r600_resource *buffer)
static bool si_query_hw_prepare_buffer(struct si_screen *sscreen,
struct si_query_hw *query,
struct r600_resource *buffer)
{
/* Callers ensure that the buffer is currently unused by the GPU. */
uint32_t *results = sscreen->ws->buffer_map(buffer->buf, NULL,
@@ -581,59 +581,59 @@ static bool r600_query_hw_prepare_buffer(struct si_screen *sscreen,
return true;
}
static void r600_query_hw_get_result_resource(struct si_context *sctx,
struct r600_query *rquery,
bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset);
static void si_query_hw_get_result_resource(struct si_context *sctx,
struct si_query *rquery,
bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset);
static struct r600_query_ops query_hw_ops = {
static struct si_query_ops query_hw_ops = {
.destroy = si_query_hw_destroy,
.begin = si_query_hw_begin,
.end = si_query_hw_end,
.get_result = si_query_hw_get_result,
.get_result_resource = r600_query_hw_get_result_resource,
.get_result_resource = si_query_hw_get_result_resource,
};
static void r600_query_hw_do_emit_start(struct si_context *sctx,
struct r600_query_hw *query,
struct r600_resource *buffer,
uint64_t va);
static void r600_query_hw_do_emit_stop(struct si_context *sctx,
struct r600_query_hw *query,
struct r600_resource *buffer,
uint64_t va);
static void r600_query_hw_add_result(struct si_screen *sscreen,
struct r600_query_hw *, void *buffer,
union pipe_query_result *result);
static void r600_query_hw_clear_result(struct r600_query_hw *,
union pipe_query_result *);
static void si_query_hw_do_emit_start(struct si_context *sctx,
struct si_query_hw *query,
struct r600_resource *buffer,
uint64_t va);
static void si_query_hw_do_emit_stop(struct si_context *sctx,
struct si_query_hw *query,
struct r600_resource *buffer,
uint64_t va);
static void si_query_hw_add_result(struct si_screen *sscreen,
struct si_query_hw *, void *buffer,
union pipe_query_result *result);
static void si_query_hw_clear_result(struct si_query_hw *,
union pipe_query_result *);
static struct r600_query_hw_ops query_hw_default_hw_ops = {
.prepare_buffer = r600_query_hw_prepare_buffer,
.emit_start = r600_query_hw_do_emit_start,
.emit_stop = r600_query_hw_do_emit_stop,
.clear_result = r600_query_hw_clear_result,
.add_result = r600_query_hw_add_result,
static struct si_query_hw_ops query_hw_default_hw_ops = {
.prepare_buffer = si_query_hw_prepare_buffer,
.emit_start = si_query_hw_do_emit_start,
.emit_stop = si_query_hw_do_emit_stop,
.clear_result = si_query_hw_clear_result,
.add_result = si_query_hw_add_result,
};
bool si_query_hw_init(struct si_screen *sscreen,
struct r600_query_hw *query)
struct si_query_hw *query)
{
query->buffer.buf = r600_new_query_buffer(sscreen, query);
query->buffer.buf = si_new_query_buffer(sscreen, query);
if (!query->buffer.buf)
return false;
return true;
}
static struct pipe_query *r600_query_hw_create(struct si_screen *sscreen,
unsigned query_type,
unsigned index)
static struct pipe_query *si_query_hw_create(struct si_screen *sscreen,
unsigned query_type,
unsigned index)
{
struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
struct si_query_hw *query = CALLOC_STRUCT(si_query_hw);
if (!query)
return NULL;
@@ -669,8 +669,8 @@ static struct pipe_query *r600_query_hw_create(struct si_screen *sscreen,
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
query->result_size = 32 * R600_MAX_STREAMS;
query->num_cs_dw_end = 6 * R600_MAX_STREAMS;
query->result_size = 32 * SI_MAX_STREAMS;
query->num_cs_dw_end = 6 * SI_MAX_STREAMS;
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
/* 11 values on GCN. */
@@ -692,8 +692,8 @@ static struct pipe_query *r600_query_hw_create(struct si_screen *sscreen,
return (struct pipe_query *)query;
}
static void r600_update_occlusion_query_state(struct si_context *sctx,
unsigned type, int diff)
static void si_update_occlusion_query_state(struct si_context *sctx,
unsigned type, int diff)
{
if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
type == PIPE_QUERY_OCCLUSION_PREDICATE ||
@@ -740,8 +740,8 @@ static void emit_sample_streamout(struct radeon_winsys_cs *cs, uint64_t va,
radeon_emit(cs, va >> 32);
}
static void r600_query_hw_do_emit_start(struct si_context *sctx,
struct r600_query_hw *query,
static void si_query_hw_do_emit_start(struct si_context *sctx,
struct si_query_hw *query,
struct r600_resource *buffer,
uint64_t va)
{
@@ -763,7 +763,7 @@ static void r600_query_hw_do_emit_start(struct si_context *sctx,
emit_sample_streamout(cs, va, query->stream);
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
emit_sample_streamout(cs, va + 32 * stream, stream);
break;
case PIPE_QUERY_TIME_ELAPSED:
@@ -792,26 +792,26 @@ static void r600_query_hw_do_emit_start(struct si_context *sctx,
RADEON_PRIO_QUERY);
}
static void r600_query_hw_emit_start(struct si_context *sctx,
struct r600_query_hw *query)
static void si_query_hw_emit_start(struct si_context *sctx,
struct si_query_hw *query)
{
uint64_t va;
if (!query->buffer.buf)
return; // previous buffer allocation failure
r600_update_occlusion_query_state(sctx, query->b.type, 1);
si_update_occlusion_query_state(sctx, query->b.type, 1);
si_update_prims_generated_query_state(sctx, query->b.type, 1);
si_need_gfx_cs_space(sctx);
/* Get a new query buffer if needed. */
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
struct si_query_buffer *qbuf = MALLOC_STRUCT(si_query_buffer);
*qbuf = query->buffer;
query->buffer.results_end = 0;
query->buffer.previous = qbuf;
query->buffer.buf = r600_new_query_buffer(sctx->screen, query);
query->buffer.buf = si_new_query_buffer(sctx->screen, query);
if (!query->buffer.buf)
return;
}
@@ -824,8 +824,8 @@ static void r600_query_hw_emit_start(struct si_context *sctx,
sctx->b.num_cs_dw_queries_suspend += query->num_cs_dw_end;
}
static void r600_query_hw_do_emit_stop(struct si_context *sctx,
struct r600_query_hw *query,
static void si_query_hw_do_emit_stop(struct si_context *sctx,
struct si_query_hw *query,
struct r600_resource *buffer,
uint64_t va)
{
@@ -853,7 +853,7 @@ static void r600_query_hw_do_emit_stop(struct si_context *sctx,
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
va += 16;
for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream)
for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream)
emit_sample_streamout(cs, va + 32 * stream, stream);
break;
case PIPE_QUERY_TIME_ELAPSED:
@@ -890,8 +890,8 @@ static void r600_query_hw_do_emit_stop(struct si_context *sctx,
query->b.type);
}
static void r600_query_hw_emit_stop(struct si_context *sctx,
struct r600_query_hw *query)
static void si_query_hw_emit_stop(struct si_context *sctx,
struct si_query_hw *query)
{
uint64_t va;
@@ -912,7 +912,7 @@ static void r600_query_hw_emit_stop(struct si_context *sctx,
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
sctx->b.num_cs_dw_queries_suspend -= query->num_cs_dw_end;
r600_update_occlusion_query_state(sctx, query->b.type, -1);
si_update_occlusion_query_state(sctx, query->b.type, -1);
si_update_prims_generated_query_state(sctx, query->b.type, -1);
}
@@ -936,11 +936,11 @@ static void emit_set_predicate(struct si_context *ctx,
RADEON_PRIO_QUERY);
}
static void r600_emit_query_predication(struct si_context *ctx,
struct r600_atom *atom)
static void si_emit_query_predication(struct si_context *ctx,
struct r600_atom *atom)
{
struct r600_query_hw *query = (struct r600_query_hw *)ctx->b.render_cond;
struct r600_query_buffer *qbuf;
struct si_query_hw *query = (struct si_query_hw *)ctx->b.render_cond;
struct si_query_buffer *qbuf;
uint32_t op;
bool flag_wait, invert;
@@ -1001,7 +1001,7 @@ static void r600_emit_query_predication(struct si_context *ctx,
uint64_t va = va_base + results_base;
if (query->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE) {
for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
emit_set_predicate(ctx, qbuf->buf, va + 32 * stream, op);
/* set CONTINUE bit for all packets except the first */
@@ -1017,7 +1017,7 @@ static void r600_emit_query_predication(struct si_context *ctx,
}
}
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
static struct pipe_query *si_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
struct si_screen *sscreen =
(struct si_screen *)ctx->screen;
@@ -1025,36 +1025,36 @@ static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned q
if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
query_type == PIPE_QUERY_GPU_FINISHED ||
query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
return r600_query_sw_create(query_type);
return si_query_sw_create(query_type);
return r600_query_hw_create(sscreen, query_type, index);
return si_query_hw_create(sscreen, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
static void si_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
{
struct si_context *sctx = (struct si_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
struct si_query *rquery = (struct si_query *)query;
rquery->ops->destroy(sctx->screen, rquery);
}
static boolean r600_begin_query(struct pipe_context *ctx,
static boolean si_begin_query(struct pipe_context *ctx,
struct pipe_query *query)
{
struct si_context *sctx = (struct si_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
struct si_query *rquery = (struct si_query *)query;
return rquery->ops->begin(sctx, rquery);
}
void si_query_hw_reset_buffers(struct si_context *sctx,
struct r600_query_hw *query)
struct si_query_hw *query)
{
struct r600_query_buffer *prev = query->buffer.previous;
struct si_query_buffer *prev = query->buffer.previous;
/* Discard the old query buffers. */
while (prev) {
struct r600_query_buffer *qbuf = prev;
struct si_query_buffer *qbuf = prev;
prev = prev->previous;
r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
@@ -1067,7 +1067,7 @@ void si_query_hw_reset_buffers(struct si_context *sctx,
if (si_rings_is_buffer_referenced(sctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!sctx->b.ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
r600_resource_reference(&query->buffer.buf, NULL);
query->buffer.buf = r600_new_query_buffer(sctx->screen, query);
query->buffer.buf = si_new_query_buffer(sctx->screen, query);
} else {
if (!query->ops->prepare_buffer(sctx->screen, query, query->buffer.buf))
r600_resource_reference(&query->buffer.buf, NULL);
@@ -1075,9 +1075,9 @@ void si_query_hw_reset_buffers(struct si_context *sctx,
}
bool si_query_hw_begin(struct si_context *sctx,
struct r600_query *rquery)
struct si_query *rquery)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct si_query_hw *query = (struct si_query_hw *)rquery;
if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
assert(0);
@@ -1089,7 +1089,7 @@ bool si_query_hw_begin(struct si_context *sctx,
r600_resource_reference(&query->workaround_buf, NULL);
r600_query_hw_emit_start(sctx, query);
si_query_hw_emit_start(sctx, query);
if (!query->buffer.buf)
return false;
@@ -1097,23 +1097,23 @@ bool si_query_hw_begin(struct si_context *sctx,
return true;
}
static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
static bool si_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
struct si_context *sctx = (struct si_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
struct si_query *rquery = (struct si_query *)query;
return rquery->ops->end(sctx, rquery);
}
bool si_query_hw_end(struct si_context *sctx,
struct r600_query *rquery)
struct si_query *rquery)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct si_query_hw *query = (struct si_query_hw *)rquery;
if (query->flags & R600_QUERY_HW_FLAG_NO_START)
si_query_hw_reset_buffers(sctx, query);
r600_query_hw_emit_stop(sctx, query);
si_query_hw_emit_stop(sctx, query);
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
LIST_DELINIT(&query->list);
@@ -1124,9 +1124,9 @@ bool si_query_hw_end(struct si_context *sctx,
return true;
}
static void r600_get_hw_query_params(struct si_context *sctx,
struct r600_query_hw *rquery, int index,
struct r600_hw_query_params *params)
static void si_get_hw_query_params(struct si_context *sctx,
struct si_query_hw *rquery, int index,
struct si_hw_query_params *params)
{
unsigned max_rbs = sctx->screen->info.num_render_backends;
@@ -1169,7 +1169,7 @@ static void r600_get_hw_query_params(struct si_context *sctx,
params->fence_offset = params->end_offset + 4;
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
params->pair_count = R600_MAX_STREAMS;
params->pair_count = SI_MAX_STREAMS;
params->pair_stride = 32;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
params->start_offset = 0;
@@ -1191,12 +1191,12 @@ static void r600_get_hw_query_params(struct si_context *sctx,
break;
}
default:
unreachable("r600_get_hw_query_params unsupported");
unreachable("si_get_hw_query_params unsupported");
}
}
static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
bool test_status_bit)
static unsigned si_query_read_result(void *map, unsigned start_index, unsigned end_index,
bool test_status_bit)
{
uint32_t *current_result = (uint32_t*)map;
uint64_t start, end;
@@ -1213,8 +1213,8 @@ static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned
return 0;
}
static void r600_query_hw_add_result(struct si_screen *sscreen,
struct r600_query_hw *query,
static void si_query_hw_add_result(struct si_screen *sscreen,
struct si_query_hw *query,
void *buffer,
union pipe_query_result *result)
{
@@ -1225,7 +1225,7 @@ static void r600_query_hw_add_result(struct si_screen *sscreen,
for (unsigned i = 0; i < max_rbs; ++i) {
unsigned results_base = i * 16;
result->u64 +=
r600_query_read_result(buffer + results_base, 0, 2, true);
si_query_read_result(buffer + results_base, 0, 2, true);
}
break;
}
@@ -1234,12 +1234,12 @@ static void r600_query_hw_add_result(struct si_screen *sscreen,
for (unsigned i = 0; i < max_rbs; ++i) {
unsigned results_base = i * 16;
result->b = result->b ||
r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
si_query_read_result(buffer + results_base, 0, 2, true) != 0;
}
break;
}
case PIPE_QUERY_TIME_ELAPSED:
result->u64 += r600_query_read_result(buffer, 0, 2, false);
result->u64 += si_query_read_result(buffer, 0, 2, false);
break;
case PIPE_QUERY_TIMESTAMP:
result->u64 = *(uint64_t*)buffer;
@@ -1251,54 +1251,54 @@ static void r600_query_hw_add_result(struct si_screen *sscreen,
* u64 PrimitiveStorageNeeded;
* }
* We only need NumPrimitivesWritten here. */
result->u64 += r600_query_read_result(buffer, 2, 6, true);
result->u64 += si_query_read_result(buffer, 2, 6, true);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
/* Here we read PrimitiveStorageNeeded. */
result->u64 += r600_query_read_result(buffer, 0, 4, true);
result->u64 += si_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_STATISTICS:
result->so_statistics.num_primitives_written +=
r600_query_read_result(buffer, 2, 6, true);
si_query_read_result(buffer, 2, 6, true);
result->so_statistics.primitives_storage_needed +=
r600_query_read_result(buffer, 0, 4, true);
si_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
result->b = result->b ||
r600_query_read_result(buffer, 2, 6, true) !=
r600_query_read_result(buffer, 0, 4, true);
si_query_read_result(buffer, 2, 6, true) !=
si_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
for (unsigned stream = 0; stream < R600_MAX_STREAMS; ++stream) {
for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
result->b = result->b ||
r600_query_read_result(buffer, 2, 6, true) !=
r600_query_read_result(buffer, 0, 4, true);
si_query_read_result(buffer, 2, 6, true) !=
si_query_read_result(buffer, 0, 4, true);
buffer = (char *)buffer + 32;
}
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
result->pipeline_statistics.ps_invocations +=
r600_query_read_result(buffer, 0, 22, false);
si_query_read_result(buffer, 0, 22, false);
result->pipeline_statistics.c_primitives +=
r600_query_read_result(buffer, 2, 24, false);
si_query_read_result(buffer, 2, 24, false);
result->pipeline_statistics.c_invocations +=
r600_query_read_result(buffer, 4, 26, false);
si_query_read_result(buffer, 4, 26, false);
result->pipeline_statistics.vs_invocations +=
r600_query_read_result(buffer, 6, 28, false);
si_query_read_result(buffer, 6, 28, false);
result->pipeline_statistics.gs_invocations +=
r600_query_read_result(buffer, 8, 30, false);
si_query_read_result(buffer, 8, 30, false);
result->pipeline_statistics.gs_primitives +=
r600_query_read_result(buffer, 10, 32, false);
si_query_read_result(buffer, 10, 32, false);
result->pipeline_statistics.ia_primitives +=
r600_query_read_result(buffer, 12, 34, false);
si_query_read_result(buffer, 12, 34, false);
result->pipeline_statistics.ia_vertices +=
r600_query_read_result(buffer, 14, 36, false);
si_query_read_result(buffer, 14, 36, false);
result->pipeline_statistics.hs_invocations +=
r600_query_read_result(buffer, 16, 38, false);
si_query_read_result(buffer, 16, 38, false);
result->pipeline_statistics.ds_invocations +=
r600_query_read_result(buffer, 18, 40, false);
si_query_read_result(buffer, 18, 40, false);
result->pipeline_statistics.cs_invocations +=
r600_query_read_result(buffer, 20, 42, false);
si_query_read_result(buffer, 20, 42, false);
#if 0 /* for testing */
printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
"DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, "
@@ -1321,44 +1321,44 @@ static void r600_query_hw_add_result(struct si_screen *sscreen,
}
}
static boolean r600_get_query_result(struct pipe_context *ctx,
struct pipe_query *query, boolean wait,
union pipe_query_result *result)
static boolean si_get_query_result(struct pipe_context *ctx,
struct pipe_query *query, boolean wait,
union pipe_query_result *result)
{
struct si_context *sctx = (struct si_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
struct si_query *rquery = (struct si_query *)query;
return rquery->ops->get_result(sctx, rquery, wait, result);
}
static void r600_get_query_result_resource(struct pipe_context *ctx,
struct pipe_query *query,
boolean wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset)
static void si_get_query_result_resource(struct pipe_context *ctx,
struct pipe_query *query,
boolean wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset)
{
struct si_context *sctx = (struct si_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
struct si_query *rquery = (struct si_query *)query;
rquery->ops->get_result_resource(sctx, rquery, wait, result_type, index,
resource, offset);
}
static void r600_query_hw_clear_result(struct r600_query_hw *query,
static void si_query_hw_clear_result(struct si_query_hw *query,
union pipe_query_result *result)
{
util_query_clear_result(result, query->b.type);
}
bool si_query_hw_get_result(struct si_context *sctx,
struct r600_query *rquery,
struct si_query *rquery,
bool wait, union pipe_query_result *result)
{
struct si_screen *sscreen = sctx->screen;
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *qbuf;
struct si_query_hw *query = (struct si_query_hw *)rquery;
struct si_query_buffer *qbuf;
query->ops->clear_result(query, result);
@@ -1423,7 +1423,7 @@ bool si_query_hw_get_result(struct si_context *sctx,
* BUFFER[1] = previous summary buffer
* BUFFER[2] = next summary buffer or user-supplied buffer
*/
static void r600_create_query_result_shader(struct si_context *sctx)
static void si_create_query_result_shader(struct si_context *sctx)
{
/* TEMP[0].xy = accumulated result so far
* TEMP[0].z = result not available
@@ -1607,8 +1607,8 @@ static void r600_create_query_result_shader(struct si_context *sctx)
sctx->b.query_result_shader = sctx->b.b.create_compute_state(&sctx->b.b, &state);
}
static void r600_restore_qbo_state(struct si_context *sctx,
struct r600_qbo_state *st)
static void si_restore_qbo_state(struct si_context *sctx,
struct si_qbo_state *st)
{
sctx->b.b.bind_compute_state(&sctx->b.b, st->saved_compute);
@@ -1620,24 +1620,24 @@ static void r600_restore_qbo_state(struct si_context *sctx,
pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
}
static void r600_query_hw_get_result_resource(struct si_context *sctx,
struct r600_query *rquery,
static void si_query_hw_get_result_resource(struct si_context *sctx,
struct si_query *rquery,
bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset)
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *qbuf;
struct r600_query_buffer *qbuf_prev;
struct si_query_hw *query = (struct si_query_hw *)rquery;
struct si_query_buffer *qbuf;
struct si_query_buffer *qbuf_prev;
struct pipe_resource *tmp_buffer = NULL;
unsigned tmp_buffer_offset = 0;
struct r600_qbo_state saved_state = {};
struct si_qbo_state saved_state = {};
struct pipe_grid_info grid = {};
struct pipe_constant_buffer constant_buffer = {};
struct pipe_shader_buffer ssbo[3];
struct r600_hw_query_params params;
struct si_hw_query_params params;
struct {
uint32_t end_offset;
uint32_t result_stride;
@@ -1649,7 +1649,7 @@ static void r600_query_hw_get_result_resource(struct si_context *sctx,
} consts;
if (!sctx->b.query_result_shader) {
r600_create_query_result_shader(sctx);
si_create_query_result_shader(sctx);
if (!sctx->b.query_result_shader)
return;
}
@@ -1663,7 +1663,7 @@ static void r600_query_hw_get_result_resource(struct si_context *sctx,
si_save_qbo_state(sctx, &saved_state);
r600_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
si_get_hw_query_params(sctx, query, index >= 0 ? index : 0, &params);
consts.end_offset = params.end_offset - params.start_offset;
consts.fence_offset = params.fence_offset - params.start_offset;
consts.result_stride = query->result_size;
@@ -1765,17 +1765,17 @@ static void r600_query_hw_get_result_resource(struct si_context *sctx,
sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
r600_restore_qbo_state(sctx, &saved_state);
si_restore_qbo_state(sctx, &saved_state);
pipe_resource_reference(&tmp_buffer, NULL);
}
static void r600_render_condition(struct pipe_context *ctx,
struct pipe_query *query,
boolean condition,
enum pipe_render_cond_flag mode)
static void si_render_condition(struct pipe_context *ctx,
struct pipe_query *query,
boolean condition,
enum pipe_render_cond_flag mode)
{
struct si_context *sctx = (struct si_context *)ctx;
struct r600_query_hw *rquery = (struct r600_query_hw *)query;
struct si_query_hw *rquery = (struct si_query_hw *)query;
struct r600_atom *atom = &sctx->b.render_cond_atom;
if (query) {
@@ -1831,17 +1831,17 @@ static void r600_render_condition(struct pipe_context *ctx,
void si_suspend_queries(struct si_context *sctx)
{
struct r600_query_hw *query;
struct si_query_hw *query;
LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
r600_query_hw_emit_stop(sctx, query);
si_query_hw_emit_stop(sctx, query);
}
assert(sctx->b.num_cs_dw_queries_suspend == 0);
}
void si_resume_queries(struct si_context *sctx)
{
struct r600_query_hw *query;
struct si_query_hw *query;
assert(sctx->b.num_cs_dw_queries_suspend == 0);
@@ -1849,7 +1849,7 @@ void si_resume_queries(struct si_context *sctx)
si_need_gfx_cs_space(sctx);
LIST_FOR_EACH_ENTRY(query, &sctx->b.active_queries, list) {
r600_query_hw_emit_start(sctx, query);
si_query_hw_emit_start(sctx, query);
}
}
@@ -1868,7 +1868,7 @@ void si_resume_queries(struct si_context *sctx)
#define XG(group_, name_, query_type_, type_, result_type_) \
XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
static struct pipe_driver_query_info r600_driver_query_list[] = {
static struct pipe_driver_query_info si_driver_query_list[] = {
X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
X("num-shader-cache-hits", NUM_SHADER_CACHE_HITS, UINT64, CUMULATIVE),
@@ -1956,26 +1956,26 @@ static struct pipe_driver_query_info r600_driver_query_list[] = {
#undef XG
#undef XFULL
static unsigned r600_get_num_queries(struct si_screen *sscreen)
static unsigned si_get_num_queries(struct si_screen *sscreen)
{
if (sscreen->info.drm_major == 2 && sscreen->info.drm_minor >= 42)
return ARRAY_SIZE(r600_driver_query_list);
return ARRAY_SIZE(si_driver_query_list);
else if (sscreen->info.drm_major == 3) {
if (sscreen->info.chip_class >= VI)
return ARRAY_SIZE(r600_driver_query_list);
return ARRAY_SIZE(si_driver_query_list);
else
return ARRAY_SIZE(r600_driver_query_list) - 7;
return ARRAY_SIZE(si_driver_query_list) - 7;
}
else
return ARRAY_SIZE(r600_driver_query_list) - 25;
return ARRAY_SIZE(si_driver_query_list) - 25;
}
static int r600_get_driver_query_info(struct pipe_screen *screen,
unsigned index,
struct pipe_driver_query_info *info)
static int si_get_driver_query_info(struct pipe_screen *screen,
unsigned index,
struct pipe_driver_query_info *info)
{
struct si_screen *sscreen = (struct si_screen*)screen;
unsigned num_queries = r600_get_num_queries(sscreen);
unsigned num_queries = si_get_num_queries(sscreen);
if (!info) {
unsigned num_perfcounters =
@ -1987,7 +1987,7 @@ static int r600_get_driver_query_info(struct pipe_screen *screen,
if (index >= num_queries)
return si_get_perfcounter_info(sscreen, index - num_queries, info);
*info = r600_driver_query_list[index];
*info = si_driver_query_list[index];
switch (info->query_type) {
case R600_QUERY_REQUESTED_VRAM:
@ -2018,9 +2018,9 @@ static int r600_get_driver_query_info(struct pipe_screen *screen,
* performance counter groups, so be careful when changing this and related
* functions.
*/
static int r600_get_driver_query_group_info(struct pipe_screen *screen,
unsigned index,
struct pipe_driver_query_group_info *info)
static int si_get_driver_query_group_info(struct pipe_screen *screen,
unsigned index,
struct pipe_driver_query_group_info *info)
{
struct si_screen *sscreen = (struct si_screen *)screen;
unsigned num_pc_groups = 0;
@ -2046,23 +2046,23 @@ static int r600_get_driver_query_group_info(struct pipe_screen *screen,
void si_init_query_functions(struct si_context *sctx)
{
sctx->b.b.create_query = r600_create_query;
sctx->b.b.create_query = si_create_query;
sctx->b.b.create_batch_query = si_create_batch_query;
sctx->b.b.destroy_query = r600_destroy_query;
sctx->b.b.begin_query = r600_begin_query;
sctx->b.b.end_query = r600_end_query;
sctx->b.b.get_query_result = r600_get_query_result;
sctx->b.b.get_query_result_resource = r600_get_query_result_resource;
sctx->b.render_cond_atom.emit = r600_emit_query_predication;
sctx->b.b.destroy_query = si_destroy_query;
sctx->b.b.begin_query = si_begin_query;
sctx->b.b.end_query = si_end_query;
sctx->b.b.get_query_result = si_get_query_result;
sctx->b.b.get_query_result_resource = si_get_query_result_resource;
sctx->b.render_cond_atom.emit = si_emit_query_predication;
if (((struct si_screen*)sctx->b.b.screen)->info.num_render_backends > 0)
sctx->b.b.render_condition = r600_render_condition;
sctx->b.b.render_condition = si_render_condition;
LIST_INITHEAD(&sctx->b.active_queries);
}
void si_init_screen_query_functions(struct si_screen *sscreen)
{
sscreen->b.get_driver_query_info = r600_get_driver_query_info;
sscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
sscreen->b.get_driver_query_info = si_get_driver_query_info;
sscreen->b.get_driver_query_group_info = si_get_driver_query_group_info;
}
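/* Editor's sketch (not part of this change): si_get_driver_query_info
 * above treats a NULL info pointer as a request for the query count,
 * which is the standard Gallium convention, so a frontend (e.g. the
 * HUD) can enumerate the list installed here like this. */
static void enumerate_driver_queries(struct pipe_screen *screen)
{
	int n = screen->get_driver_query_info(screen, 0, NULL);

	for (int i = 0; i < n; i++) {
		struct pipe_driver_query_info info;

		screen->get_driver_query_info(screen, i, &info);
		/* info.name, info.query_type, etc. describe one query */
	}
}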


@ -33,8 +33,8 @@ struct pipe_resource;
struct si_screen;
struct si_context;
struct r600_query;
struct r600_query_hw;
struct si_query;
struct si_query_hw;
struct r600_resource;
enum {
@ -118,24 +118,24 @@ enum {
R600_NUM_SW_QUERY_GROUPS
};
struct r600_query_ops {
void (*destroy)(struct si_screen *, struct r600_query *);
bool (*begin)(struct si_context *, struct r600_query *);
bool (*end)(struct si_context *, struct r600_query *);
struct si_query_ops {
void (*destroy)(struct si_screen *, struct si_query *);
bool (*begin)(struct si_context *, struct si_query *);
bool (*end)(struct si_context *, struct si_query *);
bool (*get_result)(struct si_context *,
struct r600_query *, bool wait,
struct si_query *, bool wait,
union pipe_query_result *result);
void (*get_result_resource)(struct si_context *,
struct r600_query *, bool wait,
struct si_query *, bool wait,
enum pipe_query_value_type result_type,
int index,
struct pipe_resource *resource,
unsigned offset);
};
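/* Editor's sketch (not part of this change): each query type fills one
 * of these tables and points si_query::ops at it. The si_query_sw_*
 * callbacks named below are hypothetical stand-ins, used only to show
 * the shape of the table; the hw queries use the si_query_hw_* entry
 * points declared later in this header. */
static struct si_query_ops sw_query_ops = {
	.destroy = si_query_sw_destroy,
	.begin = si_query_sw_begin,
	.end = si_query_sw_end,
	.get_result = si_query_sw_get_result,
	.get_result_resource = NULL, /* optional; only hw queries need it */
};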
struct r600_query {
struct si_query {
struct threaded_query b;
struct r600_query_ops *ops;
struct si_query_ops *ops;
/* The type of query */
unsigned type;
@ -148,23 +148,23 @@ enum {
R600_QUERY_HW_FLAG_BEGIN_RESUMES = (1 << 2),
};
struct r600_query_hw_ops {
struct si_query_hw_ops {
bool (*prepare_buffer)(struct si_screen *,
struct r600_query_hw *,
struct si_query_hw *,
struct r600_resource *);
void (*emit_start)(struct si_context *,
struct r600_query_hw *,
struct si_query_hw *,
struct r600_resource *buffer, uint64_t va);
void (*emit_stop)(struct si_context *,
struct r600_query_hw *,
struct si_query_hw *,
struct r600_resource *buffer, uint64_t va);
void (*clear_result)(struct r600_query_hw *, union pipe_query_result *);
void (*clear_result)(struct si_query_hw *, union pipe_query_result *);
void (*add_result)(struct si_screen *screen,
struct r600_query_hw *, void *buffer,
struct si_query_hw *, void *buffer,
union pipe_query_result *result);
};
struct r600_query_buffer {
struct si_query_buffer {
/* The buffer where query results are stored. */
struct r600_resource *buf;
/* Offset of the next free result after current query data */
@ -172,16 +172,16 @@ struct r600_query_buffer {
/* If a query buffer is full, a new buffer is created and the old one
* is put in here. When we calculate the result, we sum up the samples
* from all buffers. */
struct r600_query_buffer *previous;
struct si_query_buffer *previous;
};
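/* Editor's sketch (illustrative, not from this change): per the comment
 * above, computing a result means walking from the newest buffer back
 * through ->previous and summing every buffer's samples. The helper is
 * hypothetical; the real accumulation is done by the add_result
 * callback in si_query_hw_ops. */
static void sum_all_query_buffers(struct si_query_buffer *buffers,
				  union pipe_query_result *result)
{
	struct si_query_buffer *qbuf;

	for (qbuf = buffers; qbuf; qbuf = qbuf->previous) {
		/* map qbuf->buf, then fold each stored sample into *result */
	}
}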
struct r600_query_hw {
struct r600_query b;
struct r600_query_hw_ops *ops;
struct si_query_hw {
struct si_query b;
struct si_query_hw_ops *ops;
unsigned flags;
/* The query buffer and how many results are in it. */
struct r600_query_buffer buffer;
struct si_query_buffer buffer;
/* Size of the result in memory for both begin_query and end_query;
* this can be one or two numbers, or even the size of a structure. */
unsigned result_size;
@ -198,15 +198,15 @@ struct r600_query_hw {
};
bool si_query_hw_init(struct si_screen *sscreen,
struct r600_query_hw *query);
struct si_query_hw *query);
void si_query_hw_destroy(struct si_screen *sscreen,
struct r600_query *rquery);
struct si_query *rquery);
bool si_query_hw_begin(struct si_context *sctx,
struct r600_query *rquery);
struct si_query *rquery);
bool si_query_hw_end(struct si_context *sctx,
struct r600_query *rquery);
struct si_query *rquery);
bool si_query_hw_get_result(struct si_context *sctx,
struct r600_query *rquery,
struct si_query *rquery,
bool wait,
union pipe_query_result *result);
@ -238,7 +238,7 @@ enum {
* (c) expose one performance counter group per instance, but summed over all
* shader engines.
*/
struct r600_perfcounter_block {
struct si_perfcounter_block {
const char *basename;
unsigned flags;
unsigned num_counters;
@ -255,10 +255,10 @@ struct r600_perfcounter_block {
void *data;
};
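/* Editor's note (sketch; the numbers are illustrative, not from this
 * change): under layout (c) described above, a block with two instances
 * on a chip with four shader engines is exposed as two query groups,
 * one per instance, and each reported counter is summed over the SEs:
 *
 *   exposed_groups = num_instances                   (= 2)
 *   reported(inst) = sum over se in [0,4) of raw(se, inst)
 */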
struct r600_perfcounters {
struct si_perfcounters {
unsigned num_groups;
unsigned num_blocks;
struct r600_perfcounter_block *blocks;
struct si_perfcounter_block *blocks;
unsigned num_stop_cs_dwords;
unsigned num_instance_cs_dwords;
@ -271,14 +271,14 @@ struct r600_perfcounters {
int se, int instance);
void (*emit_shaders)(struct si_context *, unsigned shaders);
void (*emit_select)(struct si_context *,
struct r600_perfcounter_block *,
struct si_perfcounter_block *,
unsigned count, unsigned *selectors);
void (*emit_start)(struct si_context *,
struct r600_resource *buffer, uint64_t va);
void (*emit_stop)(struct si_context *,
struct r600_resource *buffer, uint64_t va);
void (*emit_read)(struct si_context *,
struct r600_perfcounter_block *,
struct si_perfcounter_block *,
unsigned count, unsigned *selectors,
struct r600_resource *buffer, uint64_t va);
@ -299,17 +299,17 @@ int si_get_perfcounter_group_info(struct si_screen *,
unsigned index,
struct pipe_driver_query_group_info *info);
bool si_perfcounters_init(struct r600_perfcounters *, unsigned num_blocks);
bool si_perfcounters_init(struct si_perfcounters *, unsigned num_blocks);
void si_perfcounters_add_block(struct si_screen *,
struct r600_perfcounters *,
struct si_perfcounters *,
const char *name, unsigned flags,
unsigned counters, unsigned selectors,
unsigned instances, void *data);
void si_perfcounters_do_destroy(struct r600_perfcounters *);
void si_perfcounters_do_destroy(struct si_perfcounters *);
void si_query_hw_reset_buffers(struct si_context *sctx,
struct r600_query_hw *query);
struct si_query_hw *query);
struct r600_qbo_state {
struct si_qbo_state {
void *saved_compute;
struct pipe_constant_buffer saved_const0;
struct pipe_shader_buffer saved_ssbo[3];


@ -39,8 +39,8 @@
#include "amd/common/sid.h"
static enum radeon_surf_mode
r600_choose_tiling(struct si_screen *sscreen,
const struct pipe_resource *templ);
si_choose_tiling(struct si_screen *sscreen,
const struct pipe_resource *templ);
bool si_prepare_for_dma_blit(struct si_context *sctx,
@ -104,13 +104,13 @@ bool si_prepare_for_dma_blit(struct si_context *sctx,
}
/* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */
static void r600_copy_region_with_blit(struct pipe_context *pipe,
struct pipe_resource *dst,
unsigned dst_level,
unsigned dstx, unsigned dsty, unsigned dstz,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box)
static void si_copy_region_with_blit(struct pipe_context *pipe,
struct pipe_resource *dst,
unsigned dst_level,
unsigned dstx, unsigned dsty, unsigned dstz,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box)
{
struct pipe_blit_info blit;
@ -138,7 +138,7 @@ static void r600_copy_region_with_blit(struct pipe_context *pipe,
}
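/* Editor's sketch (field choices are illustrative, not from this
 * change) of the pipe_blit_info setup such a helper performs; because
 * dst.box and src.box may differ in size, the blit may upsample or
 * downsample, which resource_copy_region cannot. */
static void blit_scaled_example(struct pipe_context *pipe,
				struct pipe_resource *dst,
				struct pipe_resource *src)
{
	struct pipe_blit_info blit;

	memset(&blit, 0, sizeof(blit));
	blit.src.resource = src;
	blit.src.format = src->format;
	u_box_3d(0, 0, 0, src->width0, src->height0, 1, &blit.src.box);
	blit.dst.resource = dst;
	blit.dst.format = dst->format;
	u_box_3d(0, 0, 0, dst->width0, dst->height0, 1, &blit.dst.box);
	blit.mask = PIPE_MASK_RGBA;
	blit.filter = PIPE_TEX_FILTER_LINEAR;
	pipe->blit(pipe, &blit);
}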
/* Copy from a full GPU texture to a transfer's staging one. */
static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
static void si_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
@ -146,7 +146,7 @@ static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_t
struct pipe_resource *src = transfer->resource;
if (src->nr_samples > 1) {
r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
src, transfer->level, &transfer->box);
return;
}
@ -156,7 +156,7 @@ static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_t
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
static void si_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
@ -167,7 +167,7 @@ static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600
u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
if (dst->nr_samples > 1) {
r600_copy_region_with_blit(ctx, dst, transfer->level,
si_copy_region_with_blit(ctx, dst, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
src, 0, &sbox);
return;
@ -178,11 +178,11 @@ static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600
src, 0, &sbox);
}
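/* Editor's note: these two staging helpers bracket a texture transfer.
 * As the transfer_map/transfer_unmap implementations further down in
 * this diff show, reads are staged on map and writes are flushed back
 * on unmap:
 *
 *   map:    if (usage & PIPE_TRANSFER_READ)
 *               si_copy_to_staging_texture(ctx, trans);
 *   unmap:  si_copy_from_staging_texture(ctx, rtransfer);
 */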
static unsigned r600_texture_get_offset(struct si_screen *sscreen,
struct r600_texture *rtex, unsigned level,
const struct pipe_box *box,
unsigned *stride,
unsigned *layer_stride)
static unsigned si_texture_get_offset(struct si_screen *sscreen,
struct r600_texture *rtex, unsigned level,
const struct pipe_box *box,
unsigned *stride,
unsigned *layer_stride)
{
if (sscreen->info.chip_class >= GFX9) {
*stride = rtex->surface.u.gfx9.surf_pitch * rtex->surface.bpe;
@ -217,16 +217,16 @@ static unsigned r600_texture_get_offset(struct si_screen *sscreen,
}
}
static int r600_init_surface(struct si_screen *sscreen,
struct radeon_surf *surface,
const struct pipe_resource *ptex,
enum radeon_surf_mode array_mode,
unsigned pitch_in_bytes_override,
unsigned offset,
bool is_imported,
bool is_scanout,
bool is_flushed_depth,
bool tc_compatible_htile)
static int si_init_surface(struct si_screen *sscreen,
struct radeon_surf *surface,
const struct pipe_resource *ptex,
enum radeon_surf_mode array_mode,
unsigned pitch_in_bytes_override,
unsigned offset,
bool is_imported,
bool is_scanout,
bool is_flushed_depth,
bool tc_compatible_htile)
{
const struct util_format_description *desc =
util_format_description(ptex->format);
@ -321,9 +321,9 @@ static int r600_init_surface(struct si_screen *sscreen,
return 0;
}
static void r600_texture_init_metadata(struct si_screen *sscreen,
struct r600_texture *rtex,
struct radeon_bo_metadata *metadata)
static void si_texture_init_metadata(struct si_screen *sscreen,
struct r600_texture *rtex,
struct radeon_bo_metadata *metadata)
{
struct radeon_surf *surface = &rtex->surface;
@ -347,11 +347,11 @@ static void r600_texture_init_metadata(struct si_screen *sscreen,
}
}
static void r600_surface_import_metadata(struct si_screen *sscreen,
struct radeon_surf *surf,
struct radeon_bo_metadata *metadata,
enum radeon_surf_mode *array_mode,
bool *is_scanout)
static void si_surface_import_metadata(struct si_screen *sscreen,
struct radeon_surf *surf,
struct radeon_bo_metadata *metadata,
enum radeon_surf_mode *array_mode,
bool *is_scanout)
{
if (sscreen->info.chip_class >= GFX9) {
if (metadata->u.gfx9.swizzle_mode > 0)
@ -425,7 +425,7 @@ void si_texture_discard_cmask(struct si_screen *sscreen,
p_atomic_inc(&sscreen->compressed_colortex_counter);
}
static bool r600_can_disable_dcc(struct r600_texture *rtex)
static bool si_can_disable_dcc(struct r600_texture *rtex)
{
/* We can't disable DCC if it can be written by another process. */
return rtex->dcc_offset &&
@ -433,10 +433,10 @@ static bool r600_can_disable_dcc(struct r600_texture *rtex)
!(rtex->resource.external_usage & PIPE_HANDLE_USAGE_WRITE));
}
static bool r600_texture_discard_dcc(struct si_screen *sscreen,
struct r600_texture *rtex)
static bool si_texture_discard_dcc(struct si_screen *sscreen,
struct r600_texture *rtex)
{
if (!r600_can_disable_dcc(rtex))
if (!si_can_disable_dcc(rtex))
return false;
assert(rtex->dcc_separate_buffer == NULL);
@ -475,7 +475,7 @@ bool si_texture_disable_dcc(struct si_context *sctx,
{
struct si_screen *sscreen = sctx->screen;
if (!r600_can_disable_dcc(rtex))
if (!si_can_disable_dcc(rtex))
return false;
if (&sctx->b.b == sscreen->aux_context)
@ -488,13 +488,13 @@ bool si_texture_disable_dcc(struct si_context *sctx,
if (&sctx->b.b == sscreen->aux_context)
mtx_unlock(&sscreen->aux_context_lock);
return r600_texture_discard_dcc(sscreen, rtex);
return si_texture_discard_dcc(sscreen, rtex);
}
static void r600_reallocate_texture_inplace(struct si_context *sctx,
struct r600_texture *rtex,
unsigned new_bind_flag,
bool invalidate_storage)
static void si_reallocate_texture_inplace(struct si_context *sctx,
struct r600_texture *rtex,
unsigned new_bind_flag,
bool invalidate_storage)
{
struct pipe_screen *screen = sctx->b.b.screen;
struct r600_texture *new_tex;
@ -511,7 +511,7 @@ static void r600_reallocate_texture_inplace(struct si_context *sctx,
return;
/* This fails with MSAA, depth, and compressed textures. */
if (r600_choose_tiling(sctx->screen, &templ) !=
if (si_choose_tiling(sctx->screen, &templ) !=
RADEON_SURF_MODE_LINEAR_ALIGNED)
return;
}
@ -536,7 +536,7 @@ static void r600_reallocate_texture_inplace(struct si_context *sctx,
if (new_bind_flag == PIPE_BIND_LINEAR) {
si_texture_discard_cmask(sctx->screen, rtex);
r600_texture_discard_dcc(sctx->screen, rtex);
si_texture_discard_dcc(sctx->screen, rtex);
}
/* Replace the structure fields of rtex. */
@ -674,11 +674,11 @@ static void si_apply_opaque_metadata(struct si_screen *sscreen,
rtex->dcc_offset = 0;
}
static boolean r600_texture_get_handle(struct pipe_screen* screen,
struct pipe_context *ctx,
struct pipe_resource *resource,
struct winsys_handle *whandle,
unsigned usage)
static boolean si_texture_get_handle(struct pipe_screen* screen,
struct pipe_context *ctx,
struct pipe_resource *resource,
struct winsys_handle *whandle,
unsigned usage)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct si_context *sctx;
@ -706,7 +706,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
sscreen->info.has_local_buffers &&
whandle->type != DRM_API_HANDLE_TYPE_KMS)) {
assert(!res->b.is_shared);
r600_reallocate_texture_inplace(sctx, rtex,
si_reallocate_texture_inplace(sctx, rtex,
PIPE_BIND_SHARED, false);
flush = true;
assert(res->b.b.bind & PIPE_BIND_SHARED);
@ -743,7 +743,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
/* Set metadata. */
if (!res->b.is_shared || update_metadata) {
r600_texture_init_metadata(sscreen, rtex, &metadata);
si_texture_init_metadata(sscreen, rtex, &metadata);
si_query_opaque_metadata(sscreen, rtex, &metadata);
sscreen->ws->buffer_set_metadata(res->buf, &metadata);
@ -817,8 +817,8 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen,
slice_size, whandle);
}
static void r600_texture_destroy(struct pipe_screen *screen,
struct pipe_resource *ptex)
static void si_texture_destroy(struct pipe_screen *screen,
struct pipe_resource *ptex)
{
struct r600_texture *rtex = (struct r600_texture*)ptex;
struct r600_resource *resource = &rtex->resource;
@ -834,7 +834,7 @@ static void r600_texture_destroy(struct pipe_screen *screen,
FREE(rtex);
}
static const struct u_resource_vtbl r600_texture_vtbl;
static const struct u_resource_vtbl si_texture_vtbl;
/* The number of samples can be specified independently of the texture. */
void si_texture_get_fmask_info(struct si_screen *sscreen,
@ -892,8 +892,8 @@ void si_texture_get_fmask_info(struct si_screen *sscreen,
out->size = fmask.surf_size;
}
static void r600_texture_allocate_fmask(struct si_screen *sscreen,
struct r600_texture *rtex)
static void si_texture_allocate_fmask(struct si_screen *sscreen,
struct r600_texture *rtex)
{
si_texture_get_fmask_info(sscreen, rtex,
rtex->resource.b.b.nr_samples, &rtex->fmask);
@ -956,8 +956,8 @@ void si_texture_get_cmask_info(struct si_screen *sscreen,
align(slice_bytes, base_align);
}
static void r600_texture_allocate_cmask(struct si_screen *sscreen,
struct r600_texture *rtex)
static void si_texture_allocate_cmask(struct si_screen *sscreen,
struct r600_texture *rtex)
{
si_texture_get_cmask_info(sscreen, rtex, &rtex->cmask);
@ -967,8 +967,8 @@ static void r600_texture_allocate_cmask(struct si_screen *sscreen,
rtex->cb_color_info |= S_028C70_FAST_CLEAR(1);
}
static void r600_texture_get_htile_size(struct si_screen *sscreen,
struct r600_texture *rtex)
static void si_texture_get_htile_size(struct si_screen *sscreen,
struct r600_texture *rtex)
{
unsigned cl_width, cl_height, width, height;
unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align;
@ -1035,11 +1035,11 @@ static void r600_texture_get_htile_size(struct si_screen *sscreen,
align(slice_bytes, base_align);
}
static void r600_texture_allocate_htile(struct si_screen *sscreen,
struct r600_texture *rtex)
static void si_texture_allocate_htile(struct si_screen *sscreen,
struct r600_texture *rtex)
{
if (sscreen->info.chip_class <= VI && !rtex->tc_compatible_htile)
r600_texture_get_htile_size(sscreen, rtex);
si_texture_get_htile_size(sscreen, rtex);
if (!rtex->surface.htile_size)
return;
@ -1198,10 +1198,10 @@ void si_print_texture_info(struct si_screen *sscreen,
/* Common processing for si_texture_create and si_texture_from_handle */
static struct r600_texture *
r600_texture_create_object(struct pipe_screen *screen,
const struct pipe_resource *base,
struct pb_buffer *buf,
struct radeon_surf *surface)
si_texture_create_object(struct pipe_screen *screen,
const struct pipe_resource *base,
struct pb_buffer *buf,
struct radeon_surf *surface)
{
struct r600_texture *rtex;
struct r600_resource *resource;
@ -1214,7 +1214,7 @@ r600_texture_create_object(struct pipe_screen *screen,
resource = &rtex->resource;
resource->b.b = *base;
resource->b.b.next = NULL;
resource->b.vtbl = &r600_texture_vtbl;
resource->b.vtbl = &si_texture_vtbl;
pipe_reference_init(&resource->b.b.reference, 1);
resource->b.b.screen = screen;
@ -1267,14 +1267,14 @@ r600_texture_create_object(struct pipe_screen *screen,
rtex->db_compatible = true;
if (!(sscreen->debug_flags & DBG(NO_HYPERZ)))
r600_texture_allocate_htile(sscreen, rtex);
si_texture_allocate_htile(sscreen, rtex);
}
} else {
if (base->nr_samples > 1 &&
!buf &&
!(sscreen->debug_flags & DBG(NO_FMASK))) {
r600_texture_allocate_fmask(sscreen, rtex);
r600_texture_allocate_cmask(sscreen, rtex);
si_texture_allocate_fmask(sscreen, rtex);
si_texture_allocate_cmask(sscreen, rtex);
rtex->cmask_buffer = &rtex->resource;
if (!rtex->fmask.size || !rtex->cmask.size) {
@ -1369,7 +1369,7 @@ r600_texture_create_object(struct pipe_screen *screen,
}
static enum radeon_surf_mode
r600_choose_tiling(struct si_screen *sscreen,
si_choose_tiling(struct si_screen *sscreen,
const struct pipe_resource *templ)
{
const struct util_format_description *desc = util_format_description(templ->format);
@ -1453,8 +1453,8 @@ struct pipe_resource *si_texture_create(struct pipe_screen *screen,
int r;
r = r600_init_surface(sscreen, &surface, templ,
r600_choose_tiling(sscreen, templ), 0, 0,
r = si_init_surface(sscreen, &surface, templ,
si_choose_tiling(sscreen, templ), 0, 0,
false, false, is_flushed_depth,
tc_compatible_htile);
if (r) {
@ -1462,13 +1462,13 @@ struct pipe_resource *si_texture_create(struct pipe_screen *screen,
}
return (struct pipe_resource *)
r600_texture_create_object(screen, templ, NULL, &surface);
si_texture_create_object(screen, templ, NULL, &surface);
}
static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
const struct pipe_resource *templ,
struct winsys_handle *whandle,
unsigned usage)
static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
const struct pipe_resource *templ,
struct winsys_handle *whandle,
unsigned usage)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct pb_buffer *buf = NULL;
@ -1490,16 +1490,16 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen
return NULL;
sscreen->ws->buffer_get_metadata(buf, &metadata);
r600_surface_import_metadata(sscreen, &surface, &metadata,
si_surface_import_metadata(sscreen, &surface, &metadata,
&array_mode, &is_scanout);
r = r600_init_surface(sscreen, &surface, templ, array_mode, stride,
r = si_init_surface(sscreen, &surface, templ, array_mode, stride,
offset, true, is_scanout, false, false);
if (r) {
return NULL;
}
rtex = r600_texture_create_object(screen, templ, buf, &surface);
rtex = si_texture_create_object(screen, templ, buf, &surface);
if (!rtex)
return NULL;
@ -1584,10 +1584,10 @@ bool si_init_flushed_depth_texture(struct pipe_context *ctx,
* which is supposed to hold a subregion of the texture "orig" at the given
* mipmap level.
*/
static void r600_init_temp_resource_from_box(struct pipe_resource *res,
struct pipe_resource *orig,
const struct pipe_box *box,
unsigned level, unsigned flags)
static void si_init_temp_resource_from_box(struct pipe_resource *res,
struct pipe_resource *orig,
const struct pipe_box *box,
unsigned level, unsigned flags)
{
memset(res, 0, sizeof(*res));
res->format = orig->format;
@ -1607,10 +1607,10 @@ static void r600_init_temp_resource_from_box(struct pipe_resource *res,
}
}
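/* Editor's sketch (assumption: mirrors the transfer-map call sites
 * below, where this template is paired with a staging allocation): */
static struct pipe_resource *
create_staging_for_box(struct pipe_screen *screen,
		       struct pipe_resource *texture,
		       const struct pipe_box *box, unsigned level)
{
	struct pipe_resource templ;

	si_init_temp_resource_from_box(&templ, texture, box, level,
				       R600_RESOURCE_FLAG_TRANSFER);
	templ.usage = PIPE_USAGE_STAGING;
	return screen->resource_create(screen, &templ);
}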
static bool r600_can_invalidate_texture(struct si_screen *sscreen,
struct r600_texture *rtex,
unsigned transfer_usage,
const struct pipe_box *box)
static bool si_can_invalidate_texture(struct si_screen *sscreen,
struct r600_texture *rtex,
unsigned transfer_usage,
const struct pipe_box *box)
{
return !rtex->resource.b.is_shared &&
!(transfer_usage & PIPE_TRANSFER_READ) &&
@ -1621,8 +1621,8 @@ static bool r600_can_invalidate_texture(struct si_screen *sscreen,
box->depth);
}
static void r600_texture_invalidate_storage(struct si_context *sctx,
struct r600_texture *rtex)
static void si_texture_invalidate_storage(struct si_context *sctx,
struct r600_texture *rtex)
{
struct si_screen *sscreen = sctx->screen;
@ -1642,12 +1642,12 @@ static void r600_texture_invalidate_storage(struct si_context *sctx,
sctx->b.num_alloc_tex_transfer_bytes += rtex->size;
}
static void *r600_texture_transfer_map(struct pipe_context *ctx,
struct pipe_resource *texture,
unsigned level,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer)
static void *si_texture_transfer_map(struct pipe_context *ctx,
struct pipe_resource *texture,
unsigned level,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_texture *rtex = (struct r600_texture*)texture;
@ -1671,10 +1671,10 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
box->width >= 4 && box->height >= 4 &&
p_atomic_inc_return(&rtex->num_level0_transfers) == 10) {
bool can_invalidate =
r600_can_invalidate_texture(sctx->screen, rtex,
si_can_invalidate_texture(sctx->screen, rtex,
usage, box);
r600_reallocate_texture_inplace(sctx, rtex,
si_reallocate_texture_inplace(sctx, rtex,
PIPE_BIND_LINEAR,
can_invalidate);
}
@ -1700,9 +1700,9 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
!sctx->b.ws->buffer_wait(rtex->resource.buf, 0,
RADEON_USAGE_READWRITE)) {
/* It's busy. */
if (r600_can_invalidate_texture(sctx->screen, rtex,
if (si_can_invalidate_texture(sctx->screen, rtex,
usage, box))
r600_texture_invalidate_storage(sctx, rtex);
si_texture_invalidate_storage(sctx, rtex);
else
use_staging_texture = true;
}
@ -1732,7 +1732,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
*/
struct pipe_resource resource;
r600_init_temp_resource_from_box(&resource, texture, box, level, 0);
si_init_temp_resource_from_box(&resource, texture, box, level, 0);
if (!si_init_flushed_depth_texture(ctx, &resource, &staging_depth)) {
R600_ERR("failed to create temporary texture to hold untiled copy\n");
@ -1748,14 +1748,14 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
return NULL;
}
r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
si_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box);
si_blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth,
0, 0, 0, box->depth, 0, 0);
pipe_resource_reference(&temp, NULL);
}
/* Just get the strides. */
r600_texture_get_offset(sctx->screen, staging_depth, level, NULL,
si_texture_get_offset(sctx->screen, staging_depth, level, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
} else {
@ -1772,7 +1772,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
box->z, box->z + box->depth - 1,
0, 0);
offset = r600_texture_get_offset(sctx->screen, staging_depth,
offset = si_texture_get_offset(sctx->screen, staging_depth,
level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
@ -1784,7 +1784,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
struct pipe_resource resource;
struct r600_texture *staging;
r600_init_temp_resource_from_box(&resource, texture, box, level,
si_init_temp_resource_from_box(&resource, texture, box, level,
R600_RESOURCE_FLAG_TRANSFER);
resource.usage = (usage & PIPE_TRANSFER_READ) ?
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
@ -1799,19 +1799,19 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
trans->staging = &staging->resource;
/* Just get the strides. */
r600_texture_get_offset(sctx->screen, staging, 0, NULL,
si_texture_get_offset(sctx->screen, staging, 0, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
if (usage & PIPE_TRANSFER_READ)
r600_copy_to_staging_texture(ctx, trans);
si_copy_to_staging_texture(ctx, trans);
else
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
buf = trans->staging;
} else {
/* the resource is mapped directly */
offset = r600_texture_get_offset(sctx->screen, rtex, level, box,
offset = si_texture_get_offset(sctx->screen, rtex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
buf = &rtex->resource;
@ -1827,8 +1827,8 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
return map + offset;
}
static void r600_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer* transfer)
static void si_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer* transfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
@ -1842,7 +1842,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
&rtransfer->staging->b.b, transfer->level,
&transfer->box);
} else {
r600_copy_from_staging_texture(ctx, rtransfer);
si_copy_from_staging_texture(ctx, rtransfer);
}
}
@ -1873,13 +1873,13 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
FREE(transfer);
}
static const struct u_resource_vtbl r600_texture_vtbl =
static const struct u_resource_vtbl si_texture_vtbl =
{
NULL, /* get_handle */
r600_texture_destroy, /* resource_destroy */
r600_texture_transfer_map, /* transfer_map */
si_texture_destroy, /* resource_destroy */
si_texture_transfer_map, /* transfer_map */
u_default_transfer_flush_region, /* transfer_flush_region */
r600_texture_transfer_unmap, /* transfer_unmap */
si_texture_transfer_unmap, /* transfer_unmap */
};
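/* Editor's note (sketch; the u_transfer_*_vtbl wrapper names are an
 * assumption based on util/u_transfer.h of this era): this vtbl is not
 * called directly — the generic wrappers recover it through
 * u_resource() and dispatch, roughly:
 *
 *   sctx->b.b.transfer_map   = u_transfer_map_vtbl;
 *   sctx->b.b.transfer_unmap = u_transfer_unmap_vtbl;
 */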
/* DCC channel type categories within which formats can be reinterpreted
@ -2011,9 +2011,9 @@ struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
return &surface->base;
}
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
struct pipe_resource *tex,
const struct pipe_surface *templ)
static struct pipe_surface *si_create_surface(struct pipe_context *pipe,
struct pipe_resource *tex,
const struct pipe_surface *templ)
{
unsigned level = templ->u.tex.level;
unsigned width = u_minify(tex->width0, level);
@ -2049,8 +2049,8 @@ static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
width, height);
}
static void r600_surface_destroy(struct pipe_context *pipe,
struct pipe_surface *surface)
static void si_surface_destroy(struct pipe_context *pipe,
struct pipe_surface *surface)
{
pipe_resource_reference(&surface->texture, NULL);
FREE(surface);
@ -2188,7 +2188,7 @@ static unsigned vi_get_context_dcc_stats_index(struct si_context *sctx,
static struct pipe_query *
vi_create_resuming_pipestats_query(struct si_context *sctx)
{
struct r600_query_hw *query = (struct r600_query_hw*)
struct si_query_hw *query = (struct si_query_hw*)
sctx->b.b.create_query(&sctx->b.b, PIPE_QUERY_PIPELINE_STATISTICS, 0);
query->flags |= R600_QUERY_HW_FLAG_BEGIN_RESUMES;
@ -2312,7 +2312,7 @@ void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
ctx->get_query_result(ctx, sctx->b.dcc_stats[i].ps_stats[2],
true, &result);
si_query_hw_reset_buffers(sctx,
(struct r600_query_hw*)
(struct si_query_hw*)
sctx->b.dcc_stats[i].ps_stats[2]);
/* Compute the approximate number of fullscreen draws. */
@ -2353,9 +2353,9 @@ void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
}
static struct pipe_memory_object *
r600_memobj_from_handle(struct pipe_screen *screen,
struct winsys_handle *whandle,
bool dedicated)
si_memobj_from_handle(struct pipe_screen *screen,
struct winsys_handle *whandle,
bool dedicated)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
@ -2382,8 +2382,8 @@ r600_memobj_from_handle(struct pipe_screen *screen,
}
static void
r600_memobj_destroy(struct pipe_screen *screen,
struct pipe_memory_object *_memobj)
si_memobj_destroy(struct pipe_screen *screen,
struct pipe_memory_object *_memobj)
{
struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
@ -2392,10 +2392,10 @@ r600_memobj_destroy(struct pipe_screen *screen,
}
static struct pipe_resource *
r600_texture_from_memobj(struct pipe_screen *screen,
const struct pipe_resource *templ,
struct pipe_memory_object *_memobj,
uint64_t offset)
si_texture_from_memobj(struct pipe_screen *screen,
const struct pipe_resource *templ,
struct pipe_memory_object *_memobj,
uint64_t offset)
{
int r;
struct si_screen *sscreen = (struct si_screen*)screen;
@ -2409,7 +2409,7 @@ r600_texture_from_memobj(struct pipe_screen *screen,
if (memobj->b.dedicated) {
sscreen->ws->buffer_get_metadata(memobj->buf, &metadata);
r600_surface_import_metadata(sscreen, &surface, &metadata,
si_surface_import_metadata(sscreen, &surface, &metadata,
&array_mode, &is_scanout);
} else {
/**
@ -2439,14 +2439,14 @@ r600_texture_from_memobj(struct pipe_screen *screen,
}
r = r600_init_surface(sscreen, &surface, templ,
r = si_init_surface(sscreen, &surface, templ,
array_mode, memobj->stride,
offset, true, is_scanout,
false, false);
if (r)
return NULL;
rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface);
rtex = si_texture_create_object(screen, templ, memobj->buf, &surface);
if (!rtex)
return NULL;
@ -2485,16 +2485,16 @@ static bool si_check_resource_capability(struct pipe_screen *screen,
void si_init_screen_texture_functions(struct si_screen *sscreen)
{
sscreen->b.resource_from_handle = r600_texture_from_handle;
sscreen->b.resource_get_handle = r600_texture_get_handle;
sscreen->b.resource_from_memobj = r600_texture_from_memobj;
sscreen->b.memobj_create_from_handle = r600_memobj_from_handle;
sscreen->b.memobj_destroy = r600_memobj_destroy;
sscreen->b.resource_from_handle = si_texture_from_handle;
sscreen->b.resource_get_handle = si_texture_get_handle;
sscreen->b.resource_from_memobj = si_texture_from_memobj;
sscreen->b.memobj_create_from_handle = si_memobj_from_handle;
sscreen->b.memobj_destroy = si_memobj_destroy;
sscreen->b.check_resource_capability = si_check_resource_capability;
}
void si_init_context_texture_functions(struct si_context *sctx)
{
sctx->b.b.create_surface = r600_create_surface;
sctx->b.b.surface_destroy = r600_surface_destroy;
sctx->b.b.create_surface = si_create_surface;
sctx->b.b.surface_destroy = si_surface_destroy;
}


@ -456,7 +456,7 @@ static void si_pc_emit_shaders(struct si_context *sctx,
}
static void si_pc_emit_select(struct si_context *sctx,
struct r600_perfcounter_block *group,
struct si_perfcounter_block *group,
unsigned count, unsigned *selectors)
{
struct si_pc_block *sigroup = (struct si_pc_block *)group->data;
@ -597,7 +597,7 @@ static void si_pc_emit_stop(struct si_context *sctx,
}
static void si_pc_emit_read(struct si_context *sctx,
struct r600_perfcounter_block *group,
struct si_perfcounter_block *group,
unsigned count, unsigned *selectors,
struct r600_resource *buffer, uint64_t va)
{
@ -650,7 +650,7 @@ static void si_pc_cleanup(struct si_screen *sscreen)
void si_init_perfcounters(struct si_screen *screen)
{
struct r600_perfcounters *pc;
struct si_perfcounters *pc;
struct si_pc_block *blocks;
unsigned num_blocks;
unsigned i;
@ -680,7 +680,7 @@ void si_init_perfcounters(struct si_screen *screen)
screen->info.max_sh_per_se);
}
pc = CALLOC_STRUCT(r600_perfcounters);
pc = CALLOC_STRUCT(si_perfcounters);
if (!pc)
return;


@ -153,11 +153,11 @@ struct si_screen {
/* GPU load thread. */
mtx_t gpu_load_mutex;
thrd_t gpu_load_thread;
union r600_mmio_counters mmio_counters;
union si_mmio_counters mmio_counters;
volatile unsigned gpu_load_stop_thread; /* bool */
/* Performance counters. */
struct r600_perfcounters *perfcounters;
struct si_perfcounters *perfcounters;
/* If pipe_screen wants to recompute and re-emit the framebuffer,
* sampler, and image states of all contexts, it should atomically


@ -1359,7 +1359,7 @@ void si_set_occlusion_query_state(struct si_context *sctx,
si_mark_atom_dirty(sctx, &sctx->msaa_config);
}
void si_save_qbo_state(struct si_context *sctx, struct r600_qbo_state *st)
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st)
{
st->saved_compute = sctx->cs_shader_state.program;


@ -405,7 +405,7 @@ si_create_sampler_view_custom(struct pipe_context *ctx,
unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct r600_qbo_state *st);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_set_occlusion_query_state(struct si_context *sctx,
bool old_perfect_enable);