r600: refactor step 2 - r600_resource cast is replaced by a function

The existing r600_resource() inline helper is renamed to r600_as_resource(), and the bare casts are replaced with calls to it.

This is done with the command below, plus some manual adjustments:
find . -type f -name "*.c" -exec grep -lE " = [(]struct[[:space:]]+r600_resource[[:space:]]*\*[)]" {} + |\
xargs sed -r -i "s/[(]struct[[:space:]]+r600_resource[[:space:]]*\*[)]([^;]*);/r600_as_resource(\1);/"
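
For reference, the helper is the existing inline cast from the common header, renamed by this change (see the r600_as_resource() hunk below); a minimal before/after sketch of a call site, where buf stands in for any struct pipe_resource pointer:

static inline struct r600_resource *r600_as_resource(struct pipe_resource *r)
{
	return (struct r600_resource*)r;
}

-	struct r600_resource *rbuffer = (struct r600_resource *)buf;
+	struct r600_resource *rbuffer = r600_as_resource(buf);

Unlike the bare cast, which compiles regardless of what buf points to, the helper's prototype constrains its argument, so passing anything other than a struct pipe_resource * becomes a compile-time error.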

Signed-off-by: Patrick Lerda <patrick9876@free.fr>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35967>
Patrick Lerda 2025-07-04 14:51:34 +02:00 committed by Marge Bot
parent 43784efeba
commit bb749d3e56
16 changed files with 108 additions and 112 deletions

View file

@@ -68,7 +68,7 @@ struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen,
buffer = pipe_buffer_create((struct pipe_screen*) screen,
0, PIPE_USAGE_IMMUTABLE, size);
-return (struct r600_resource *)buffer;
+return r600_as_resource(buffer);
}
@@ -328,7 +328,7 @@ static void compute_emit_cs(struct r600_context *rctx,
current->shader.has_txq_cube_array_z_comp;
if (info->indirect) {
-struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
+struct r600_resource *indirect_resource = r600_as_resource(info->indirect);
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_MAP_READ);
unsigned offset = info->indirect_offset / 4;
indirect_grid[0] = data[offset];

View file

@@ -19,8 +19,8 @@ void evergreen_dma_copy_buffer(struct r600_context *rctx,
{
struct radeon_cmdbuf *cs = &rctx->b.dma.cs;
unsigned i, ncopy, csize, sub_cmd, shift;
-struct r600_resource *rdst = (struct r600_resource*)dst;
-struct r600_resource *rsrc = (struct r600_resource*)src;
+struct r600_resource *rdst = r600_as_resource(dst);
+struct r600_resource *rsrc = r600_as_resource(src);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
@@ -75,10 +75,10 @@ void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
-util_range_add(dst, &r600_resource(dst)->valid_buffer_range, offset,
+util_range_add(dst, &r600_as_resource(dst)->valid_buffer_range, offset,
offset + size);
-offset += r600_resource(dst)->gpu_address;
+offset += r600_as_resource(dst)->gpu_address;
/* Flush the cache where the resource is bound. */
rctx->b.flags |= r600_get_flush_flags(coher) |
@@ -105,7 +105,7 @@ void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
/* This must be done after r600_need_cs_space. */
reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-(struct r600_resource*)dst, RADEON_USAGE_WRITE |
+r600_as_resource(dst), RADEON_USAGE_WRITE |
RADEON_PRIO_CP_DMA);
radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));

View file

@@ -1308,7 +1308,7 @@ void evergreen_init_color_surface_rat(struct r600_context *rctx,
struct pipe_resource *pipe_buffer = surf->base.texture;
struct r600_tex_color_info color;
-evergreen_set_color_surface_buffer(rctx, (struct r600_resource *)surf->base.texture,
+evergreen_set_color_surface_buffer(rctx, r600_as_resource(surf->base.texture),
surf->base.format, 0, pipe_buffer->width0,
&color);
@@ -1325,7 +1325,7 @@ void evergreen_init_color_surface_rat(struct r600_context *rctx,
surf->cb_color_view = 0;
/* Set the buffer range the GPU will have access to: */
-util_range_add(pipe_buffer, &r600_resource(pipe_buffer)->valid_buffer_range,
+util_range_add(pipe_buffer, &r600_as_resource(pipe_buffer)->valid_buffer_range,
0, pipe_buffer->width0);
}
@@ -1777,7 +1777,7 @@ evergreen_emit_arb_shader_image_load_store_incomplete(struct r600_context *rctx,
dummy_reloc = radeon_add_to_buffer_list(&rctx->b,
&rctx->b.gfx,
-r600_resource(dummy),
+r600_as_resource(dummy),
RADEON_USAGE_READ |
RADEON_PRIO_SHADER_RW_BUFFER);
@@ -1836,7 +1836,7 @@ static void evergreen_emit_image_state(struct r600_context *rctx, struct r600_at
continue;
}
-resource = (struct r600_resource *)image->base.resource;
+resource = r600_as_resource(image->base.resource);
if (resource->b.b.target != PIPE_BUFFER)
rtex = r600_as_texture(image->base.resource);
else
@@ -1974,7 +1974,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
tex = r600_as_texture(cb->base.texture);
reloc = radeon_add_to_buffer_list(&rctx->b,
&rctx->b.gfx,
-(struct r600_resource*)cb->base.texture,
+r600_as_resource(cb->base.texture),
RADEON_USAGE_READWRITE |
(tex->resource.b.b.nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
@@ -2032,7 +2032,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r
struct r600_surface *zb = (struct r600_surface*)rctx->framebuffer.fb_zsbuf;
unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
&rctx->b.gfx,
-(struct r600_resource*)state->zsbuf.texture,
+r600_as_resource(state->zsbuf.texture),
RADEON_USAGE_READWRITE |
(zb->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
@@ -2247,7 +2247,7 @@ static void evergreen_emit_vertex_buffers(struct r600_context *rctx,
1 : shader->strides[buffer_index];
vb = &state->vb[buffer_index];
-rbuffer = (struct r600_resource*)vb->buffer.resource;
+rbuffer = r600_as_resource(vb->buffer.resource);
assert(rbuffer);
va = rbuffer->gpu_address + vb->buffer_offset;
@@ -2308,7 +2308,7 @@ static void evergreen_emit_constant_buffers(struct r600_context *rctx,
unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
cb = &state->cb[buffer_index];
-rbuffer = (struct r600_resource*)cb->buffer;
+rbuffer = r600_as_resource(cb->buffer);
assert(rbuffer);
va = rbuffer->gpu_address + cb->buffer_offset;
@@ -3043,7 +3043,7 @@ static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
if (state->enable) {
-rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
+rbuffer = r600_as_resource(state->esgs_ring.buffer);
radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
@@ -3053,7 +3053,7 @@ static void evergreen_emit_gs_rings(struct r600_context *rctx, struct r600_atom
radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
state->esgs_ring.buffer_size >> 8);
-rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
+rbuffer = r600_as_resource(state->gsvs_ring.buffer);
radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
rbuffer->gpu_address >> 8);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
@@ -4378,7 +4378,7 @@ static void evergreen_setup_immed_buffer(struct r600_context *rctx,
uint32_t immed_size = rscreen->b.info.max_se * 256 * 64 * util_format_get_blocksize(pformat);
struct eg_buf_res_params buf_params;
bool skip_reloc = false;
-struct r600_resource *resource = (struct r600_resource *)rview->base.resource;
+struct r600_resource *resource = r600_as_resource(rview->base.resource);
if (!resource->immed_buffer) {
eg_resource_alloc_immed(&rscreen->b, resource, immed_size);
}
@@ -4466,7 +4466,7 @@ static void evergreen_set_shader_buffers(struct pipe_context *ctx,
buf = &buffers[idx];
pipe_resource_reference((struct pipe_resource **)&rview->base.resource, buf->buffer);
-resource = (struct r600_resource *)rview->base.resource;
+resource = r600_as_resource(rview->base.resource);
evergreen_setup_immed_buffer(rctx, rview, PIPE_FORMAT_R32_UINT);
@@ -4587,7 +4587,7 @@ static void evergreen_set_shader_images(struct pipe_context *ctx,
iview = &images[idx];
image = iview->resource;
-resource = (struct r600_resource *)image;
+resource = r600_as_resource(image);
r600_context_add_resource_size(ctx, image);
@@ -4771,7 +4771,7 @@ static void evergreen_get_shader_buffers(struct r600_context *rctx,
pipe_resource_reference(&sbuf[idx].buffer, rview->base.resource);
if (rview->base.resource) {
-uint64_t rview_va = ((struct r600_resource *)rview->base.resource)->gpu_address;
+uint64_t rview_va = r600_as_resource(rview->base.resource)->gpu_address;
uint64_t prog_va = rview->resource_words[0];
@@ -5186,7 +5186,7 @@ void eg_trace_emit(struct r600_context *rctx)
/* This must be done after r600_need_cs_space. */
reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-(struct r600_resource*)rctx->trace_buf, RADEON_USAGE_WRITE |
+rctx->trace_buf, RADEON_USAGE_WRITE |
RADEON_PRIO_CP_DMA);
rctx->trace_id++;
@@ -5419,7 +5419,7 @@ void evergreen_emit_atomic_buffer_setup(struct r600_context *rctx,
for (int i = 0; i < global_atomic_count; i++) {
const struct r600_shader_atomic *atomic = &combined_atomics[i];
-struct r600_resource *resource = r600_resource(astate->buffer[atomic->resource_id].buffer);
+struct r600_resource *resource = r600_as_resource(astate->buffer[atomic->resource_id].buffer);
assert(resource);
if (rctx->b.gfx_level == CAYMAN)
@@ -5449,7 +5449,7 @@ void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
for (int i = 0; i < global_atomic_count; i++) {
const struct r600_shader_atomic *atomic = &combined_atomics[i];
-struct r600_resource *resource = r600_resource(astate->buffer[atomic->resource_id].buffer);
+struct r600_resource *resource = r600_as_resource(astate->buffer[atomic->resource_id].buffer);
assert(resource);
if (rctx->b.gfx_level == CAYMAN)
@@ -5463,10 +5463,10 @@ void evergreen_emit_atomic_buffer_save(struct r600_context *rctx,
++rctx->append_fence_id;
reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-r600_resource(rctx->append_fence),
+r600_as_resource(rctx->append_fence),
RADEON_USAGE_READWRITE |
RADEON_PRIO_SHADER_RW_BUFFER);
-dst_offset = r600_resource(rctx->append_fence)->gpu_address;
+dst_offset = r600_as_resource(rctx->append_fence)->gpu_address;
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOS, 3, 0) | pkt_flags);
radeon_emit(cs, EVENT_TYPE(event) | EVENT_INDEX(6));
radeon_emit(cs, dst_offset & 0xffffffff);

View file

@@ -862,7 +862,7 @@ static void r600_clear_buffer(struct pipe_context *ctx, struct pipe_resource *ds
1, &clear_value);
r600_blitter_end(ctx);
} else {
-uint32_t *map = r600_buffer_map_sync_with_rings(&rctx->b, r600_resource(dst),
+uint32_t *map = r600_buffer_map_sync_with_rings(&rctx->b, r600_as_resource(dst),
PIPE_MAP_WRITE);
map += offset / 4;
size /= 4;

View file

@@ -185,7 +185,7 @@ bool r600_alloc_resource(struct r600_common_screen *rscreen,
void r600_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
{
struct r600_screen *rscreen = (struct r600_screen*)screen;
-struct r600_resource *rbuffer = r600_resource(buf);
+struct r600_resource *rbuffer = r600_as_resource(buf);
threaded_resource_deinit(buf);
util_range_destroy(&rbuffer->valid_buffer_range);
@@ -229,8 +229,8 @@ void r600_replace_buffer_storage(struct pipe_context *ctx,
struct pipe_resource *src)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-struct r600_resource *rdst = r600_resource(dst);
-struct r600_resource *rsrc = r600_resource(src);
+struct r600_resource *rdst = r600_as_resource(dst);
+struct r600_resource *rsrc = r600_as_resource(src);
uint64_t old_gpu_address = rdst->gpu_address;
radeon_bo_reference(rctx->ws, &rdst->buf, rsrc->buf);
@@ -251,7 +251,7 @@ void r600_invalidate_resource(struct pipe_context *ctx,
struct pipe_resource *resource)
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
-struct r600_resource *rbuffer = r600_resource(resource);
+struct r600_resource *rbuffer = r600_as_resource(resource);
/* We currently only do anything here for buffers */
if (resource->target == PIPE_BUFFER)
@@ -303,10 +303,10 @@ void *r600_buffer_transfer_map(struct pipe_context *ctx,
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
-struct r600_resource *rbuffer = r600_resource(resource);
+struct r600_resource *rbuffer = r600_as_resource(resource);
uint8_t *data;
-if (r600_resource(resource)->compute_global_bo) {
+if (r600_as_resource(resource)->compute_global_bo) {
if ((data = r600_compute_global_transfer_map(ctx, resource, level, usage, box, ptransfer)))
return data;
}
@@ -402,9 +402,9 @@ void *r600_buffer_transfer_map(struct pipe_context *ctx,
struct r600_resource *staging;
assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
-staging = (struct r600_resource*) pipe_buffer_create(
-ctx->screen, 0, PIPE_USAGE_STAGING,
-box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
+staging = r600_as_resource(pipe_buffer_create(
+ctx->screen, 0, PIPE_USAGE_STAGING,
+box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT)));
if (staging) {
/* Copy the VRAM buffer to the staging buffer. */
rctx->dma_copy(ctx, &staging->b.b, 0,
@@ -441,7 +441,7 @@ static void r600_buffer_do_flush_region(struct pipe_context *ctx,
const struct pipe_box *box)
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
-struct r600_resource *rbuffer = r600_resource(transfer->resource);
+struct r600_resource *rbuffer = r600_as_resource(transfer->resource);
if (rtransfer->staging) {
struct pipe_resource *dst, *src;
@@ -469,7 +469,7 @@ void r600_buffer_flush_region(struct pipe_context *ctx,
unsigned required_usage = PIPE_MAP_WRITE |
PIPE_MAP_FLUSH_EXPLICIT;
-if (r600_resource(transfer->resource)->compute_global_bo)
+if (r600_as_resource(transfer->resource)->compute_global_bo)
return;
if ((transfer->usage & required_usage) == required_usage) {
@@ -485,7 +485,7 @@ void r600_buffer_transfer_unmap(struct pipe_context *ctx,
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
-struct r600_resource *rtransferr = r600_resource(transfer->resource);
+struct r600_resource *rtransferr = r600_as_resource(transfer->resource);
if (rtransferr->compute_global_bo && !rtransferr->b.is_user_ptr) {
r600_compute_global_transfer_unmap(ctx, transfer);
@@ -601,7 +601,7 @@ r600_buffer_from_user_memory(struct pipe_screen *screen,
struct r600_resource *rbuffer;
if (templ->bind & PIPE_BIND_GLOBAL) {
-rbuffer = r600_resource(r600_compute_global_buffer_create(screen, templ));
+rbuffer = r600_as_resource(r600_compute_global_buffer_create(screen, templ));
((struct r600_resource_global *)rbuffer)->chunk->real_buffer = rbuffer;
} else {
rbuffer = r600_alloc_buffer_struct(screen, templ);

View file

@@ -320,9 +320,8 @@ void r600_begin_new_cs(struct r600_context *ctx)
/* Create a buffer used for writing trace IDs and initialize it to 0. */
assert(!ctx->trace_buf);
-ctx->trace_buf = (struct r600_resource*)
-pipe_buffer_create(ctx->b.b.screen, 0,
-PIPE_USAGE_STAGING, 4);
+ctx->trace_buf = r600_as_resource(pipe_buffer_create(ctx->b.b.screen, 0,
+PIPE_USAGE_STAGING, 4));
if (ctx->trace_buf)
pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
0, sizeof(zero), &zero);
@@ -501,11 +500,11 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx,
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
* that range. */
-util_range_add(dst, &r600_resource(dst)->valid_buffer_range, dst_offset,
+util_range_add(dst, &r600_as_resource(dst)->valid_buffer_range, dst_offset,
dst_offset + size);
-dst_offset += r600_resource(dst)->gpu_address;
-src_offset += r600_resource(src)->gpu_address;
+dst_offset += r600_as_resource(dst)->gpu_address;
+src_offset += r600_as_resource(src)->gpu_address;
/* Flush the caches where the resources are bound. */
rctx->b.flags |= r600_get_flush_flags(R600_COHERENCY_SHADER) |
@@ -533,9 +532,9 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx,
}
/* This must be done after r600_need_cs_space. */
-src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)src,
+src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, r600_as_resource(src),
RADEON_USAGE_READ | RADEON_PRIO_CP_DMA);
-dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)dst,
+dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, r600_as_resource(dst),
RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
@@ -577,8 +576,8 @@ void r600_dma_copy_buffer(struct r600_context *rctx,
{
struct radeon_cmdbuf *cs = &rctx->b.dma.cs;
unsigned i, ncopy, csize;
-struct r600_resource *rdst = (struct r600_resource*)dst;
-struct r600_resource *rsrc = (struct r600_resource*)src;
+struct r600_resource *rdst = r600_as_resource(dst);
+struct r600_resource *rsrc = r600_as_resource(src);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping

View file

@@ -744,7 +744,7 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws,
templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
templ.usage = PIPE_USAGE_DEFAULT;
-struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
+struct r600_resource *res = r600_as_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_MAP_WRITE);
memset(map, 0, 256);

View file

@@ -543,7 +543,7 @@ static bool r600_resource_commit(struct pipe_context *pctx,
bool commit)
{
struct r600_common_context *ctx = (struct r600_common_context *)pctx;
-struct r600_resource *res = r600_resource(resource);
+struct r600_resource *res = r600_as_resource(resource);
/*
* Since buffer commitment changes cannot be pipelined, we need to
@@ -939,7 +939,7 @@ static void r600_resource_destroy(struct pipe_screen *screen,
struct pipe_resource *res)
{
if (res->target == PIPE_BUFFER) {
-if (r600_resource(res)->compute_global_bo)
+if (r600_as_resource(res)->compute_global_bo)
r600_compute_global_buffer_destroy(screen, res);
else
r600_buffer_destroy(screen, res);

View file

@@ -838,7 +838,7 @@ void cayman_emit_msaa_state(struct radeon_cmdbuf *cs, int nr_samples,
/* Inline helpers. */
-static inline struct r600_resource *r600_resource(struct pipe_resource *r)
+static inline struct r600_resource *r600_as_resource(struct pipe_resource *r)
{
return (struct r600_resource*)r;
}
@@ -860,7 +860,7 @@ static inline void
r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-struct r600_resource *res = (struct r600_resource *)r;
+struct r600_resource *res = r600_as_resource(r);
if (res) {
/* Add memory usage for need_gfx_cs_space */

View file

@@ -489,9 +489,9 @@ static struct r600_resource *r600_new_query_buffer(struct r600_common_screen *rs
* being written by the gpu, hence staging is probably a good
* usage pattern.
*/
-struct r600_resource *buf = (struct r600_resource*)
-pipe_buffer_create(&rscreen->b, 0,
-PIPE_USAGE_STAGING, buf_size);
+struct r600_resource *buf =
+r600_as_resource(pipe_buffer_create(&rscreen->b, 0,
+PIPE_USAGE_STAGING, buf_size));
if (!buf)
return NULL;
@@ -1874,9 +1874,8 @@ void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
/* otherwise backup path for older kernels */
/* create buffer for event data */
-buffer = (struct r600_resource*)
-pipe_buffer_create(ctx->b.screen, 0,
-PIPE_USAGE_STAGING, max_rbs * 16);
+buffer = r600_as_resource(pipe_buffer_create(ctx->b.screen, 0,
+PIPE_USAGE_STAGING, max_rbs * 16));
if (!buffer)
return;

View file

@@ -107,8 +107,7 @@ static int store_shader(struct pipe_context *ctx,
uint32_t *ptr, i;
if (shader->bo == NULL) {
-shader->bo = (struct r600_resource*)
-pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4);
+shader->bo = r600_as_resource(pipe_buffer_create(ctx->screen, 0, PIPE_USAGE_IMMUTABLE, shader->shader.bc.ndw * 4));
if (shader->bo == NULL) {
return -ENOMEM;
}

View file

@@ -980,10 +980,10 @@ static void r600_init_color_surface(struct r600_context *rctx,
void *ptr;
r600_resource_reference(&rctx->dummy_cmask, NULL);
-rctx->dummy_cmask = (struct r600_resource*)
-r600_aligned_buffer_create(&rscreen->b.b, 0,
-PIPE_USAGE_DEFAULT,
-cmask.size, cmask.alignment);
+rctx->dummy_cmask =
+r600_as_resource(r600_aligned_buffer_create(&rscreen->b.b, 0,
+PIPE_USAGE_DEFAULT,
+cmask.size, cmask.alignment));
if (unlikely(!rctx->dummy_cmask)) {
surf->color_initialized = false;
@@ -1002,10 +1002,10 @@ static void r600_init_color_surface(struct r600_context *rctx,
rctx->dummy_fmask->b.b.width0 < fmask.size ||
(1 << rctx->dummy_fmask->buf->alignment_log2) % fmask.alignment != 0) {
r600_resource_reference(&rctx->dummy_fmask, NULL);
-rctx->dummy_fmask = (struct r600_resource*)
-r600_aligned_buffer_create(&rscreen->b.b, 0,
-PIPE_USAGE_DEFAULT,
-fmask.size, fmask.alignment);
+rctx->dummy_fmask =
+r600_as_resource(r600_aligned_buffer_create(&rscreen->b.b, 0,
+PIPE_USAGE_DEFAULT,
+fmask.size, fmask.alignment));
if (unlikely(!rctx->dummy_fmask)) {
surf->color_initialized = false;
@@ -1375,7 +1375,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
reloc = radeon_add_to_buffer_list(&rctx->b,
&rctx->b.gfx,
-(struct r600_resource*)cb[i]->base.texture,
+r600_as_resource(cb[i]->base.texture),
RADEON_USAGE_READWRITE |
(cb[i]->base.texture->nr_samples > 1 ?
RADEON_PRIO_COLOR_BUFFER_MSAA :
@@ -1440,7 +1440,7 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a
struct r600_surface *surf = (struct r600_surface*)rctx->framebuffer.fb_zsbuf;
unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
&rctx->b.gfx,
-(struct r600_resource*)state->zsbuf.texture,
+r600_as_resource(state->zsbuf.texture),
RADEON_USAGE_READWRITE |
(surf->base.texture->nr_samples > 1 ?
RADEON_PRIO_DEPTH_BUFFER_MSAA :
@@ -1664,7 +1664,7 @@ static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom
unsigned stride = shader->strides[buffer_index];
vb = &rctx->vertex_buffer_state.vb[buffer_index];
-rbuffer = (struct r600_resource*)vb->buffer.resource;
+rbuffer = r600_as_resource(vb->buffer.resource);
assert(rbuffer);
offset = vb->buffer_offset;
@@ -1704,7 +1704,7 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
unsigned buffer_index = ffs(dirty_mask) - 1;
unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
cb = &state->cb[buffer_index];
-rbuffer = (struct r600_resource*)cb->buffer;
+rbuffer = r600_as_resource(cb->buffer);
assert(rbuffer);
offset = cb->buffer_offset;
@@ -1979,7 +1979,7 @@ static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
if (state->enable) {
-rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
+rbuffer = r600_as_resource(state->esgs_ring.buffer);
radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,
@@ -1988,7 +1988,7 @@ static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
state->esgs_ring.buffer_size >> 8);
-rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
+rbuffer = r600_as_resource(state->gsvs_ring.buffer);
radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer,

View file

@@ -1743,8 +1743,8 @@ void r600_setup_scratch_area_for_shader(struct r600_context *rctx,
pipe_resource_reference((struct pipe_resource**)&scratch->buffer, NULL);
}
-scratch->buffer = (struct r600_resource *)pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
-PIPE_USAGE_DEFAULT, size);
+scratch->buffer = r600_as_resource(pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
+PIPE_USAGE_DEFAULT, size));
if (scratch->buffer) {
scratch->size = size;
}
@@ -2155,7 +2155,7 @@ r600_draw_parameters(struct r600_context *rctx,
if (!is_mapped) {
*indirect_ptr =
r600_buffer_map_sync_with_rings(&rctx->b,
-(struct r600_resource *)indirect->buffer,
+r600_as_resource(indirect->buffer),
PIPE_MAP_READ);
*cs_space += R600_DRAW_PARAMETERS_ENABLED_CS * indirect->draw_count;
}
@@ -2310,11 +2310,11 @@ r600_indirect_parameters_init(struct r600_context *rctx,
}
reloc_internal = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-r600_resource(indirect_parameters->internal),
+r600_as_resource(indirect_parameters->internal),
RADEON_USAGE_READWRITE |
RADEON_PRIO_SHADER_RW_BUFFER);
-va_fence = r600_resource(indirect_parameters->internal)->gpu_address +
+va_fence = r600_as_resource(indirect_parameters->internal)->gpu_address +
indirect_parameters->internal_offset +
offsetof(struct r600_indirect_gpu_internal, fence);
@@ -2330,7 +2330,7 @@ r600_indirect_parameters_init(struct r600_context *rctx,
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, reloc_internal);
-va_condition = r600_resource(indirect_parameters->internal)->gpu_address +
+va_condition = r600_as_resource(indirect_parameters->internal)->gpu_address +
indirect_parameters->internal_offset +
offsetof(struct r600_indirect_gpu_internal, condition);
@@ -2371,24 +2371,24 @@ r600_indirect_parameters_draw(struct r600_context *rctx,
assert(radeon_check_cs(rctx, cs) || true);
-va_draw_count = r600_resource(indirect->indirect_draw_count)->gpu_address +
+va_draw_count = r600_as_resource(indirect->indirect_draw_count)->gpu_address +
indirect->indirect_draw_count_offset;
-va_condition = r600_resource(indirect_parameters->internal)->gpu_address +
+va_condition = r600_as_resource(indirect_parameters->internal)->gpu_address +
indirect_parameters->internal_offset +
offsetof(struct r600_indirect_gpu_internal, condition);
-va_fence = r600_resource(indirect_parameters->internal)->gpu_address +
+va_fence = r600_as_resource(indirect_parameters->internal)->gpu_address +
indirect_parameters->internal_offset +
offsetof(struct r600_indirect_gpu_internal, fence);
reloc_draw_count = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-r600_resource(indirect->indirect_draw_count),
+r600_as_resource(indirect->indirect_draw_count),
RADEON_USAGE_READWRITE |
RADEON_PRIO_SHADER_RW_BUFFER);
reloc_internal = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-r600_resource(indirect_parameters->internal),
+r600_as_resource(indirect_parameters->internal),
RADEON_USAGE_READWRITE |
RADEON_PRIO_SHADER_RW_BUFFER);
@@ -2798,7 +2798,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
radeon_emit(cs, info->instance_count);
} else {
-uint64_t va = r600_resource(indirect->buffer)->gpu_address;
+uint64_t va = r600_as_resource(indirect->buffer)->gpu_address;
assert(rctx->b.gfx_level >= EVERGREEN);
// Invalidate so non-indirect draw calls reset this state
@@ -2812,7 +2812,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-(struct r600_resource*)indirect->buffer,
+r600_as_resource(indirect->buffer),
RADEON_USAGE_READ |
RADEON_PRIO_DRAW_INDIRECT));
}
@@ -2833,7 +2833,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
info->index.user + draws[0].start * index_size, size_bytes);
cs->current.cdw += size_dw;
} else {
-uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
+uint64_t va = r600_as_resource(indexbuf)->gpu_address + index_offset;
if (likely(!indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
@@ -2843,7 +2843,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-(struct r600_resource*)indexbuf,
+r600_as_resource(indexbuf),
RADEON_USAGE_READ |
RADEON_PRIO_INDEX_BUFFER));
}
@@ -2859,7 +2859,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
-(struct r600_resource*)indexbuf,
+r600_as_resource(indexbuf),
RADEON_USAGE_READ |
RADEON_PRIO_INDEX_BUFFER));
@@ -3807,7 +3807,7 @@ uint32_t r600_colorformat_endian_swap(uint32_t colorformat, bool do_endian_swap)
static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
struct r600_context *rctx = (struct r600_context*)ctx;
-struct r600_resource *rbuffer = r600_resource(buf);
+struct r600_resource *rbuffer = r600_as_resource(buf);
unsigned i, shader, mask;
struct r600_pipe_sampler_view *view;

View file

@@ -24,7 +24,7 @@ r600_create_so_target(struct pipe_context *ctx,
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct r600_so_target *t;
-struct r600_resource *rbuffer = (struct r600_resource*)buffer;
+struct r600_resource *rbuffer = r600_as_resource(buffer);
t = CALLOC_STRUCT(r600_so_target);
if (!t) {
@@ -174,7 +174,7 @@ static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r
t[i]->stride_in_dw = stride_in_dw[i];
-uint64_t va = r600_resource(t[i]->b.buffer)->gpu_address;
+uint64_t va = r600_as_resource(t[i]->b.buffer)->gpu_address;
update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);
@@ -184,7 +184,7 @@ static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r
radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
radeon_emit(cs, va >> 8); /* BUFFER_BASE */
-r600_emit_reloc(rctx, &rctx->gfx, r600_resource(t[i]->b.buffer),
+r600_emit_reloc(rctx, &rctx->gfx, r600_as_resource(t[i]->b.buffer),
RADEON_USAGE_WRITE | RADEON_PRIO_SHADER_RW_BUFFER);
/* R7xx requires this packet after updating BUFFER_BASE.
@@ -194,7 +194,7 @@ static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r
radeon_emit(cs, i);
radeon_emit(cs, va >> 8);
-r600_emit_reloc(rctx, &rctx->gfx, r600_resource(t[i]->b.buffer),
+r600_emit_reloc(rctx, &rctx->gfx, r600_as_resource(t[i]->b.buffer),
RADEON_USAGE_WRITE | RADEON_PRIO_SHADER_RW_BUFFER);
}

View file

@@ -457,7 +457,7 @@ static bool r600_texture_get_handle(struct pipe_screen* screen,
{
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
struct r600_common_context *rctx;
-struct r600_resource *res = (struct r600_resource*)resource;
+struct r600_resource *res = r600_as_resource(resource);
struct r600_texture *rtex = r600_as_texture(resource);
struct radeon_bo_metadata metadata;
bool update_metadata = false;
@@ -702,12 +702,12 @@ void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen,
r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask);
-rtex->cmask_buffer = (struct r600_resource *)
-r600_aligned_buffer_create(&rscreen->b,
-R600_RESOURCE_FLAG_UNMAPPABLE,
-PIPE_USAGE_DEFAULT,
-rtex->cmask.size,
-rtex->cmask.alignment);
+rtex->cmask_buffer =
+r600_as_resource(r600_aligned_buffer_create(&rscreen->b,
+R600_RESOURCE_FLAG_UNMAPPABLE,
+PIPE_USAGE_DEFAULT,
+rtex->cmask.size,
+rtex->cmask.alignment));
if (rtex->cmask_buffer == NULL) {
rtex->cmask.size = 0;
return;
@@ -725,9 +725,9 @@ void eg_resource_alloc_immed(struct r600_common_screen *rscreen,
struct r600_resource *res,
unsigned immed_size)
{
-res->immed_buffer = (struct r600_resource *)
-pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
-PIPE_USAGE_DEFAULT, immed_size);
+res->immed_buffer =
+r600_as_resource(pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM,
+PIPE_USAGE_DEFAULT, immed_size));
}
static void r600_texture_get_htile_size(struct r600_common_screen *rscreen,
@@ -1392,7 +1392,7 @@ void *r600_texture_transfer_map(struct pipe_context *ctx,
&trans->b.b.layer_stride);
}
-trans->staging = (struct r600_resource*)staging_depth;
+trans->staging = &staging_depth->resource;
buf = trans->staging;
} else if (use_staging_texture) {
struct pipe_resource resource;

View file

@@ -45,9 +45,8 @@ bool rvid_create_buffer(struct pipe_screen *screen, struct rvid_buffer *buffer,
* able to move buffers around individually, so request a
* non-sub-allocated buffer.
*/
-buffer->res = (struct r600_resource *)
-pipe_buffer_create(screen, PIPE_BIND_SHARED,
-usage, size);
+buffer->res = r600_as_resource(pipe_buffer_create(screen, PIPE_BIND_SHARED,
+usage, size));
return buffer->res != NULL;
}