r300,r600,radeonsi: switch to pb_buffer_lean

to remove pb_buffer::vtbl from all buffer structures

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26643>

parent d2c76c4d77
commit 6d913a2bcc

45 changed files with 417 additions and 412 deletions
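The pattern behind every hunk below: drivers previously held struct pb_buffer * and reached size and alignment through the embedded base member (buf->base.size, buf->base.alignment_log2); holding struct pb_buffer_lean * directly flattens those accesses to buf->size and buf->alignment_log2 and drops the per-buffer vtbl pointer from everything the drivers touch. A minimal sketch of the relationship between the two types, with stand-in definitions — the real structs live in Mesa's pipebuffer headers and carry more fields than shown here:

   #include <stdint.h>

   /* Stand-ins for Mesa types, only to make the sketch self-contained. */
   struct pipe_reference { int32_t count; };
   struct pb_vtbl;

   /* The lean buffer: just the data every holder needs. */
   struct pb_buffer_lean {
      struct pipe_reference reference;
      unsigned alignment_log2;   /* alignment = 1 << alignment_log2 */
      uint64_t size;             /* read as buf->size after this commit */
   };

   /* The full buffer: lean part plus the vtbl this commit removes
    * from driver-visible buffer structures. */
   struct pb_buffer {
      struct pb_buffer_lean base;   /* was reached as buf->base.size */
      const struct pb_vtbl *vtbl;
   };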
@@ -294,14 +294,14 @@ struct r300_query {
     bool begin_emitted;
 
     /* The buffer where query results are stored. */
-    struct pb_buffer *buf;
+    struct pb_buffer_lean *buf;
 };
 
 struct r300_surface {
     struct pipe_surface base;
 
     /* Winsys buffer backing the texture. */
-    struct pb_buffer *buf;
+    struct pb_buffer_lean *buf;
 
     enum radeon_bo_domain domain;
@@ -392,7 +392,7 @@ struct r300_resource
     struct pipe_resource b;
 
     /* Winsys buffer backing this resource. */
-    struct pb_buffer *buf;
+    struct pb_buffer_lean *buf;
     enum radeon_bo_domain domain;
 
     /* Constant buffers and SWTCL vertex and index buffers are in user
@@ -456,7 +456,7 @@ struct r300_context {
     /* Draw module. Used mostly for SW TCL. */
     struct draw_context* draw;
     /* Vertex buffer for SW TCL. */
-    struct pb_buffer *vbo;
+    struct pb_buffer_lean *vbo;
     /* Offset and size into the SW TCL VBO. */
     size_t draw_vbo_offset;
@@ -770,8 +770,8 @@ void r300_emit_query_end(struct r300_context* r300)
     query->num_results += query->num_pipes;
 
     /* XXX grab all the results and reset the counter. */
-    if (query->num_results >= query->buf->base.size / 4 - 4) {
-        query->num_results = (query->buf->base.size / 4) / 2;
+    if (query->num_results >= query->buf->size / 4 - 4) {
+        query->num_results = (query->buf->size / 4) / 2;
         fprintf(stderr, "r300: Rewinding OQBO...\n");
     }
 }
@@ -950,7 +950,7 @@ static bool r300_render_allocate_vertices(struct vbuf_render* render,
     DBG(r300, DBG_DRAW, "r300: render_allocate_vertices (size: %d)\n", size);
 
-    if (!r300->vbo || size + r300->draw_vbo_offset > r300->vbo->base.size) {
+    if (!r300->vbo || size + r300->draw_vbo_offset > r300->vbo->size) {
         radeon_bo_reference(r300->rws, &r300->vbo, NULL);
         r300->vbo = NULL;
         r300render->vbo_ptr = NULL;
@@ -1056,7 +1056,7 @@ static void r300_render_draw_elements(struct vbuf_render* render,
 {
     struct r300_render* r300render = r300_render(render);
     struct r300_context* r300 = r300render->r300;
-    unsigned max_index = (r300->vbo->base.size - r300->draw_vbo_offset) /
+    unsigned max_index = (r300->vbo->size - r300->draw_vbo_offset) /
                          (r300render->r300->vertex_info.size * 4) - 1;
     struct pipe_resource *index_buffer = NULL;
     unsigned index_buffer_offset;
@@ -114,7 +114,7 @@ r300_buffer_transfer_map( struct pipe_context *context,
     if (r300->rws->cs_is_buffer_referenced(&r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
         !r300->rws->buffer_wait(r300->rws, rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
         unsigned i;
-        struct pb_buffer *new_buf;
+        struct pb_buffer_lean *new_buf;
 
         /* Create a new one in the same pipe_resource. */
         new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.width0,
@@ -1033,7 +1033,7 @@ r300_texture_create_object(struct r300_screen *rscreen,
                            enum radeon_bo_layout microtile,
                            enum radeon_bo_layout macrotile,
                            unsigned stride_in_bytes_override,
-                           struct pb_buffer *buffer)
+                           struct pb_buffer_lean *buffer)
 {
     struct radeon_winsys *rws = rscreen->rws;
     struct r300_resource *tex = NULL;
@@ -1142,7 +1142,7 @@ struct pipe_resource *r300_texture_from_handle(struct pipe_screen *screen,
 {
     struct r300_screen *rscreen = r300_screen(screen);
     struct radeon_winsys *rws = rscreen->rws;
-    struct pb_buffer *buffer;
+    struct pb_buffer_lean *buffer;
     struct radeon_bo_metadata tiling = {};
 
     /* Support only 2D textures without mipmaps */
@@ -606,17 +606,17 @@ void r300_texture_desc_init(struct r300_screen *rscreen,
     r300_setup_miptree(rscreen, tex, true);
     /* If the required buffer size is larger than the given max size,
      * try again without the alignment for the CBZB clear. */
-    if (tex->buf && tex->tex.size_in_bytes > tex->buf->base.size) {
+    if (tex->buf && tex->tex.size_in_bytes > tex->buf->size) {
         r300_setup_miptree(rscreen, tex, false);
 
     /* Make sure the buffer we got is large enough. */
-    if (tex->tex.size_in_bytes > tex->buf->base.size) {
+    if (tex->tex.size_in_bytes > tex->buf->size) {
         fprintf(stderr,
             "r300: I got a pre-allocated buffer to use it as a texture "
             "storage, but the buffer is too small. I'll use the buffer "
             "anyway, because I can't crash here, but it's dangerous. "
             "This can be a DDX bug. Got: %"PRIu64"B, Need: %uB, Info:\n",
-            tex->buf->base.size, tex->tex.size_in_bytes);
+            tex->buf->size, tex->tex.size_in_bytes);
         r300_tex_print_info(tex, "texture_desc_init");
         /* Oops, what now. Apps will break if we fail this,
          * so just pretend everything's okay. */
@@ -34,7 +34,7 @@
 #include <stdio.h>
 
 bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
-                                     struct pb_buffer *buf,
+                                     struct pb_buffer_lean *buf,
                                      unsigned usage)
 {
    if (ctx->ws->cs_is_buffer_referenced(&ctx->gfx.cs, buf, usage)) {
@@ -166,7 +166,7 @@ void r600_init_resource_fields(struct r600_common_screen *rscreen,
 bool r600_alloc_resource(struct r600_common_screen *rscreen,
                          struct r600_resource *res)
 {
-   struct pb_buffer *old_buf, *new_buf;
+   struct pb_buffer_lean *old_buf, *new_buf;
 
    /* Allocate a new resource. */
    new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
@@ -195,8 +195,8 @@ bool r600_alloc_resource(struct r600_common_screen *rscreen,
    /* Print debug information. */
    if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
      fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
-             res->gpu_address, res->gpu_address + res->buf->base.size,
-             res->buf->base.size);
+             res->gpu_address, res->gpu_address + res->buf->size,
+             res->buf->size);
    }
    return true;
 }
@@ -125,7 +125,7 @@ struct r600_resource {
    struct threaded_resource b;
 
    /* Winsys objects. */
-   struct pb_buffer *buf;
+   struct pb_buffer_lean *buf;
    uint64_t gpu_address;
    /* Memory usage if the buffer placement is optimal. */
    uint64_t vram_usage;
@@ -314,7 +314,7 @@ union r600_mmio_counters {
 
 struct r600_memory_object {
    struct pipe_memory_object b;
-   struct pb_buffer *buf;
+   struct pb_buffer_lean *buf;
    uint32_t stride;
    uint32_t offset;
 };
@@ -616,7 +616,7 @@ struct r600_common_context {
 
 /* r600_buffer_common.c */
 bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
-                                     struct pb_buffer *buf,
+                                     struct pb_buffer_lean *buf,
                                      unsigned usage);
 void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                       struct r600_resource *resource,
@@ -990,7 +990,7 @@ static void r600_init_color_surface(struct r600_context *rctx,
       /* CMASK. */
       if (!rctx->dummy_cmask ||
           rctx->dummy_cmask->b.b.width0 < cmask.size ||
-          (1 << rctx->dummy_cmask->buf->base.alignment_log2) % cmask.alignment != 0) {
+          (1 << rctx->dummy_cmask->buf->alignment_log2) % cmask.alignment != 0) {
          struct pipe_transfer *transfer;
          void *ptr;
 
@@ -1015,7 +1015,7 @@ static void r600_init_color_surface(struct r600_context *rctx,
       /* FMASK. */
       if (!rctx->dummy_fmask ||
           rctx->dummy_fmask->b.b.width0 < fmask.size ||
-          (1 << rctx->dummy_fmask->buf->base.alignment_log2) % fmask.alignment != 0) {
+          (1 << rctx->dummy_fmask->buf->alignment_log2) % fmask.alignment != 0) {
          r600_resource_reference(&rctx->dummy_fmask, NULL);
          rctx->dummy_fmask = (struct r600_resource*)
            r600_aligned_buffer_create(&rscreen->b.b, 0,
@@ -897,7 +897,7 @@ void r600_print_texture_info(struct r600_common_screen *rscreen,
 static struct r600_texture *
 r600_texture_create_object(struct pipe_screen *screen,
                            const struct pipe_resource *base,
-                           struct pb_buffer *buf,
+                           struct pb_buffer_lean *buf,
                            struct radeon_surf *surface)
 {
    struct r600_texture *rtex;
@@ -973,13 +973,13 @@ r600_texture_create_object(struct pipe_screen *screen,
    } else {
       resource->buf = buf;
       resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
-      resource->bo_size = buf->base.size;
-      resource->bo_alignment = 1 << buf->base.alignment_log2;
+      resource->bo_size = buf->size;
+      resource->bo_alignment = 1 << buf->alignment_log2;
       resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
       if (resource->domains & RADEON_DOMAIN_VRAM)
-         resource->vram_usage = buf->base.size;
+         resource->vram_usage = buf->size;
       else if (resource->domains & RADEON_DOMAIN_GTT)
-         resource->gart_usage = buf->base.size;
+         resource->gart_usage = buf->size;
    }
 
    if (rtex->cmask.size) {
@@ -1004,7 +1004,7 @@ r600_texture_create_object(struct pipe_screen *screen,
    if (rscreen->debug_flags & DBG_VM) {
      fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
              rtex->resource.gpu_address,
-             rtex->resource.gpu_address + rtex->resource.buf->base.size,
+             rtex->resource.gpu_address + rtex->resource.buf->size,
              base->width0, base->height0, util_num_layers(base, 0), base->last_level+1,
              base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
    }
@@ -1107,7 +1107,7 @@ static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
                                                       unsigned usage)
 {
    struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
-   struct pb_buffer *buf = NULL;
+   struct pb_buffer_lean *buf = NULL;
    enum radeon_surf_mode array_mode;
    struct radeon_surf surface = {};
    int r;
@@ -1486,7 +1486,7 @@ void r600_texture_transfer_unmap(struct pipe_context *ctx,
    }
 
    if (rtransfer->staging) {
-      rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->base.size;
+      rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
      r600_resource_reference(&rtransfer->staging, NULL);
    }
 
@@ -1778,7 +1778,7 @@ r600_memobj_from_handle(struct pipe_screen *screen,
 {
    struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
    struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object);
-   struct pb_buffer *buf = NULL;
+   struct pb_buffer_lean *buf = NULL;
 
    if (!memobj)
      return NULL;
@@ -1824,7 +1824,7 @@ r600_texture_from_memobj(struct pipe_screen *screen,
    struct radeon_bo_metadata metadata = {};
    enum radeon_surf_mode array_mode;
    bool is_scanout;
-   struct pb_buffer *buf = NULL;
+   struct pb_buffer_lean *buf = NULL;
 
    if (memobj->b.dedicated) {
      rscreen->ws->buffer_get_metadata(rscreen->ws, memobj->buf, &metadata, NULL);
@@ -61,7 +61,7 @@ struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe,
    struct r600_context *ctx = (struct r600_context *)pipe;
    struct r600_texture *resources[VL_NUM_COMPONENTS] = {};
    struct radeon_surf* surfaces[VL_NUM_COMPONENTS] = {};
-   struct pb_buffer **pbs[VL_NUM_COMPONENTS] = {};
+   struct pb_buffer_lean **pbs[VL_NUM_COMPONENTS] = {};
    enum pipe_format resource_formats[3];
    struct pipe_video_buffer template;
    struct pipe_resource templ;
@@ -156,7 +156,7 @@ static uint32_t eg_num_banks(uint32_t nbanks)
 }
 
 /* set the decoding target buffer offsets */
-static struct pb_buffer* r600_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
+static struct pb_buffer_lean* r600_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
 {
    struct r600_screen *rscreen = (struct r600_screen*)buf->base.context->screen;
    struct r600_texture *luma = (struct r600_texture *)buf->resources[0];
@@ -172,7 +172,7 @@ static struct pb_buffer* r600_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_
 
 /* get the radeon resources for VCE */
 static void r600_vce_get_buffer(struct pipe_resource *resource,
-                                struct pb_buffer **handle,
+                                struct pb_buffer_lean **handle,
                                 struct radeon_surf **surface)
 {
    struct r600_texture *res = (struct r600_texture *)resource;
@@ -114,7 +114,7 @@ static void set_reg(struct ruvd_decoder *dec, unsigned reg, uint32_t val)
 
 /* send a command to the VCPU through the GPCOM registers */
 static void send_cmd(struct ruvd_decoder *dec, unsigned cmd,
-                     struct pb_buffer* buf, uint32_t off,
+                     struct pb_buffer_lean* buf, uint32_t off,
                      unsigned usage, enum radeon_bo_domain domain)
 {
    int reloc_idx;
@@ -257,38 +257,38 @@ static unsigned calc_dpb_size(struct ruvd_decoder *dec)
    case PIPE_VIDEO_FORMAT_MPEG4_AVC: {
      if (!dec->use_legacy) {
         unsigned fs_in_mb = width_in_mb * height_in_mb;
-        unsigned alignment = 64, num_dpb_buffer;
+        unsigned alignment = 64, num_dpb_buffer_lean;
 
        if (dec->stream_type == RUVD_CODEC_H264_PERF)
           alignment = 256;
        switch(dec->base.level) {
        case 30:
-          num_dpb_buffer = 8100 / fs_in_mb;
+          num_dpb_buffer_lean = 8100 / fs_in_mb;
          break;
        case 31:
-          num_dpb_buffer = 18000 / fs_in_mb;
+          num_dpb_buffer_lean = 18000 / fs_in_mb;
          break;
        case 32:
-          num_dpb_buffer = 20480 / fs_in_mb;
+          num_dpb_buffer_lean = 20480 / fs_in_mb;
          break;
        case 41:
-          num_dpb_buffer = 32768 / fs_in_mb;
+          num_dpb_buffer_lean = 32768 / fs_in_mb;
          break;
        case 42:
-          num_dpb_buffer = 34816 / fs_in_mb;
+          num_dpb_buffer_lean = 34816 / fs_in_mb;
          break;
        case 50:
-          num_dpb_buffer = 110400 / fs_in_mb;
+          num_dpb_buffer_lean = 110400 / fs_in_mb;
          break;
        case 51:
-          num_dpb_buffer = 184320 / fs_in_mb;
+          num_dpb_buffer_lean = 184320 / fs_in_mb;
          break;
        default:
-          num_dpb_buffer = 184320 / fs_in_mb;
+          num_dpb_buffer_lean = 184320 / fs_in_mb;
          break;
        }
-       num_dpb_buffer++;
-       max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer), max_references);
+       num_dpb_buffer_lean++;
+       max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer_lean), max_references);
        dpb_size = image_size * max_references;
        if ((dec->stream_type != RUVD_CODEC_H264_PERF)) {
           dpb_size += max_references * align(width_in_mb * height_in_mb * 192, alignment);
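A quirk worth noticing in the hunk above (and repeated in the radeonsi decoder hunks later in this diff): the switch was evidently applied as a textual pb_buffer -> pb_buffer_lean substitution, so the local DPB counter num_dpb_buffer — which never held a buffer pointer — is renamed to num_dpb_buffer_lean along the way.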
@@ -887,7 +887,7 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
    if (format == PIPE_VIDEO_FORMAT_JPEG)
      new_size += 2; /* save for EOI */
 
-   if (new_size > buf->res->buf->base.size) {
+   if (new_size > buf->res->buf->size) {
      dec->ws->buffer_unmap(dec->ws, buf->res->buf);
      dec->bs_ptr = NULL;
      if (!rvid_resize_buffer(dec->screen, &dec->cs, buf, new_size)) {
@@ -925,7 +925,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
                            struct pipe_picture_desc *picture)
 {
    struct ruvd_decoder *dec = (struct ruvd_decoder*)decoder;
-   struct pb_buffer *dt;
+   struct pb_buffer_lean *dt;
    struct rvid_buffer *msg_fb_it_buf, *bs_buf;
    unsigned bs_size;
 
@@ -960,7 +960,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
    }
 
    if (dec->dpb.res)
-      dec->msg->body.decode.dpb_size = dec->dpb.res->buf->base.size;
+      dec->msg->body.decode.dpb_size = dec->dpb.res->buf->size;
    dec->msg->body.decode.bsd_size = bs_size;
    dec->msg->body.decode.db_pitch = align(dec->base.width, get_db_pitch_alignment(dec));
 
@@ -428,7 +428,7 @@ struct ruvd_msg {
 };
 
 /* driver dependent callback */
-typedef struct pb_buffer* (*ruvd_set_dtb)
+typedef struct pb_buffer_lean* (*ruvd_set_dtb)
    (struct ruvd_msg* msg, struct vl_video_buffer *vb);
 
 /* create an UVD decode */
@@ -513,7 +513,7 @@ bool rvce_is_fw_version_supported(struct r600_common_screen *rscreen)
 /**
  * Add the buffer as relocation to the current command submission
  */
-void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf,
+void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer_lean *buf,
                      unsigned usage, enum radeon_bo_domain domain,
                      signed offset)
 {
@@ -52,7 +52,7 @@ struct r600_common_screen;
 
 /* driver dependent callback */
 typedef void (*rvce_get_buffer)(struct pipe_resource *resource,
-                                struct pb_buffer **handle,
+                                struct pb_buffer_lean **handle,
                                 struct radeon_surf **surface);
 
 /* Coded picture buffer slot */
@@ -391,11 +391,11 @@ struct rvce_encoder {
 
    rvce_get_buffer get_buffer;
 
-   struct pb_buffer* handle;
+   struct pb_buffer_lean* handle;
    struct radeon_surf* luma;
    struct radeon_surf* chroma;
 
-   struct pb_buffer* bs_handle;
+   struct pb_buffer_lean* bs_handle;
    unsigned bs_size;
 
    struct rvce_cpb_slot *cpb_array;
@@ -430,7 +430,7 @@ struct pipe_video_codec *rvce_create_encoder(struct pipe_context *context,
 
 bool rvce_is_fw_version_supported(struct r600_common_screen *rscreen);
 
-void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf,
+void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer_lean *buf,
                      unsigned usage, enum radeon_bo_domain domain,
                      signed offset);
 
@@ -90,7 +90,7 @@ bool rvid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
 {
    struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
    struct radeon_winsys* ws = rscreen->ws;
-   unsigned bytes = MIN2(new_buf->res->buf->base.size, new_size);
+   unsigned bytes = MIN2(new_buf->res->buf->size, new_size);
    struct rvid_buffer old_buf = *new_buf;
    void *src = NULL, *dst = NULL;
 
@@ -132,7 +132,7 @@ void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
    struct r600_common_context *rctx = (struct r600_common_context*)context;
 
    rctx->dma_clear_buffer(context, &buffer->res->b.b, 0,
-                          buffer->res->buf->base.size, 0);
+                          buffer->res->buf->size, 0);
    context->flush(context, NULL, 0);
 }
 
@@ -141,13 +141,13 @@ void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
  * sumup their sizes and replace the backend buffers with a single bo
  */
 void rvid_join_surfaces(struct r600_common_context *rctx,
-                        struct pb_buffer** buffers[VL_NUM_COMPONENTS],
+                        struct pb_buffer_lean** buffers[VL_NUM_COMPONENTS],
                         struct radeon_surf *surfaces[VL_NUM_COMPONENTS])
 {
    struct radeon_winsys* ws;
    unsigned best_tiling, best_wh, off;
    unsigned size, alignment;
-   struct pb_buffer *pb;
+   struct pb_buffer_lean *pb;
    unsigned i, j;
 
    ws = rctx->ws;
@@ -189,9 +189,9 @@ void rvid_join_surfaces(struct r600_common_context *rctx,
      if (!buffers[i] || !*buffers[i])
        continue;
 
-     size = align(size, 1 << (*buffers[i])->base.alignment_log2);
-     size += (*buffers[i])->base.size;
-     alignment = MAX2(alignment, 1 << (*buffers[i])->base.alignment_log2);
+     size = align(size, 1 << (*buffers[i])->alignment_log2);
+     size += (*buffers[i])->size;
+     alignment = MAX2(alignment, 1 << (*buffers[i])->alignment_log2);
    }
 
    if (!size)
@@ -67,7 +67,7 @@ void rvid_clear_buffer(struct pipe_context *context, struct rvid_buffer* buffer)
 /* join surfaces into the same buffer with identical tiling params
    sum up their sizes and replace the backend buffers with a single bo */
 void rvid_join_surfaces(struct r600_common_context *rctx,
-                        struct pb_buffer** buffers[VL_NUM_COMPONENTS],
+                        struct pb_buffer_lean** buffers[VL_NUM_COMPONENTS],
                         struct radeon_surf *surfaces[VL_NUM_COMPONENTS]);
 
 /* returns supported codecs and other parameters */
@@ -103,7 +103,7 @@ static void set_reg(struct ruvd_decoder *dec, unsigned reg, uint32_t val)
 }
 
 /* send a command to the VCPU through the GPCOM registers */
-static void send_cmd(struct ruvd_decoder *dec, unsigned cmd, struct pb_buffer *buf, uint32_t off,
+static void send_cmd(struct ruvd_decoder *dec, unsigned cmd, struct pb_buffer_lean *buf, uint32_t off,
                      unsigned usage, enum radeon_bo_domain domain)
 {
    int reloc_idx;
@@ -226,35 +226,35 @@ static unsigned calc_ctx_size_h264_perf(struct ruvd_decoder *dec)
 
    if (!dec->use_legacy) {
      unsigned fs_in_mb = width_in_mb * height_in_mb;
-     unsigned num_dpb_buffer;
+     unsigned num_dpb_buffer_lean;
      switch (dec->base.level) {
      case 30:
-        num_dpb_buffer = 8100 / fs_in_mb;
+        num_dpb_buffer_lean = 8100 / fs_in_mb;
        break;
      case 31:
-        num_dpb_buffer = 18000 / fs_in_mb;
+        num_dpb_buffer_lean = 18000 / fs_in_mb;
        break;
      case 32:
-        num_dpb_buffer = 20480 / fs_in_mb;
+        num_dpb_buffer_lean = 20480 / fs_in_mb;
        break;
      case 41:
-        num_dpb_buffer = 32768 / fs_in_mb;
+        num_dpb_buffer_lean = 32768 / fs_in_mb;
        break;
      case 42:
-        num_dpb_buffer = 34816 / fs_in_mb;
+        num_dpb_buffer_lean = 34816 / fs_in_mb;
        break;
      case 50:
-        num_dpb_buffer = 110400 / fs_in_mb;
+        num_dpb_buffer_lean = 110400 / fs_in_mb;
        break;
      case 51:
-        num_dpb_buffer = 184320 / fs_in_mb;
+        num_dpb_buffer_lean = 184320 / fs_in_mb;
        break;
      default:
-        num_dpb_buffer = 184320 / fs_in_mb;
+        num_dpb_buffer_lean = 184320 / fs_in_mb;
        break;
      }
-     num_dpb_buffer++;
-     max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer), max_references);
+     num_dpb_buffer_lean++;
+     max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer_lean), max_references);
      ctx_size = max_references * align(width_in_mb * height_in_mb * 192, 256);
    } else {
      // the firmware seems to always assume a minimum of ref frames
@@ -351,38 +351,38 @@ static unsigned calc_dpb_size(struct ruvd_decoder *dec)
   case PIPE_VIDEO_FORMAT_MPEG4_AVC: {
      if (!dec->use_legacy) {
        unsigned fs_in_mb = width_in_mb * height_in_mb;
-       unsigned alignment = 64, num_dpb_buffer;
+       unsigned alignment = 64, num_dpb_buffer_lean;
 
       if (dec->stream_type == RUVD_CODEC_H264_PERF)
          alignment = 256;
       switch (dec->base.level) {
       case 30:
-         num_dpb_buffer = 8100 / fs_in_mb;
+         num_dpb_buffer_lean = 8100 / fs_in_mb;
         break;
       case 31:
-         num_dpb_buffer = 18000 / fs_in_mb;
+         num_dpb_buffer_lean = 18000 / fs_in_mb;
         break;
       case 32:
-         num_dpb_buffer = 20480 / fs_in_mb;
+         num_dpb_buffer_lean = 20480 / fs_in_mb;
         break;
       case 41:
-         num_dpb_buffer = 32768 / fs_in_mb;
+         num_dpb_buffer_lean = 32768 / fs_in_mb;
         break;
       case 42:
-         num_dpb_buffer = 34816 / fs_in_mb;
+         num_dpb_buffer_lean = 34816 / fs_in_mb;
         break;
       case 50:
-         num_dpb_buffer = 110400 / fs_in_mb;
+         num_dpb_buffer_lean = 110400 / fs_in_mb;
         break;
       case 51:
-         num_dpb_buffer = 184320 / fs_in_mb;
+         num_dpb_buffer_lean = 184320 / fs_in_mb;
         break;
       default:
-         num_dpb_buffer = 184320 / fs_in_mb;
+         num_dpb_buffer_lean = 184320 / fs_in_mb;
         break;
       }
-      num_dpb_buffer++;
-      max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer), max_references);
+      num_dpb_buffer_lean++;
+      max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer_lean), max_references);
       dpb_size = image_size * max_references;
       if ((dec->stream_type != RUVD_CODEC_H264_PERF) ||
           (((struct si_screen *)dec->screen)->info.family < CHIP_POLARIS10)) {
@@ -1046,7 +1046,7 @@ static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
      struct rvid_buffer *buf = &dec->bs_buffers[dec->cur_buffer];
      unsigned new_size = dec->bs_size + sizes[i];
 
-     if (new_size > buf->res->buf->base.size) {
+     if (new_size > buf->res->buf->size) {
        dec->ws->buffer_unmap(dec->ws, buf->res->buf);
        if (!si_vid_resize_buffer(dec->screen, &dec->cs, buf, new_size, NULL)) {
          RVID_ERR("Can't resize bitstream buffer!");
@@ -1074,7 +1074,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder, struct pipe_video_b
                            struct pipe_picture_desc *picture)
 {
    struct ruvd_decoder *dec = (struct ruvd_decoder *)decoder;
-   struct pb_buffer *dt;
+   struct pb_buffer_lean *dt;
    struct rvid_buffer *msg_fb_it_buf, *bs_buf;
    unsigned bs_size;
 
@@ -1110,13 +1110,13 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder, struct pipe_video_b
    }
 
    if (dec->dpb.res)
-      dec->msg->body.decode.dpb_size = dec->dpb.res->buf->base.size;
+      dec->msg->body.decode.dpb_size = dec->dpb.res->buf->size;
    dec->msg->body.decode.bsd_size = bs_size;
    dec->msg->body.decode.db_pitch = align(dec->base.width, get_db_pitch_alignment(dec));
 
    if (dec->stream_type == RUVD_CODEC_H264_PERF &&
        ((struct si_screen *)dec->screen)->info.family >= CHIP_POLARIS10)
-      dec->msg->body.decode.dpb_reserved = dec->ctx.res->buf->base.size;
+      dec->msg->body.decode.dpb_reserved = dec->ctx.res->buf->size;
 
    dt = dec->set_dtb(dec->msg, (struct vl_video_buffer *)target);
    if (((struct si_screen *)dec->screen)->info.family >= CHIP_STONEY)
@@ -1144,7 +1144,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder, struct pipe_video_b
      }
 
      if (dec->ctx.res)
-        dec->msg->body.decode.dpb_reserved = dec->ctx.res->buf->base.size;
+        dec->msg->body.decode.dpb_reserved = dec->ctx.res->buf->size;
      break;
 
   case PIPE_VIDEO_FORMAT_VC1:
@@ -15,7 +15,7 @@
 #include "ac_uvd_dec.h"
 
 /* driver dependent callback */
-typedef struct pb_buffer *(*ruvd_set_dtb)(struct ruvd_msg *msg, struct vl_video_buffer *vb);
+typedef struct pb_buffer_lean *(*ruvd_set_dtb)(struct ruvd_msg *msg, struct vl_video_buffer *vb);
 
 /* create an UVD decode */
 struct pipe_video_codec *si_common_uvd_create_decoder(struct pipe_context *context,
@@ -328,7 +328,7 @@ typedef struct ruvd_enc_vui_info_s
    uint32_t max_num_reorder_frames;
 } ruvd_enc_vui_info;
 
-typedef void (*radeon_uvd_enc_get_buffer)(struct pipe_resource *resource, struct pb_buffer **handle,
+typedef void (*radeon_uvd_enc_get_buffer)(struct pipe_resource *resource, struct pb_buffer_lean **handle,
                                           struct radeon_surf **surface);
 
 struct pipe_video_codec *radeon_uvd_create_encoder(struct pipe_context *context,
@@ -406,11 +406,11 @@ struct radeon_uvd_encoder {
 
    radeon_uvd_enc_get_buffer get_buffer;
 
-   struct pb_buffer *handle;
+   struct pb_buffer_lean *handle;
    struct radeon_surf *luma;
    struct radeon_surf *chroma;
 
-   struct pb_buffer *bs_handle;
+   struct pb_buffer_lean *bs_handle;
    unsigned bs_size;
 
    unsigned cpb_num;
@@ -34,7 +34,7 @@
 
 static const unsigned index_to_shifts[4] = {24, 16, 8, 0};
 
-static void radeon_uvd_enc_add_buffer(struct radeon_uvd_encoder *enc, struct pb_buffer *buf,
+static void radeon_uvd_enc_add_buffer(struct radeon_uvd_encoder *enc, struct pb_buffer_lean *buf,
                                       unsigned usage, enum radeon_bo_domain domain,
                                       signed offset)
 {
@@ -528,7 +528,7 @@ bool si_vce_is_fw_version_supported(struct si_screen *sscreen)
 /**
  * Add the buffer as relocation to the current command submission
  */
-void si_vce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf, unsigned usage,
+void si_vce_add_buffer(struct rvce_encoder *enc, struct pb_buffer_lean *buf, unsigned usage,
                        enum radeon_bo_domain domain, signed offset)
 {
    int reloc_idx;
@@ -33,7 +33,7 @@
 struct si_screen;
 
 /* driver dependent callback */
-typedef void (*rvce_get_buffer)(struct pipe_resource *resource, struct pb_buffer **handle,
+typedef void (*rvce_get_buffer)(struct pipe_resource *resource, struct pb_buffer_lean **handle,
                                 struct radeon_surf **surface);
 
 /* Coded picture buffer slot */
@@ -374,11 +374,11 @@ struct rvce_encoder {
 
    rvce_get_buffer get_buffer;
 
-   struct pb_buffer *handle;
+   struct pb_buffer_lean *handle;
    struct radeon_surf *luma;
    struct radeon_surf *chroma;
 
-   struct pb_buffer *bs_handle;
+   struct pb_buffer_lean *bs_handle;
    unsigned bs_size;
 
    struct rvce_cpb_slot *cpb_array;
@@ -413,7 +413,7 @@ struct pipe_video_codec *si_vce_create_encoder(struct pipe_context *context,
 
 bool si_vce_is_fw_version_supported(struct si_screen *sscreen);
 
-void si_vce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf, unsigned usage,
+void si_vce_add_buffer(struct rvce_encoder *enc, struct pb_buffer_lean *buf, unsigned usage,
                        enum radeon_bo_domain domain, signed offset);
 
 /* init vce fw 40.2.2 specific callbacks */
@@ -79,7 +79,7 @@ static void encode(struct rvce_encoder *enc)
 
    if (enc->dual_pipe) {
      unsigned aux_offset =
-        enc->cpb.res->buf->base.size - RVCE_MAX_AUX_BUFFER_NUM * RVCE_MAX_BITSTREAM_OUTPUT_ROW_SIZE * 2;
+        enc->cpb.res->buf->size - RVCE_MAX_AUX_BUFFER_NUM * RVCE_MAX_BITSTREAM_OUTPUT_ROW_SIZE * 2;
      RVCE_BEGIN(0x05000002); // auxiliary buffer
      for (i = 0; i < 8; ++i) {
        RVCE_CS(aux_offset);
@@ -226,7 +226,7 @@ static void encode(struct rvce_encoder *enc)
 
    if (enc->dual_pipe) {
      unsigned aux_offset =
-        enc->cpb.res->buf->base.size - RVCE_MAX_AUX_BUFFER_NUM * RVCE_MAX_BITSTREAM_OUTPUT_ROW_SIZE * 2;
+        enc->cpb.res->buf->size - RVCE_MAX_AUX_BUFFER_NUM * RVCE_MAX_BITSTREAM_OUTPUT_ROW_SIZE * 2;
      RVCE_BEGIN(0x05000002); // auxiliary buffer
      for (i = 0; i < 8; ++i) {
        RVCE_CS(aux_offset);
@@ -1912,9 +1912,9 @@ static unsigned rvcn_dec_dynamic_dpb_t2_message(struct radeon_decoder *dec, rvcn
    return 0;
 }
 
-static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
-                                                 struct pipe_video_buffer *target,
-                                                 struct pipe_picture_desc *picture)
+static struct pb_buffer_lean *rvcn_dec_message_decode(struct radeon_decoder *dec,
+                                                      struct pipe_video_buffer *target,
+                                                      struct pipe_picture_desc *picture)
 {
    DECRYPT_PARAMETERS *decrypt = (DECRYPT_PARAMETERS *)picture->decrypt_key;
    bool encrypted = picture->protected_playback;
@@ -2164,14 +2164,14 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
    luma = (struct si_texture *)((struct vl_video_buffer *)out_surf)->resources[0];
    chroma = (struct si_texture *)((struct vl_video_buffer *)out_surf)->resources[1];
 
-   decode->dpb_size = (dec->dpb_type != DPB_DYNAMIC_TIER_2) ? dec->dpb.res->buf->base.size : 0;
+   decode->dpb_size = (dec->dpb_type != DPB_DYNAMIC_TIER_2) ? dec->dpb.res->buf->size : 0;
 
    /* When texture being created, the bo will be created with total size of planes,
    * and all planes point to the same buffer */
-   assert(si_resource(((struct vl_video_buffer *)out_surf)->resources[0])->buf->base.size ==
-          si_resource(((struct vl_video_buffer *)out_surf)->resources[1])->buf->base.size);
+   assert(si_resource(((struct vl_video_buffer *)out_surf)->resources[0])->buf->size ==
+          si_resource(((struct vl_video_buffer *)out_surf)->resources[1])->buf->size);
 
-   decode->dt_size = si_resource(((struct vl_video_buffer *)out_surf)->resources[0])->buf->base.size;
+   decode->dt_size = si_resource(((struct vl_video_buffer *)out_surf)->resources[0])->buf->size;
 
    decode->sct_size = 0;
    decode->sc_coeff_size = 0;
@@ -2364,7 +2364,7 @@ static struct pb_buffer *rvcn_dec_message_decode(struct radeon_decoder *dec,
    }
 
    if (dec->ctx.res)
-      decode->hw_ctxt_size = dec->ctx.res->buf->base.size;
+      decode->hw_ctxt_size = dec->ctx.res->buf->size;
 
    if (dec->dpb_type == DPB_DYNAMIC_TIER_2)
      if (rvcn_dec_dynamic_dpb_t2_message(dec, decode, dynamic_dpb_t2, encrypted))
@@ -2418,7 +2418,7 @@ static void set_reg(struct radeon_decoder *dec, unsigned reg, uint32_t val)
 }
 
 /* send a command to the VCPU through the GPCOM registers */
-static void send_cmd(struct radeon_decoder *dec, unsigned cmd, struct pb_buffer *buf, uint32_t off,
+static void send_cmd(struct radeon_decoder *dec, unsigned cmd, struct pb_buffer_lean *buf, uint32_t off,
                      unsigned usage, enum radeon_bo_domain domain)
 {
    uint64_t addr;
@@ -2586,35 +2586,35 @@ static unsigned calc_ctx_size_h264_perf(struct radeon_decoder *dec)
    height_in_mb = align(height / VL_MACROBLOCK_HEIGHT, 2);
 
    unsigned fs_in_mb = width_in_mb * height_in_mb;
-   unsigned num_dpb_buffer;
+   unsigned num_dpb_buffer_lean;
    switch (dec->base.level) {
    case 30:
-      num_dpb_buffer = 8100 / fs_in_mb;
+      num_dpb_buffer_lean = 8100 / fs_in_mb;
      break;
    case 31:
-      num_dpb_buffer = 18000 / fs_in_mb;
+      num_dpb_buffer_lean = 18000 / fs_in_mb;
      break;
    case 32:
-      num_dpb_buffer = 20480 / fs_in_mb;
+      num_dpb_buffer_lean = 20480 / fs_in_mb;
      break;
    case 41:
-      num_dpb_buffer = 32768 / fs_in_mb;
+      num_dpb_buffer_lean = 32768 / fs_in_mb;
      break;
    case 42:
-      num_dpb_buffer = 34816 / fs_in_mb;
+      num_dpb_buffer_lean = 34816 / fs_in_mb;
      break;
    case 50:
-      num_dpb_buffer = 110400 / fs_in_mb;
+      num_dpb_buffer_lean = 110400 / fs_in_mb;
      break;
    case 51:
-      num_dpb_buffer = 184320 / fs_in_mb;
+      num_dpb_buffer_lean = 184320 / fs_in_mb;
      break;
    default:
-      num_dpb_buffer = 184320 / fs_in_mb;
+      num_dpb_buffer_lean = 184320 / fs_in_mb;
      break;
    }
-   num_dpb_buffer++;
-   max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer), max_references);
+   num_dpb_buffer_lean++;
+   max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer_lean), max_references);
    ctx_size = max_references * align(width_in_mb * height_in_mb * 192, 256);
 
    return ctx_size;
@@ -2644,36 +2644,36 @@ static unsigned calc_dpb_size(struct radeon_decoder *dec)
    switch (u_reduce_video_profile(dec->base.profile)) {
    case PIPE_VIDEO_FORMAT_MPEG4_AVC: {
      unsigned fs_in_mb = width_in_mb * height_in_mb;
-     unsigned num_dpb_buffer;
+     unsigned num_dpb_buffer_lean;
 
      switch (dec->base.level) {
      case 30:
-        num_dpb_buffer = 8100 / fs_in_mb;
+        num_dpb_buffer_lean = 8100 / fs_in_mb;
        break;
      case 31:
-        num_dpb_buffer = 18000 / fs_in_mb;
+        num_dpb_buffer_lean = 18000 / fs_in_mb;
        break;
      case 32:
-        num_dpb_buffer = 20480 / fs_in_mb;
+        num_dpb_buffer_lean = 20480 / fs_in_mb;
        break;
      case 41:
-        num_dpb_buffer = 32768 / fs_in_mb;
+        num_dpb_buffer_lean = 32768 / fs_in_mb;
        break;
      case 42:
-        num_dpb_buffer = 34816 / fs_in_mb;
+        num_dpb_buffer_lean = 34816 / fs_in_mb;
        break;
      case 50:
-        num_dpb_buffer = 110400 / fs_in_mb;
+        num_dpb_buffer_lean = 110400 / fs_in_mb;
        break;
      case 51:
-        num_dpb_buffer = 184320 / fs_in_mb;
+        num_dpb_buffer_lean = 184320 / fs_in_mb;
        break;
      default:
-        num_dpb_buffer = 184320 / fs_in_mb;
+        num_dpb_buffer_lean = 184320 / fs_in_mb;
        break;
      }
-     num_dpb_buffer++;
-     max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer), max_references);
+     num_dpb_buffer_lean++;
+     max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer_lean), max_references);
      dpb_size = image_size * max_references;
      break;
    }
@@ -2882,7 +2882,7 @@ static void radeon_dec_decode_bitstream(struct pipe_video_codec *decoder,
 
    struct rvid_buffer *buf = &dec->bs_buffers[dec->cur_buffer];
 
-   if (total_bs_size > buf->res->buf->base.size) {
+   if (total_bs_size > buf->res->buf->size) {
      dec->ws->buffer_unmap(dec->ws, buf->res->buf);
      dec->bs_ptr = NULL;
      if (!si_vid_resize_buffer(dec->screen, &dec->cs, buf, total_bs_size, NULL)) {
@@ -2911,7 +2911,7 @@ static void radeon_dec_decode_bitstream(struct pipe_video_codec *decoder,
 void send_cmd_dec(struct radeon_decoder *dec, struct pipe_video_buffer *target,
                   struct pipe_picture_desc *picture)
 {
-   struct pb_buffer *dt;
+   struct pb_buffer_lean *dt;
    struct rvid_buffer *msg_fb_it_probs_buf, *bs_buf;
 
    msg_fb_it_probs_buf = &dec->msg_fb_it_probs_buffers[dec->cur_buffer];
@@ -16,9 +16,9 @@
 #include <assert.h>
 #include <stdio.h>
 
-static struct pb_buffer *radeon_jpeg_get_decode_param(struct radeon_decoder *dec,
-                                                      struct pipe_video_buffer *target,
-                                                      struct pipe_picture_desc *picture)
+static struct pb_buffer_lean *radeon_jpeg_get_decode_param(struct radeon_decoder *dec,
+                                                           struct pipe_video_buffer *target,
+                                                           struct pipe_picture_desc *picture)
 {
    struct si_texture *luma = (struct si_texture *)((struct vl_video_buffer *)target)->resources[0];
    struct si_texture *chroma, *chromav;
@@ -62,7 +62,7 @@ static void set_reg_jpeg(struct radeon_decoder *dec, unsigned reg, unsigned cond
 }
 
 /* send a bitstream buffer command */
-static void send_cmd_bitstream(struct radeon_decoder *dec, struct pb_buffer *buf, uint32_t off,
+static void send_cmd_bitstream(struct radeon_decoder *dec, struct pb_buffer_lean *buf, uint32_t off,
                                unsigned usage, enum radeon_bo_domain domain)
 {
    uint64_t addr;
@@ -105,7 +105,7 @@ static void send_cmd_bitstream(struct radeon_decoder *dec, struct pb_buffer *buf
 }
 
 /* send a target buffer command */
-static void send_cmd_target(struct radeon_decoder *dec, struct pb_buffer *buf, uint32_t off,
+static void send_cmd_target(struct radeon_decoder *dec, struct pb_buffer_lean *buf, uint32_t off,
                             unsigned usage, enum radeon_bo_domain domain)
 {
    uint64_t addr;
@@ -184,7 +184,7 @@ static void send_cmd_target(struct radeon_decoder *dec, struct pb_buffer *buf, u
 }
 
 /* send a bitstream buffer command */
-static void send_cmd_bitstream_direct(struct radeon_decoder *dec, struct pb_buffer *buf,
+static void send_cmd_bitstream_direct(struct radeon_decoder *dec, struct pb_buffer_lean *buf,
                                       uint32_t off, unsigned usage,
                                       enum radeon_bo_domain domain)
 {
@@ -224,7 +224,7 @@ static void send_cmd_bitstream_direct(struct radeon_decoder *dec, struct pb_buff
 }
 
 /* send a target buffer command */
-static void send_cmd_target_direct(struct radeon_decoder *dec, struct pb_buffer *buf, uint32_t off,
+static void send_cmd_target_direct(struct radeon_decoder *dec, struct pb_buffer_lean *buf, uint32_t off,
                                    unsigned usage, enum radeon_bo_domain domain,
                                    enum pipe_format buffer_format)
 {
@@ -360,7 +360,7 @@ static void send_cmd_target_direct(struct radeon_decoder *dec, struct pb_buffer
 void send_cmd_jpeg(struct radeon_decoder *dec, struct pipe_video_buffer *target,
                    struct pipe_picture_desc *picture)
 {
-   struct pb_buffer *dt;
+   struct pb_buffer_lean *dt;
    struct rvid_buffer *bs_buf;
 
    bs_buf = &dec->bs_buffers[dec->cur_buffer];
@@ -1160,7 +1160,7 @@ static void radeon_enc_encode_bitstream(struct pipe_video_codec *encoder,
 
    if (vid_buf->base.statistics_data) {
      enc->get_buffer(vid_buf->base.statistics_data, &enc->stats, NULL);
-     if (enc->stats->base.size < sizeof(rvcn_encode_stats_type_0_t)) {
+     if (enc->stats->size < sizeof(rvcn_encode_stats_type_0_t)) {
        RVID_ERR("Encoder statistics output buffer is too small.\n");
        enc->stats = NULL;
      }
@@ -1291,7 +1291,7 @@ error:
    return NULL;
 }
 
-void radeon_enc_add_buffer(struct radeon_encoder *enc, struct pb_buffer *buf,
+void radeon_enc_add_buffer(struct radeon_encoder *enc, struct pb_buffer_lean *buf,
                            unsigned usage, enum radeon_bo_domain domain, signed offset)
 {
    enc->ws->cs_add_buffer(&enc->cs, buf, usage | RADEON_USAGE_SYNCHRONIZED, domain);
@@ -49,7 +49,7 @@
    } \
 } while(0)
 
-typedef void (*radeon_enc_get_buffer)(struct pipe_resource *resource, struct pb_buffer **handle,
+typedef void (*radeon_enc_get_buffer)(struct pipe_resource *resource, struct pb_buffer_lean **handle,
                                       struct radeon_surf **surface);
 
 struct pipe_video_codec *radeon_create_encoder(struct pipe_context *context,
@@ -233,11 +233,11 @@ struct radeon_encoder {
 
    radeon_enc_get_buffer get_buffer;
 
-   struct pb_buffer *handle;
+   struct pb_buffer_lean *handle;
    struct radeon_surf *luma;
    struct radeon_surf *chroma;
 
-   struct pb_buffer *bs_handle;
+   struct pb_buffer_lean *bs_handle;
    unsigned bs_size;
 
    struct rvid_buffer *si;
@@ -246,7 +246,7 @@ struct radeon_encoder {
    struct rvid_buffer *cdf;
    struct rvid_buffer *roi;
    struct radeon_enc_pic enc_pic;
-   struct pb_buffer *stats;
+   struct pb_buffer_lean *stats;
    rvcn_enc_cmd_t cmd;
 
    unsigned alignment;
@@ -270,7 +270,7 @@ struct radeon_encoder {
    struct pipe_context *ectx;
 };
 
-void radeon_enc_add_buffer(struct radeon_encoder *enc, struct pb_buffer *buf,
+void radeon_enc_add_buffer(struct radeon_encoder *enc, struct pb_buffer_lean *buf,
                            unsigned usage, enum radeon_bo_domain domain, signed offset);
 
 void radeon_enc_dummy(struct radeon_encoder *enc);
@@ -73,7 +73,7 @@ bool si_vid_resize_buffer(struct pipe_screen *screen, struct radeon_cmdbuf *cs,
 {
    struct si_screen *sscreen = (struct si_screen *)screen;
    struct radeon_winsys *ws = sscreen->ws;
-   unsigned bytes = MIN2(new_buf->res->buf->base.size, new_size);
+   unsigned bytes = MIN2(new_buf->res->buf->size, new_size);
    struct rvid_buffer old_buf = *new_buf;
    void *src = NULL, *dst = NULL;
 
@@ -12,7 +12,7 @@
 #include <inttypes.h>
 #include <stdio.h>
 
-bool si_cs_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
+bool si_cs_is_buffer_referenced(struct si_context *sctx, struct pb_buffer_lean *buf,
                                 unsigned usage)
 {
    return sctx->ws->cs_is_buffer_referenced(&sctx->gfx_cs, buf, usage);
@@ -144,7 +144,7 @@ void si_init_resource_fields(struct si_screen *sscreen, struct si_resource *res,
 
 bool si_alloc_resource(struct si_screen *sscreen, struct si_resource *res)
 {
-   struct pb_buffer *old_buf, *new_buf;
+   struct pb_buffer_lean *old_buf, *new_buf;
 
    /* Allocate a new resource. */
    new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size, 1 << res->bo_alignment_log2,
@@ -179,7 +179,7 @@ bool si_alloc_resource(struct si_screen *sscreen, struct si_resource *res)
    /* Print debug information. */
    if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
      fprintf(stderr, "VM start=0x%" PRIX64 " end=0x%" PRIX64 " | Buffer %" PRIu64 " bytes | Flags: ",
-             res->gpu_address, res->gpu_address + res->buf->base.size, res->buf->base.size);
+             res->gpu_address, res->gpu_address + res->buf->size, res->buf->size);
      si_res_print_flags(res->flags);
      fprintf(stderr, "\n");
    }
@@ -640,10 +640,10 @@ static struct pipe_resource *si_buffer_from_user_memory(struct pipe_screen *scre
 
 struct pipe_resource *si_buffer_from_winsys_buffer(struct pipe_screen *screen,
                                                    const struct pipe_resource *templ,
-                                                   struct pb_buffer *imported_buf,
+                                                   struct pb_buffer_lean *imported_buf,
                                                    uint64_t offset)
 {
-   if (offset + templ->width0 > imported_buf->base.size)
+   if (offset + templ->width0 > imported_buf->size)
      return NULL;
 
    struct si_screen *sscreen = (struct si_screen *)screen;
@@ -679,8 +679,8 @@ struct pipe_resource *si_buffer_from_winsys_buffer(struct pipe_screen *screen,
      res->b.b.usage = PIPE_USAGE_STAGING;
    }
 
-   si_init_resource_fields(sscreen, res, imported_buf->base.size,
-                           1 << imported_buf->base.alignment_log2);
+   si_init_resource_fields(sscreen, res, imported_buf->size,
+                           1 << imported_buf->alignment_log2);
 
    res->b.is_shared = true;
    res->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);
@@ -899,7 +899,7 @@ si_spm_init(struct si_context *sctx)
 void
 si_spm_finish(struct si_context *sctx)
 {
-   struct pb_buffer *bo = sctx->spm.bo;
+   struct pb_buffer_lean *bo = sctx->spm.bo;
    radeon_bo_reference(sctx->screen->ws, &bo, NULL);
 
    ac_destroy_spm(&sctx->spm);
@@ -1087,7 +1087,7 @@ static void si_test_gds_memory_management(struct si_context *sctx, unsigned allo
 {
    struct radeon_winsys *ws = sctx->ws;
    struct radeon_cmdbuf cs[8];
-   struct pb_buffer *gds_bo[ARRAY_SIZE(cs)];
+   struct pb_buffer_lean *gds_bo[ARRAY_SIZE(cs)];
 
    for (unsigned i = 0; i < ARRAY_SIZE(cs); i++) {
      ws->cs_create(&cs[i], sctx->ctx, AMD_IP_COMPUTE, NULL, NULL);
@@ -322,7 +322,7 @@ struct si_resource {
    uint32_t _pad;
 
    /* Winsys objects. */
-   struct pb_buffer *buf;
+   struct pb_buffer_lean *buf;
    uint64_t gpu_address;
 
    /* Resource properties. */
@@ -438,7 +438,7 @@ struct si_texture {
  */
 struct si_auxiliary_texture {
    struct threaded_resource b;
-   struct pb_buffer *buffer;
+   struct pb_buffer_lean *buffer;
    uint32_t offset;
    uint32_t stride;
 };
@@ -529,7 +529,7 @@ union si_mmio_counters {
 
 struct si_memory_object {
    struct pipe_memory_object b;
-   struct pb_buffer *buf;
+   struct pb_buffer_lean *buf;
    uint32_t stride;
 };
 
@@ -715,7 +715,7 @@ struct si_screen {
 
    /* NGG streamout. */
    simple_mtx_t gds_mutex;
-   struct pb_buffer *gds_oa;
+   struct pb_buffer_lean *gds_oa;
 };
 
 struct si_compute {
@@ -1420,7 +1420,7 @@ void si_gfx_blit(struct pipe_context *ctx, const struct pipe_blit_info *info);
 bool si_nir_is_output_const_if_tex_is_const(struct nir_shader *shader, float *in, float *out, int *texunit);
 
 /* si_buffer.c */
-bool si_cs_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
+bool si_cs_is_buffer_referenced(struct si_context *sctx, struct pb_buffer_lean *buf,
                                 unsigned usage);
 void *si_buffer_map(struct si_context *sctx, struct si_resource *resource,
                     unsigned usage);
@@ -1433,7 +1433,7 @@ struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen, unsigne
                                              unsigned usage, unsigned size, unsigned alignment);
 struct pipe_resource *si_buffer_from_winsys_buffer(struct pipe_screen *screen,
                                                    const struct pipe_resource *templ,
-                                                   struct pb_buffer *imported_buf,
+                                                   struct pb_buffer_lean *imported_buf,
                                                    uint64_t offset);
 void si_replace_buffer_storage(struct pipe_context *ctx, struct pipe_resource *dst,
                                struct pipe_resource *src, unsigned num_rebinds,
@@ -669,7 +669,7 @@ bool si_init_sqtt(struct si_context *sctx)
 void si_destroy_sqtt(struct si_context *sctx)
 {
    struct si_screen *sscreen = sctx->screen;
-   struct pb_buffer *bo = sctx->sqtt->bo;
+   struct pb_buffer_lean *bo = sctx->sqtt->bo;
    radeon_bo_reference(sctx->screen->ws, &bo, NULL);
 
    if (sctx->sqtt->trigger_file)
@@ -949,7 +949,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
                                                    const struct pipe_resource *base,
                                                    const struct radeon_surf *surface,
                                                    const struct si_texture *plane0,
-                                                   struct pb_buffer *imported_buf,
+                                                   struct pb_buffer_lean *imported_buf,
                                                    uint64_t offset, unsigned pitch_in_bytes,
                                                    uint64_t alloc_size, unsigned alignment)
 {
@@ -1007,8 +1007,8 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
    } else {
      resource->buf = imported_buf;
      resource->gpu_address = sscreen->ws->buffer_get_virtual_address(resource->buf);
-     resource->bo_size = imported_buf->base.size;
-     resource->bo_alignment_log2 = imported_buf->base.alignment_log2;
+     resource->bo_size = imported_buf->size;
+     resource->bo_alignment_log2 = imported_buf->alignment_log2;
      resource->domains = sscreen->ws->buffer_get_initial_domain(resource->buf);
      if (sscreen->ws->buffer_get_flags)
        resource->flags = sscreen->ws->buffer_get_flags(resource->buf);
@@ -1018,7 +1018,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
      fprintf(stderr,
              "VM start=0x%" PRIX64 " end=0x%" PRIX64
              " | Texture %ix%ix%i, %i levels, %i samples, %s | Flags: ",
-             tex->buffer.gpu_address, tex->buffer.gpu_address + tex->buffer.buf->base.size,
+             tex->buffer.gpu_address, tex->buffer.gpu_address + tex->buffer.buf->size,
              base->width0, base->height0, util_num_layers(base, 0), base->last_level + 1,
              base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
      si_res_print_flags(tex->buffer.flags);
@@ -1580,7 +1580,7 @@ static bool si_texture_is_aux_plane(const struct pipe_resource *resource)
 
 static struct pipe_resource *si_texture_from_winsys_buffer(struct si_screen *sscreen,
                                                            const struct pipe_resource *templ,
-                                                           struct pb_buffer *buf, unsigned stride,
+                                                           struct pb_buffer_lean *buf, unsigned stride,
                                                            uint64_t offset, uint64_t modifier,
                                                            unsigned usage, bool dedicated)
 {
@@ -1681,7 +1681,7 @@ static struct pipe_resource *si_texture_from_winsys_buffer(struct si_screen *ssc
    }
 
    if (ac_surface_get_plane_offset(sscreen->info.gfx_level, &tex->surface, 0, 0) +
-       tex->surface.total_size > buf->base.size) {
+       tex->surface.total_size > buf->size) {
      si_texture_reference(&tex, NULL);
      return NULL;
    }
@@ -1705,7 +1705,7 @@ static struct pipe_resource *si_texture_from_handle(struct pipe_screen *screen,
                                                     struct winsys_handle *whandle, unsigned usage)
 {
    struct si_screen *sscreen = (struct si_screen *)screen;
-   struct pb_buffer *buf = NULL;
+   struct pb_buffer_lean *buf = NULL;
 
    buf = sscreen->ws->buffer_from_handle(sscreen->ws, whandle,
                                          sscreen->info.max_alignment,
@@ -2025,7 +2025,7 @@ static void si_texture_transfer_unmap(struct pipe_context *ctx, struct pipe_tran
    si_copy_from_staging_texture(ctx, stransfer);
 
    if (stransfer->staging) {
-      sctx->num_alloc_tex_transfer_bytes += stransfer->staging->buf->base.size;
+      sctx->num_alloc_tex_transfer_bytes += stransfer->staging->buf->size;
      si_resource_reference(&stransfer->staging, NULL);
    }
 
@@ -2262,7 +2262,7 @@ si_memobj_from_handle(struct pipe_screen *screen, struct winsys_handle *whandle,
 {
    struct si_screen *sscreen = (struct si_screen *)screen;
    struct si_memory_object *memobj = CALLOC_STRUCT(si_memory_object);
-   struct pb_buffer *buf = NULL;
+   struct pb_buffer_lean *buf = NULL;
 
    if (!memobj)
      return NULL;
@@ -2312,7 +2312,7 @@ static struct pipe_resource *si_resource_from_memobj(struct pipe_screen *screen,
    /* si_texture_from_winsys_buffer doesn't increment refcount of
    * memobj->buf, so increment it here.
    */
-   struct pb_buffer *buf = NULL;
+   struct pb_buffer_lean *buf = NULL;
    radeon_bo_reference(sscreen->ws, &buf, memobj->buf);
    return res;
 }
@@ -69,7 +69,7 @@ struct pipe_video_buffer *si_video_buffer_create_with_modifiers(struct pipe_cont
 }
 
 /* set the decoding target buffer offsets */
-static struct pb_buffer *si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
+static struct pb_buffer_lean *si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
 {
    struct si_screen *sscreen = (struct si_screen *)buf->base.context->screen;
    struct si_texture *luma = (struct si_texture *)buf->resources[0];
@@ -85,7 +85,7 @@ static struct pb_buffer *si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_bu
 }
 
 /* get the radeon resources for VCE */
-static void si_vce_get_buffer(struct pipe_resource *resource, struct pb_buffer **handle,
+static void si_vce_get_buffer(struct pipe_resource *resource, struct pb_buffer_lean **handle,
                               struct radeon_surf **surface)
 {
    struct si_texture *res = (struct si_texture *)resource;
@@ -334,13 +334,14 @@ struct radeon_winsys {
    * \param domain A bitmask of the RADEON_DOMAIN_* flags.
    * \return The created buffer object.
    */
-   struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws, uint64_t size, unsigned alignment,
-                                      enum radeon_bo_domain domain, enum radeon_bo_flag flags);
+   struct pb_buffer_lean *(*buffer_create)(struct radeon_winsys *ws, uint64_t size,
+                                           unsigned alignment, enum radeon_bo_domain domain,
+                                           enum radeon_bo_flag flags);
 
    /**
    * Don't use directly. Use radeon_bo_reference.
    */
-   void (*buffer_destroy)(struct radeon_winsys *ws, struct pb_buffer *buf);
+   void (*buffer_destroy)(struct radeon_winsys *ws, struct pb_buffer_lean *buf);
 
    /**
    * Map the entire data store of a buffer object into the client's address

@@ -354,7 +355,7 @@ struct radeon_winsys {
    * \param usage A bitmask of the PIPE_MAP_* and RADEON_MAP_* flags.
    * \return The pointer at the beginning of the buffer.
    */
-   void *(*buffer_map)(struct radeon_winsys *ws, struct pb_buffer *buf,
+   void *(*buffer_map)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                        struct radeon_cmdbuf *cs, enum pipe_map_flags usage);
 
    /**

@@ -362,7 +363,7 @@ struct radeon_winsys {
    *
    * \param buf A winsys buffer object to unmap.
    */
-   void (*buffer_unmap)(struct radeon_winsys *ws, struct pb_buffer *buf);
+   void (*buffer_unmap)(struct radeon_winsys *ws, struct pb_buffer_lean *buf);
 
    /**
    * Wait for the buffer and return true if the buffer is not used

@@ -372,7 +373,7 @@ struct radeon_winsys {
    * The timeout of OS_TIMEOUT_INFINITE will always wait until the buffer
    * is idle.
    */
-   bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer *buf,
+   bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                        uint64_t timeout, unsigned usage);
 
    /**

@@ -382,7 +383,7 @@ struct radeon_winsys {
    * \param buf A winsys buffer object to get the flags from.
    * \param md Metadata
    */
-   void (*buffer_get_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
+   void (*buffer_get_metadata)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                                struct radeon_bo_metadata *md, struct radeon_surf *surf);
 
    /**

@@ -392,7 +393,7 @@ struct radeon_winsys {
    * \param buf A winsys buffer object to set the flags for.
    * \param md Metadata
    */
-   void (*buffer_set_metadata)(struct radeon_winsys *ws, struct pb_buffer *buf,
+   void (*buffer_set_metadata)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                                struct radeon_bo_metadata *md, struct radeon_surf *surf);
 
    /**

@@ -403,8 +404,10 @@ struct radeon_winsys {
    * \param whandle A winsys handle pointer as was received from a state
    * tracker.
    */
-   struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws, struct winsys_handle *whandle,
-                                           unsigned vm_alignment, bool is_prime_linear_buffer);
+   struct pb_buffer_lean *(*buffer_from_handle)(struct radeon_winsys *ws,
+                                                struct winsys_handle *whandle,
+                                                unsigned vm_alignment,
+                                                bool is_prime_linear_buffer);
 
    /**
    * Get a winsys buffer from a user pointer. The resulting buffer can't

@@ -414,7 +417,8 @@ struct radeon_winsys {
    * \param pointer User pointer to turn into a buffer object.
    * \param Size Size in bytes for the new buffer.
    */
-   struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer, uint64_t size, enum radeon_bo_flag flags);
+   struct pb_buffer_lean *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer,
+                                             uint64_t size, enum radeon_bo_flag flags);
 
    /**
    * Whether the buffer was created from a user pointer.

@@ -422,10 +426,10 @@ struct radeon_winsys {
    * \param buf A winsys buffer object
    * \return whether \p buf was created via buffer_from_ptr
    */
-   bool (*buffer_is_user_ptr)(struct pb_buffer *buf);
+   bool (*buffer_is_user_ptr)(struct pb_buffer_lean *buf);
 
    /** Whether the buffer was suballocated. */
-   bool (*buffer_is_suballocated)(struct pb_buffer *buf);
+   bool (*buffer_is_suballocated)(struct pb_buffer_lean *buf);
 
    /**
    * Get a winsys handle from a winsys buffer. The internal structure

@@ -436,7 +440,7 @@ struct radeon_winsys {
    * \param whandle A winsys handle pointer.
    * \return true on success.
    */
-   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer *buf,
+   bool (*buffer_get_handle)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                              struct winsys_handle *whandle);
 
    /**

@@ -449,7 +453,7 @@ struct radeon_winsys {
    *
    * \return false on out of memory or other failure, true on success.
    */
-   bool (*buffer_commit)(struct radeon_winsys *ws, struct pb_buffer *buf,
+   bool (*buffer_commit)(struct radeon_winsys *ws, struct pb_buffer_lean *buf,
                          uint64_t offset, uint64_t size, bool commit);
 
    /**

@@ -457,7 +461,7 @@ struct radeon_winsys {
    * \note Only implemented by the amdgpu winsys.
    * \return the skipped count if the range_offset fall into a hole.
    */
-   unsigned (*buffer_find_next_committed_memory)(struct pb_buffer *buf,
+   unsigned (*buffer_find_next_committed_memory)(struct pb_buffer_lean *buf,
                                                  uint64_t range_offset, unsigned *range_size);
    /**
    * Return the virtual address of a buffer.

@@ -468,7 +472,7 @@ struct radeon_winsys {
    * \param buf A winsys buffer object
    * \return virtual address
    */
-   uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);
+   uint64_t (*buffer_get_virtual_address)(struct pb_buffer_lean *buf);
 
    /**
    * Return the offset of this buffer relative to the relocation base.

@@ -480,12 +484,12 @@ struct radeon_winsys {
    * \param buf A winsys buffer object
    * \return the offset for relocations
    */
-   unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);
+   unsigned (*buffer_get_reloc_offset)(struct pb_buffer_lean *buf);
 
    /**
    * Query the initial placement of the buffer from the kernel driver.
    */
-   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);
+   enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer_lean *buf);
 
    /**
    * Query the flags used for creation of this buffer.

@@ -493,7 +497,7 @@ struct radeon_winsys {
    * Note that for imported buffer this may be lossy since not all flags
    * are passed 1:1.
    */
-   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer *buf);
+   enum radeon_bo_flag (*buffer_get_flags)(struct pb_buffer_lean *buf);
 
    /**************************************************************************
    * Command submission.

@@ -576,7 +580,7 @@ struct radeon_winsys {
    * \param domain Bitmask of the RADEON_DOMAIN_* flags.
    * \return Buffer index.
    */
-   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
+   unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf,
                              unsigned usage, enum radeon_bo_domain domain);
 
    /**

@@ -589,7 +593,7 @@ struct radeon_winsys {
    * \param buf Buffer
    * \return The buffer index, or -1 if the buffer has not been added.
    */
-   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer *buf);
+   int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf);
 
    /**
    * Return true if there is enough memory in VRAM and GTT for the buffers

@@ -650,7 +654,7 @@ struct radeon_winsys {
    * \param cs A command stream.
    * \param buf A winsys buffer.
    */
-   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer *buf,
+   bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs, struct pb_buffer_lean *buf,
                                    unsigned usage);
 
    /**

@@ -781,11 +785,12 @@ static inline bool radeon_uses_secure_bos(struct radeon_winsys* ws)
 }
 
 static inline void
-radeon_bo_reference(struct radeon_winsys *rws, struct pb_buffer **dst, struct pb_buffer *src)
+radeon_bo_reference(struct radeon_winsys *rws, struct pb_buffer_lean **dst,
+                    struct pb_buffer_lean *src)
 {
-   struct pb_buffer *old = *dst;
+   struct pb_buffer_lean *old = *dst;
 
-   if (pipe_reference(&(*dst)->base.reference, &src->base.reference))
+   if (pipe_reference(&(*dst)->reference, &src->reference))
       rws->buffer_destroy(rws, old);
    *dst = src;
 }
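The radeon_bo_reference inline above is now the single funnel through which buffers die: pipe_reference returns true once the old buffer's count drops to zero, and the winsys buffer_destroy hook runs in place of the old per-buffer vtbl call. A usage sketch (helper name invented; passing NULL as src turns the call into a plain unref):

   /* Drop a driver-owned reference; *buf is NULL afterwards because
    * radeon_bo_reference assigns src (here NULL) into the slot. */
   static void release_buffer(struct radeon_winsys *rws,
                              struct pb_buffer_lean **buf)
   {
      if (*buf)
         radeon_bo_reference(rws, buf, NULL);
   }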
@@ -32,7 +32,7 @@ struct amdgpu_sparse_backing_chunk {
 };
 
 static bool amdgpu_bo_wait(struct radeon_winsys *rws,
-                           struct pb_buffer *_buf, uint64_t timeout,
+                           struct pb_buffer_lean *_buf, uint64_t timeout,
                            unsigned usage)
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);

@@ -135,15 +135,15 @@ static inline unsigned get_slab_entry_offset(struct amdgpu_winsys_bo *bo)
 }
 
 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
-      struct pb_buffer *buf)
+      struct pb_buffer_lean *buf)
 {
-   return ((struct amdgpu_winsys_bo*)buf)->base.base.placement;
+   return ((struct amdgpu_winsys_bo*)buf)->base.placement;
 }
 
 static enum radeon_bo_flag amdgpu_bo_get_flags(
-      struct pb_buffer *buf)
+      struct pb_buffer_lean *buf)
 {
-   return ((struct amdgpu_winsys_bo*)buf)->base.base.usage;
+   return ((struct amdgpu_winsys_bo*)buf)->base.usage;
 }
 
 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)

@@ -156,7 +156,7 @@ static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
    bo->max_fences = 0;
 }
 
-void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
+void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer_lean *_buf)
 {
    struct amdgpu_bo_real *bo = get_real_bo(amdgpu_winsys_bo(_buf));
    struct amdgpu_screen_winsys *sws_iter;

@@ -164,15 +164,15 @@ void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
    simple_mtx_lock(&ws->bo_export_table_lock);
 
    /* amdgpu_bo_from_handle might have revived the bo */
-   if (p_atomic_read(&bo->b.base.base.reference.count)) {
+   if (p_atomic_read(&bo->b.base.reference.count)) {
       simple_mtx_unlock(&ws->bo_export_table_lock);
      return;
    }
 
    _mesa_hash_table_remove_key(ws->bo_export_table, bo->bo);
 
-   if (bo->b.base.base.placement & RADEON_DOMAIN_VRAM_GTT) {
-      amdgpu_bo_va_op(bo->bo, 0, bo->b.base.base.size, bo->gpu_address, 0, AMDGPU_VA_OP_UNMAP);
+   if (bo->b.base.placement & RADEON_DOMAIN_VRAM_GTT) {
+      amdgpu_bo_va_op(bo->bo, 0, bo->b.base.size, bo->gpu_address, 0, AMDGPU_VA_OP_UNMAP);
       amdgpu_va_range_free(bo->va_handle);
    }

@@ -215,16 +215,16 @@ void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
 
    amdgpu_bo_remove_fences(&bo->b);
 
-   if (bo->b.base.base.placement & RADEON_DOMAIN_VRAM)
-      ws->allocated_vram -= align64(bo->b.base.base.size, ws->info.gart_page_size);
-   else if (bo->b.base.base.placement & RADEON_DOMAIN_GTT)
-      ws->allocated_gtt -= align64(bo->b.base.base.size, ws->info.gart_page_size);
+   if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
+      ws->allocated_vram -= align64(bo->b.base.size, ws->info.gart_page_size);
+   else if (bo->b.base.placement & RADEON_DOMAIN_GTT)
+      ws->allocated_gtt -= align64(bo->b.base.size, ws->info.gart_page_size);
 
    simple_mtx_destroy(&bo->lock);
    FREE(bo);
 }
 
-static void amdgpu_bo_destroy_or_cache(struct radeon_winsys *rws, struct pb_buffer *_buf)
+static void amdgpu_bo_destroy_or_cache(struct radeon_winsys *rws, struct pb_buffer_lean *_buf)
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

@@ -259,10 +259,10 @@ static bool amdgpu_bo_do_map(struct radeon_winsys *rws, struct amdgpu_bo_real *b
    }
 
    if (p_atomic_inc_return(&bo->map_count) == 1) {
-      if (bo->b.base.base.placement & RADEON_DOMAIN_VRAM)
-         ws->mapped_vram += bo->b.base.base.size;
-      else if (bo->b.base.base.placement & RADEON_DOMAIN_GTT)
-         ws->mapped_gtt += bo->b.base.base.size;
+      if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
+         ws->mapped_vram += bo->b.base.size;
+      else if (bo->b.base.placement & RADEON_DOMAIN_GTT)
+         ws->mapped_gtt += bo->b.base.size;
       ws->num_mapped_buffers++;
    }

@@ -270,7 +270,7 @@ static bool amdgpu_bo_do_map(struct radeon_winsys *rws, struct amdgpu_bo_real *b
 }
 
 void *amdgpu_bo_map(struct radeon_winsys *rws,
-                    struct pb_buffer *buf,
+                    struct pb_buffer_lean *buf,
                     struct radeon_cmdbuf *rcs,
                     enum pipe_map_flags usage)
 {

@@ -300,7 +300,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
             return NULL;
          }
 
-         if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
+         if (!amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, 0,
                              RADEON_USAGE_WRITE)) {
            return NULL;
          }

@@ -311,7 +311,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
            return NULL;
         }
 
-        if (!amdgpu_bo_wait(rws, (struct pb_buffer*)bo, 0,
+        if (!amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, 0,
                             RADEON_USAGE_READWRITE)) {
           return NULL;
         }

@@ -339,7 +339,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
           }
        }
 
-       amdgpu_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
+       amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, OS_TIMEOUT_INFINITE,
                       RADEON_USAGE_WRITE);
     } else {
        /* Mapping for write. */

@@ -354,7 +354,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
        }
     }
 
-    amdgpu_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
+    amdgpu_bo_wait(rws, (struct pb_buffer_lean*)bo, OS_TIMEOUT_INFINITE,
                    RADEON_USAGE_READWRITE);
  }

@@ -401,7 +401,7 @@ void *amdgpu_bo_map(struct radeon_winsys *rws,
    return (uint8_t*)cpu + offset;
 }
 
-void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf)
+void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer_lean *buf)
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

@@ -419,10 +419,10 @@ void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf)
    assert(!real->cpu_ptr &&
          "too many unmaps or forgot RADEON_MAP_TEMPORARY flag");
 
-   if (real->b.base.base.placement & RADEON_DOMAIN_VRAM)
-      ws->mapped_vram -= real->b.base.base.size;
-   else if (real->b.base.base.placement & RADEON_DOMAIN_GTT)
-      ws->mapped_gtt -= real->b.base.base.size;
+   if (real->b.base.placement & RADEON_DOMAIN_VRAM)
+      ws->mapped_vram -= real->b.base.size;
+   else if (real->b.base.placement & RADEON_DOMAIN_GTT)
+      ws->mapped_gtt -= real->b.base.size;
    ws->num_mapped_buffers--;
 }
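The mapped_vram/mapped_gtt bookkeeping in the map/unmap hunks above fires only on the 0-to-1 transition of an atomic map count (and its mirror image on unmap), so nested maps of one BO never double-charge the totals. A standalone sketch of the pattern in C11 atomics, with invented names:

   #include <stdatomic.h>
   #include <stdint.h>

   struct tracked_buf {
      atomic_int map_count;
      uint64_t size;
   };

   /* Charge the total exactly once, on the first of any number of
    * nested maps; credit it back on the last unmap. */
   static void track_map(struct tracked_buf *b, uint64_t *mapped_total)
   {
      if (atomic_fetch_add(&b->map_count, 1) == 0)
         *mapped_total += b->size;
   }

   static void track_unmap(struct tracked_buf *b, uint64_t *mapped_total)
   {
      if (atomic_fetch_sub(&b->map_count, 1) == 1)
         *mapped_total -= b->size;
   }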
@@ -491,7 +491,7 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
         return NULL;
 
      bo = &new_bo->b;
-     pb_cache_init_entry(&ws->bo_cache, &new_bo->cache_entry, &bo->b.base.base, heap);
+     pb_cache_init_entry(&ws->bo_cache, &new_bo->cache_entry, &bo->b.base, heap);
      bo->b.type = slab_backing ? AMDGPU_BO_REAL_REUSABLE_SLAB : AMDGPU_BO_REAL_REUSABLE;
   } else {
      bo = CALLOC_STRUCT(amdgpu_bo_real);

@@ -587,11 +587,11 @@ static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
   }
 
   simple_mtx_init(&bo->lock, mtx_plain);
-  pipe_reference_init(&bo->b.base.base.reference, 1);
-  bo->b.base.base.placement = initial_domain;
-  bo->b.base.base.alignment_log2 = util_logbase2(alignment);
-  bo->b.base.base.usage = flags;
-  bo->b.base.base.size = size;
+  pipe_reference_init(&bo->b.base.reference, 1);
+  bo->b.base.placement = initial_domain;
+  bo->b.base.alignment_log2 = util_logbase2(alignment);
+  bo->b.base.usage = flags;
+  bo->b.base.size = size;
   bo->gpu_address = va;
   bo->b.unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->bo = buf_handle;

@@ -618,7 +618,7 @@ error_bo_alloc:
   return NULL;
 }
 
-bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer *_buf)
+bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer_lean *_buf)
 {
   return amdgpu_bo_wait(&ws->dummy_ws.base, _buf, 0, RADEON_USAGE_READWRITE);
 }

@@ -632,19 +632,19 @@ bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
 
 static unsigned get_slab_wasted_size(struct amdgpu_winsys *ws, struct amdgpu_bo_slab_entry *bo)
 {
-   assert(bo->b.base.base.size <= bo->entry.slab->entry_size);
-   assert(bo->b.base.base.size < (1 << bo->b.base.base.alignment_log2) ||
-          bo->b.base.base.size < 1 << ws->bo_slabs.min_order ||
-          bo->b.base.base.size > bo->entry.slab->entry_size / 2);
-   return bo->entry.slab->entry_size - bo->b.base.base.size;
+   assert(bo->b.base.size <= bo->entry.slab->entry_size);
+   assert(bo->b.base.size < (1 << bo->b.base.alignment_log2) ||
+          bo->b.base.size < 1 << ws->bo_slabs.min_order ||
+          bo->b.base.size > bo->entry.slab->entry_size / 2);
+   return bo->entry.slab->entry_size - bo->b.base.size;
 }
 
-static void amdgpu_bo_slab_destroy(struct radeon_winsys *rws, struct pb_buffer *_buf)
+static void amdgpu_bo_slab_destroy(struct radeon_winsys *rws, struct pb_buffer_lean *_buf)
 {
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_bo_slab_entry *bo = get_slab_entry_bo(amdgpu_winsys_bo(_buf));
 
-   if (bo->b.base.base.placement & RADEON_DOMAIN_VRAM)
+   if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
      ws->slab_wasted_vram -= get_slab_wasted_size(ws, bo);
   else
      ws->slab_wasted_gtt -= get_slab_wasted_size(ws, bo);

@@ -723,7 +723,7 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_s
   assert(slab_bo->b.b.b.type == AMDGPU_BO_REAL_REUSABLE_SLAB);
 
   /* We can get a buffer from pb_cache that is slightly larger. */
-  slab_size = slab_bo->b.b.b.base.base.size;
+  slab_size = slab_bo->b.b.b.base.size;
 
   slab_bo->slab.num_entries = slab_size / entry_size;
   slab_bo->slab.num_free = slab_bo->slab.num_entries;

@@ -740,9 +740,9 @@ struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_s
   for (unsigned i = 0; i < slab_bo->slab.num_entries; ++i) {
      struct amdgpu_bo_slab_entry *bo = &slab_bo->entries[i];
 
-     bo->b.base.base.placement = domains;
-     bo->b.base.base.alignment_log2 = util_logbase2(get_slab_entry_alignment(ws, entry_size));
-     bo->b.base.base.size = entry_size;
+     bo->b.base.placement = domains;
+     bo->b.base.alignment_log2 = util_logbase2(get_slab_entry_alignment(ws, entry_size));
+     bo->b.base.size = entry_size;
      bo->b.type = AMDGPU_BO_SLAB_ENTRY;
      bo->b.unique_id = base_id + i;

@@ -767,10 +767,10 @@ fail:
 void amdgpu_bo_slab_free(struct amdgpu_winsys *ws, struct pb_slab *slab)
 {
   struct amdgpu_bo_real_reusable_slab *bo = get_bo_from_slab(slab);
-  unsigned slab_size = bo->b.b.b.base.base.size;
+  unsigned slab_size = bo->b.b.b.base.size;
 
   assert(bo->slab.num_entries * bo->slab.entry_size <= slab_size);
-  if (bo->b.b.b.base.base.placement & RADEON_DOMAIN_VRAM)
+  if (bo->b.b.b.base.placement & RADEON_DOMAIN_VRAM)
      ws->slab_wasted_vram -= slab_size - bo->slab.num_entries * bo->slab.entry_size;
   else
      ws->slab_wasted_gtt -= slab_size - bo->slab.num_entries * bo->slab.entry_size;
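get_slab_wasted_size above is the suballocator's bookkeeping: waste is simply the slab entry size minus the bytes actually requested, accumulated per heap into ws->slab_wasted_vram/gtt. A toy computation with made-up numbers:

   #include <assert.h>

   static unsigned wasted(unsigned entry_size, unsigned request)
   {
      assert(request <= entry_size);
      return entry_size - request;
   }

   int main(void)
   {
      /* e.g. a 5000-byte buffer landing in an 8192-byte slab entry */
      assert(wasted(8192, 5000) == 3192);
      return 0;
   }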
@@ -869,7 +869,7 @@ sparse_backing_alloc(struct amdgpu_winsys *ws, struct amdgpu_bo_sparse *bo,
 
    /* Allocate a new backing buffer if necessary. */
    if (!best_backing) {
-      struct pb_buffer *buf;
+      struct pb_buffer_lean *buf;
       uint64_t size;
       uint32_t pages;

@@ -885,16 +885,16 @@ sparse_backing_alloc(struct amdgpu_winsys *ws, struct amdgpu_bo_sparse *bo,
         return NULL;
      }
 
-     assert(bo->num_backing_pages < DIV_ROUND_UP(bo->b.base.base.size, RADEON_SPARSE_PAGE_SIZE));
+     assert(bo->num_backing_pages < DIV_ROUND_UP(bo->b.base.size, RADEON_SPARSE_PAGE_SIZE));
 
-     size = MIN3(bo->b.base.base.size / 16,
+     size = MIN3(bo->b.base.size / 16,
                  8 * 1024 * 1024,
-                 bo->b.base.base.size - (uint64_t)bo->num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
+                 bo->b.base.size - (uint64_t)bo->num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
      size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
 
      buf = amdgpu_bo_create(ws, size, RADEON_SPARSE_PAGE_SIZE,
-                            bo->b.base.base.placement,
-                            (bo->b.base.base.usage & ~RADEON_FLAG_SPARSE &
+                            bo->b.base.placement,
+                            (bo->b.base.usage & ~RADEON_FLAG_SPARSE &
                             /* Set the interprocess sharing flag to disable pb_cache because
                              * amdgpu_bo_wait doesn't wait for active CS jobs.
                              */

@@ -906,7 +906,7 @@ sparse_backing_alloc(struct amdgpu_winsys *ws, struct amdgpu_bo_sparse *bo,
      }
 
      /* We might have gotten a bigger buffer than requested via caching. */
-     pages = buf->base.size / RADEON_SPARSE_PAGE_SIZE;
+     pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
 
      best_backing->bo = get_real_bo(amdgpu_winsys_bo(buf));
      best_backing->num_chunks = 1;
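The MIN3/MAX2 pair above sizes each new sparse backing buffer: one sixteenth of the sparse BO, capped at 8 MiB and at the still-unbacked remainder, with a floor of one sparse page. The same arithmetic as a standalone sketch (the 64 KiB value for RADEON_SPARSE_PAGE_SIZE is an assumption, not taken from this diff):

   #include <stdint.h>

   #define SPARSE_PAGE_SIZE (64u * 1024)   /* assumed page size */

   static uint64_t min3(uint64_t a, uint64_t b, uint64_t c)
   {
      uint64_t m = a < b ? a : b;
      return m < c ? m : c;
   }

   /* For a 1 GiB sparse BO with nothing backed yet:
    * MIN3(64 MiB, 8 MiB, 1 GiB) = 8 MiB.  Tiny BOs round up to a page. */
   static uint64_t backing_size(uint64_t bo_size, uint64_t backed_pages)
   {
      uint64_t size = min3(bo_size / 16,
                           8ull * 1024 * 1024,
                           bo_size - backed_pages * SPARSE_PAGE_SIZE);
      return size > SPARSE_PAGE_SIZE ? size : SPARSE_PAGE_SIZE;
   }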
@@ -937,7 +937,7 @@ static void
 sparse_free_backing_buffer(struct amdgpu_winsys *ws, struct amdgpu_bo_sparse *bo,
                            struct amdgpu_sparse_backing *backing)
 {
-   bo->num_backing_pages -= backing->bo->b.base.base.size / RADEON_SPARSE_PAGE_SIZE;
+   bo->num_backing_pages -= backing->bo->b.base.size / RADEON_SPARSE_PAGE_SIZE;
 
    simple_mtx_lock(&ws->bo_fence_lock);
    amdgpu_add_fences(&backing->bo->b, bo->b.num_fences, bo->b.fences);

@@ -1008,13 +1008,13 @@ sparse_backing_free(struct amdgpu_winsys *ws, struct amdgpu_bo_sparse *bo,
    }
 
    if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
-       backing->chunks[0].end == backing->bo->b.base.base.size / RADEON_SPARSE_PAGE_SIZE)
+       backing->chunks[0].end == backing->bo->b.base.size / RADEON_SPARSE_PAGE_SIZE)
       sparse_free_backing_buffer(ws, bo, backing);
 
    return true;
 }
 
-static void amdgpu_bo_sparse_destroy(struct radeon_winsys *rws, struct pb_buffer *_buf)
+static void amdgpu_bo_sparse_destroy(struct radeon_winsys *rws, struct pb_buffer_lean *_buf)
 {
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    struct amdgpu_bo_sparse *bo = get_sparse_bo(amdgpu_winsys_bo(_buf));
|
|||
FREE(bo);
|
||||
}
|
||||
|
||||
static struct pb_buffer *
|
||||
static struct pb_buffer_lean *
|
||||
amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
|
||||
enum radeon_bo_domain domain,
|
||||
enum radeon_bo_flag flags)
|
||||
|
|
@ -1061,11 +1061,11 @@ amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
|
|||
return NULL;
|
||||
|
||||
simple_mtx_init(&bo->lock, mtx_plain);
|
||||
pipe_reference_init(&bo->b.base.base.reference, 1);
|
||||
bo->b.base.base.placement = domain;
|
||||
bo->b.base.base.alignment_log2 = util_logbase2(RADEON_SPARSE_PAGE_SIZE);
|
||||
bo->b.base.base.usage = flags;
|
||||
bo->b.base.base.size = size;
|
||||
pipe_reference_init(&bo->b.base.reference, 1);
|
||||
bo->b.base.placement = domain;
|
||||
bo->b.base.alignment_log2 = util_logbase2(RADEON_SPARSE_PAGE_SIZE);
|
||||
bo->b.base.usage = flags;
|
||||
bo->b.base.size = size;
|
||||
bo->b.unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
|
||||
bo->b.type = AMDGPU_BO_SPARSE;
|
||||
|
||||
|
|
@ -1104,7 +1104,7 @@ error_alloc_commitments:
|
|||
}
|
||||
|
||||
static bool
|
||||
amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer *buf,
|
||||
amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer_lean *buf,
|
||||
uint64_t offset, uint64_t size, bool commit)
|
||||
{
|
||||
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
|
||||
|
|
@ -1115,9 +1115,9 @@ amdgpu_bo_sparse_commit(struct radeon_winsys *rws, struct pb_buffer *buf,
|
|||
int r;
|
||||
|
||||
assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
|
||||
assert(offset <= bo->b.base.base.size);
|
||||
assert(size <= bo->b.base.base.size - offset);
|
||||
assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->b.base.base.size);
|
||||
assert(offset <= bo->b.base.size);
|
||||
assert(size <= bo->b.base.size - offset);
|
||||
assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->b.base.size);
|
||||
|
||||
comm = bo->commitments;
|
||||
va_page = offset / RADEON_SPARSE_PAGE_SIZE;
|
||||
|
|
@ -1233,7 +1233,7 @@ out:
|
|||
}
|
||||
|
||||
static unsigned
|
||||
amdgpu_bo_find_next_committed_memory(struct pb_buffer *buf,
|
||||
amdgpu_bo_find_next_committed_memory(struct pb_buffer_lean *buf,
|
||||
uint64_t range_offset, unsigned *range_size)
|
||||
{
|
||||
struct amdgpu_bo_sparse *bo = get_sparse_bo(amdgpu_winsys_bo(buf));
|
||||
|
|
@ -1245,7 +1245,7 @@ amdgpu_bo_find_next_committed_memory(struct pb_buffer *buf,
|
|||
if (*range_size == 0)
|
||||
return 0;
|
||||
|
||||
assert(*range_size + range_offset <= bo->b.base.base.size);
|
||||
assert(*range_size + range_offset <= bo->b.base.size);
|
||||
|
||||
uncommitted_range_prev = uncommitted_range_next = 0;
|
||||
comm = bo->commitments;
|
||||
|
|
@ -1287,7 +1287,7 @@ amdgpu_bo_find_next_committed_memory(struct pb_buffer *buf,
|
|||
}
|
||||
|
||||
static void amdgpu_buffer_get_metadata(struct radeon_winsys *rws,
|
||||
struct pb_buffer *_buf,
|
||||
struct pb_buffer_lean *_buf,
|
||||
struct radeon_bo_metadata *md,
|
||||
struct radeon_surf *surf)
|
||||
{
|
||||
|
|
@ -1308,7 +1308,7 @@ static void amdgpu_buffer_get_metadata(struct radeon_winsys *rws,
|
|||
}
|
||||
|
||||
static void amdgpu_buffer_set_metadata(struct radeon_winsys *rws,
|
||||
struct pb_buffer *_buf,
|
||||
struct pb_buffer_lean *_buf,
|
||||
struct radeon_bo_metadata *md,
|
||||
struct radeon_surf *surf)
|
||||
{
|
||||
|
|
@ -1324,7 +1324,7 @@ static void amdgpu_buffer_set_metadata(struct radeon_winsys *rws,
|
|||
amdgpu_bo_set_metadata(bo->bo, &metadata);
|
||||
}
|
||||
|
||||
struct pb_buffer *
|
||||
struct pb_buffer_lean *
|
||||
amdgpu_bo_create(struct amdgpu_winsys *ws,
|
||||
uint64_t size,
|
||||
unsigned alignment,
|
||||
|
|
@ -1381,9 +1381,9 @@ amdgpu_bo_create(struct amdgpu_winsys *ws,
|
|||
return NULL;
|
||||
|
||||
struct amdgpu_bo_slab_entry *slab_bo = container_of(entry, struct amdgpu_bo_slab_entry, entry);
|
||||
pipe_reference_init(&slab_bo->b.base.base.reference, 1);
|
||||
slab_bo->b.base.base.size = size;
|
||||
assert(alignment <= 1 << slab_bo->b.base.base.alignment_log2);
|
||||
pipe_reference_init(&slab_bo->b.base.reference, 1);
|
||||
slab_bo->b.base.size = size;
|
||||
assert(alignment <= 1 << slab_bo->b.base.alignment_log2);
|
||||
|
||||
if (domain & RADEON_DOMAIN_VRAM)
|
||||
ws->slab_wasted_vram += get_slab_wasted_size(ws, slab_bo);
|
||||
|
|
@ -1451,19 +1451,19 @@ no_slab:
|
|||
return &bo->base;
|
||||
}
|
||||
|
||||
static struct pb_buffer *
|
||||
static struct pb_buffer_lean *
|
||||
amdgpu_buffer_create(struct radeon_winsys *ws,
|
||||
uint64_t size,
|
||||
unsigned alignment,
|
||||
enum radeon_bo_domain domain,
|
||||
enum radeon_bo_flag flags)
|
||||
{
|
||||
struct pb_buffer * res = amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
|
||||
struct pb_buffer_lean * res = amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
|
||||
flags);
|
||||
return res;
|
||||
}
|
||||
|
||||
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
|
||||
static struct pb_buffer_lean *amdgpu_bo_from_handle(struct radeon_winsys *rws,
|
||||
struct winsys_handle *whandle,
|
||||
unsigned vm_alignment,
|
||||
bool is_prime_linear_buffer)
|
||||
|
|
@ -1501,7 +1501,7 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
|
|||
* counter and return it.
|
||||
*/
|
||||
if (bo) {
|
||||
p_atomic_inc(&bo->b.base.base.reference.count);
|
||||
p_atomic_inc(&bo->b.base.reference.count);
|
||||
simple_mtx_unlock(&ws->bo_export_table_lock);
|
||||
|
||||
/* Release the buffer handle, because we don't need it anymore.
|
||||
|
|
@ -1555,12 +1555,12 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
|
|||
}
|
||||
|
||||
/* Initialize the structure. */
|
||||
pipe_reference_init(&bo->b.base.base.reference, 1);
|
||||
bo->b.base.base.placement = initial;
|
||||
bo->b.base.base.alignment_log2 = util_logbase2(info.phys_alignment ?
|
||||
pipe_reference_init(&bo->b.base.reference, 1);
|
||||
bo->b.base.placement = initial;
|
||||
bo->b.base.alignment_log2 = util_logbase2(info.phys_alignment ?
|
||||
info.phys_alignment : ws->info.gart_page_size);
|
||||
bo->b.base.base.usage = flags;
|
||||
bo->b.base.base.size = result.alloc_size;
|
||||
bo->b.base.usage = flags;
|
||||
bo->b.base.size = result.alloc_size;
|
||||
bo->b.type = AMDGPU_BO_REAL;
|
||||
bo->gpu_address = va;
|
||||
bo->b.unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
|
||||
|
|
@ -1569,10 +1569,10 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
|
|||
bo->va_handle = va_handle;
|
||||
bo->is_shared = true;
|
||||
|
||||
if (bo->b.base.base.placement & RADEON_DOMAIN_VRAM)
|
||||
ws->allocated_vram += align64(bo->b.base.base.size, ws->info.gart_page_size);
|
||||
else if (bo->b.base.base.placement & RADEON_DOMAIN_GTT)
|
||||
ws->allocated_gtt += align64(bo->b.base.base.size, ws->info.gart_page_size);
|
||||
if (bo->b.base.placement & RADEON_DOMAIN_VRAM)
|
||||
ws->allocated_vram += align64(bo->b.base.size, ws->info.gart_page_size);
|
||||
else if (bo->b.base.placement & RADEON_DOMAIN_GTT)
|
||||
ws->allocated_gtt += align64(bo->b.base.size, ws->info.gart_page_size);
|
||||
|
||||
amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->kms_handle);
|
||||
|
||||
|
|
@ -1594,7 +1594,7 @@ error:
|
|||
}
|
||||
|
||||
static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
|
||||
struct pb_buffer *buffer,
|
||||
struct pb_buffer_lean *buffer,
|
||||
struct winsys_handle *whandle)
|
||||
{
|
||||
struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
|
||||
|
|
@ -1679,7 +1679,7 @@ static bool amdgpu_bo_get_handle(struct radeon_winsys *rws,
|
|||
return true;
|
||||
}
|
||||
|
||||
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
|
||||
static struct pb_buffer_lean *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
|
||||
void *pointer, uint64_t size,
|
||||
enum radeon_bo_flag flags)
|
||||
{
|
||||
|
|
@ -1711,10 +1711,10 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
|
|||
|
||||
/* Initialize it. */
|
||||
bo->is_user_ptr = true;
|
||||
pipe_reference_init(&bo->b.base.base.reference, 1);
|
||||
bo->b.base.base.placement = RADEON_DOMAIN_GTT;
|
||||
bo->b.base.base.alignment_log2 = 0;
|
||||
bo->b.base.base.size = size;
|
||||
pipe_reference_init(&bo->b.base.reference, 1);
|
||||
bo->b.base.placement = RADEON_DOMAIN_GTT;
|
||||
bo->b.base.alignment_log2 = 0;
|
||||
bo->b.base.size = size;
|
||||
bo->b.type = AMDGPU_BO_REAL;
|
||||
bo->gpu_address = va;
|
||||
bo->b.unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
|
||||
|
|
@ -1729,7 +1729,7 @@ static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
|
|||
|
||||
amdgpu_bo_export(bo->bo, amdgpu_bo_handle_type_kms, &bo->kms_handle);
|
||||
|
||||
return (struct pb_buffer*)bo;
|
||||
return (struct pb_buffer_lean*)bo;
|
||||
|
||||
error_va_map:
|
||||
amdgpu_va_range_free(va_handle);
|
||||
|
|
@ -1742,21 +1742,21 @@ error:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
|
||||
static bool amdgpu_bo_is_user_ptr(struct pb_buffer_lean *buf)
|
||||
{
|
||||
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
|
||||
|
||||
return is_real_bo(bo) ? get_real_bo(bo)->is_user_ptr : false;
|
||||
}
|
||||
|
||||
static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
|
||||
static bool amdgpu_bo_is_suballocated(struct pb_buffer_lean *buf)
|
||||
{
|
||||
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
|
||||
|
||||
return bo->type == AMDGPU_BO_SLAB_ENTRY;
|
||||
}
|
||||
|
||||
uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
|
||||
uint64_t amdgpu_bo_get_va(struct pb_buffer_lean *buf)
|
||||
{
|
||||
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
|
||||
|
||||
|
|
@ -1772,7 +1772,7 @@ uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
|
|||
}
|
||||
}
|
||||
|
||||
static void amdgpu_buffer_destroy(struct radeon_winsys *ws, struct pb_buffer *buf)
|
||||
static void amdgpu_buffer_destroy(struct radeon_winsys *ws, struct pb_buffer_lean *buf)
|
||||
{
|
||||
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
|
||||
|
||||
|
|
|
|||
|
|
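With the vtbl gone from the buffer itself, the winsys buffer_destroy callback has to choose the destructor, and the natural key is the amdgpu_winsys_bo type tag these hunks keep initializing. A sketch of that dispatch shape, not the literal Mesa body, which also distinguishes the reusable/cached real-BO variants:

   /* Sketch only: route destruction by tag instead of per-buffer vtbl. */
   static void buffer_destroy_sketch(struct radeon_winsys *rws,
                                     struct pb_buffer_lean *buf)
   {
      struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);

      switch (bo->type) {
      case AMDGPU_BO_SLAB_ENTRY:
         amdgpu_bo_slab_destroy(rws, buf);     /* return entry to its slab */
         break;
      case AMDGPU_BO_SPARSE:
         amdgpu_bo_sparse_destroy(rws, buf);   /* drop page commitments */
         break;
      default:
         amdgpu_bo_destroy_or_cache(rws, buf); /* real BOs, maybe pb_cache */
         break;
      }
   }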
@@ -48,7 +48,7 @@ enum amdgpu_bo_type {
 
 /* Base class of the buffer object that other structures inherit. */
 struct amdgpu_winsys_bo {
-   struct pb_buffer base;
+   struct pb_buffer_lean base;
    enum amdgpu_bo_type type;
 
    uint32_t unique_id;

@@ -153,7 +153,7 @@ static struct amdgpu_bo_real_reusable *get_real_bo_reusable(struct amdgpu_winsys
 
 static struct amdgpu_bo_sparse *get_sparse_bo(struct amdgpu_winsys_bo *bo)
 {
-   assert(bo->type == AMDGPU_BO_SPARSE && bo->base.base.usage & RADEON_FLAG_SPARSE);
+   assert(bo->type == AMDGPU_BO_SPARSE && bo->base.usage & RADEON_FLAG_SPARSE);
    return (struct amdgpu_bo_sparse*)bo;
 }

@@ -174,28 +174,28 @@ static struct amdgpu_bo_real *get_slab_entry_real_bo(struct amdgpu_winsys_bo *bo
    return &get_bo_from_slab(((struct amdgpu_bo_slab_entry*)bo)->entry.slab)->b.b;
 }
 
-bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer *_buf);
-struct pb_buffer *amdgpu_bo_create(struct amdgpu_winsys *ws,
+bool amdgpu_bo_can_reclaim(struct amdgpu_winsys *ws, struct pb_buffer_lean *_buf);
+struct pb_buffer_lean *amdgpu_bo_create(struct amdgpu_winsys *ws,
                                    uint64_t size,
                                    unsigned alignment,
                                    enum radeon_bo_domain domain,
                                    enum radeon_bo_flag flags);
-void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer *_buf);
+void amdgpu_bo_destroy(struct amdgpu_winsys *ws, struct pb_buffer_lean *_buf);
 void *amdgpu_bo_map(struct radeon_winsys *rws,
-                    struct pb_buffer *buf,
+                    struct pb_buffer_lean *buf,
                     struct radeon_cmdbuf *rcs,
                     enum pipe_map_flags usage);
-void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *buf);
+void amdgpu_bo_unmap(struct radeon_winsys *rws, struct pb_buffer_lean *buf);
 void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws);
 
 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
                                      unsigned group_index);
 void amdgpu_bo_slab_free(struct amdgpu_winsys *ws, struct pb_slab *slab);
-uint64_t amdgpu_bo_get_va(struct pb_buffer *buf);
+uint64_t amdgpu_bo_get_va(struct pb_buffer_lean *buf);
 
 static inline
-struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
+struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer_lean *bo)
 {
    return (struct amdgpu_winsys_bo *)bo;
 }

@@ -206,7 +206,7 @@ void amdgpu_winsys_bo_reference(struct amdgpu_winsys *ws,
                                 struct amdgpu_winsys_bo *src)
 {
    radeon_bo_reference(&ws->dummy_ws.base,
-                       (struct pb_buffer**)dst, (struct pb_buffer*)src);
+                       (struct pb_buffer_lean**)dst, (struct pb_buffer_lean*)src);
 }
 
 #endif
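This header is where the shortened accessor chains in the previous file come from: amdgpu_winsys_bo embeds pb_buffer_lean directly as its base, and each subclass adds one more hop, so a real BO reads bo->b.base.size where it used to read bo->b.base.base.size. A simplified containment sketch (subclass payload elided; assumes the definitions above):

   struct amdgpu_bo_real_sketch {
      struct amdgpu_winsys_bo b;   /* b.base is the pb_buffer_lean */
      /* kernel handle, VA range, fences, ... */
   };

   /* Accessor arithmetic after this change:
    *    bo->b.base.size       one .base: the embedded pb_buffer_lean
    * versus before:
    *    bo->b.base.base.size  pb_buffer, then its embedded lean base
    */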
@@ -652,7 +652,7 @@ amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs_context *cs, struct amdgpu_win
 }
 
 static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
-                                     struct pb_buffer *buf,
+                                     struct pb_buffer_lean *buf,
                                      unsigned usage,
                                      enum radeon_bo_domain domains)
 {

@@ -699,7 +699,7 @@ static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
                                  struct amdgpu_ib *main_ib,
                                  struct amdgpu_cs *cs)
 {
-   struct pb_buffer *pb;
+   struct pb_buffer_lean *pb;
    uint8_t *mapped;
    unsigned buffer_size;

@@ -795,7 +795,7 @@ static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws,
 
    /* Allocate a new buffer for IBs if the current buffer is all used. */
    if (!main_ib->big_buffer ||
-       main_ib->used_ib_space + ib_size > main_ib->big_buffer->base.size) {
+       main_ib->used_ib_space + ib_size > main_ib->big_buffer->size) {
       if (!amdgpu_ib_new_buffer(ws, main_ib, cs))
          return false;
    }

@@ -814,7 +814,7 @@ static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws,
 
    cs->csc->ib_main_addr = rcs->current.buf;
 
-   ib_size = main_ib->big_buffer->base.size - main_ib->used_ib_space;
+   ib_size = main_ib->big_buffer->size - main_ib->used_ib_space;
    rcs->current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs);
    return true;
 }

@@ -1026,7 +1026,7 @@ amdgpu_cs_setup_preemption(struct radeon_cmdbuf *rcs, const uint32_t *preamble_i
    struct amdgpu_winsys *ws = cs->ws;
    struct amdgpu_cs_context *csc[2] = {&cs->csc1, &cs->csc2};
    unsigned size = align(preamble_num_dw * 4, ws->info.ip[AMD_IP_GFX].ib_alignment);
-   struct pb_buffer *preamble_bo;
+   struct pb_buffer_lean *preamble_bo;
    uint32_t *map;
 
    /* Create the preamble IB buffer. */

@@ -1146,7 +1146,7 @@ static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw)
    rcs->current.cdw = 0;
 
    rcs->current.buf = (uint32_t*)(main_ib->big_buffer_cpu_ptr + main_ib->used_ib_space);
-   rcs->current.max_dw = main_ib->big_buffer->base.size / 4 - cs_epilog_dw;
+   rcs->current.max_dw = main_ib->big_buffer->size / 4 - cs_epilog_dw;
 
    amdgpu_cs_add_buffer(rcs, main_ib->big_buffer,
                         RADEON_USAGE_READ | RADEON_PRIO_IB, 0);

@@ -1163,7 +1163,7 @@ static unsigned amdgpu_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
 
    if (list) {
       for (unsigned i = 0; i < num_real_buffers; i++) {
-         list[i].bo_size = real_buffers->buffers[i].bo->base.base.size;
+         list[i].bo_size = real_buffers->buffers[i].bo->base.size;
          list[i].vm_address = get_real_bo(real_buffers->buffers[i].bo)->gpu_address;
          list[i].priority_usage = real_buffers->buffers[i].usage;
      }

@@ -1785,7 +1785,7 @@ static void amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
 }
 
 static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
-                                    struct pb_buffer *_buf,
+                                    struct pb_buffer_lean *_buf,
                                     unsigned usage)
 {
    struct amdgpu_cs *cs = amdgpu_cs(rcs);
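The IB sizing in amdgpu_get_new_ib and amdgpu_cs_check_space above is plain arithmetic over the now directly accessible buffer size: remaining bytes divided by 4 give dwords, minus the epilog reservation. A small worked sketch (the epilog count is invented for the example):

   #include <assert.h>
   #include <stdint.h>

   /* dwords available for commands in the big IB buffer:
    * (buffer_size - used) bytes -> /4 dwords -> minus epilog slack */
   static unsigned ib_max_dw(uint64_t buffer_size, unsigned used_ib_space,
                             unsigned epilog_dw)
   {
      uint64_t ib_size = buffer_size - used_ib_space;
      return ib_size / 4 - epilog_dw;
   }

   int main(void)
   {
      /* a 256 KiB IB buffer, 64 KiB already consumed, 16-dw epilog:
       * 196608 bytes left = 49152 dwords, 49136 usable */
      assert(ib_max_dw(256 * 1024, 64 * 1024, 16) == 49136);
      return 0;
   }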
@@ -48,7 +48,7 @@ enum ib_type {
 
 struct amdgpu_ib {
    /* A buffer out of which new IBs are allocated. */
-   struct pb_buffer *big_buffer;
+   struct pb_buffer_lean *big_buffer;
    uint8_t *big_buffer_cpu_ptr;
    uint64_t gpu_address;
    unsigned used_ib_space;

@@ -145,7 +145,7 @@ struct amdgpu_cs {
 
    struct util_queue_fence flush_completed;
    struct pipe_fence_handle *next_fence;
-   struct pb_buffer *preamble_ib_bo;
+   struct pb_buffer_lean *preamble_ib_bo;
 
    struct drm_amdgpu_cs_chunk_cp_gfx_shadow mcbp_fw_shadow_chunk;
 };
@ -21,14 +21,14 @@
|
|||
#include <stdio.h>
|
||||
#include <inttypes.h>
|
||||
|
||||
static struct pb_buffer *
|
||||
static struct pb_buffer_lean *
|
||||
radeon_winsys_bo_create(struct radeon_winsys *rws,
|
||||
uint64_t size,
|
||||
unsigned alignment,
|
||||
enum radeon_bo_domain domain,
|
||||
enum radeon_bo_flag flags);
|
||||
|
||||
static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
|
||||
static inline struct radeon_bo *radeon_bo(struct pb_buffer_lean *bo)
|
||||
{
|
||||
return (struct radeon_bo *)bo;
|
||||
}
|
||||
|
|
@ -109,7 +109,7 @@ static void radeon_bo_wait_idle(struct radeon_winsys *rws, struct radeon_bo *bo)
|
|||
}
|
||||
|
||||
static bool radeon_bo_wait(struct radeon_winsys *rws,
|
||||
struct pb_buffer *_buf, uint64_t timeout,
|
||||
struct pb_buffer_lean *_buf, uint64_t timeout,
|
||||
unsigned usage)
|
||||
{
|
||||
struct radeon_bo *bo = radeon_bo(_buf);
|
||||
|
|
@ -154,7 +154,7 @@ static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
|
|||
}
|
||||
|
||||
static enum radeon_bo_domain radeon_bo_get_initial_domain(
|
||||
struct pb_buffer *buf)
|
||||
struct pb_buffer_lean *buf)
|
||||
{
|
||||
struct radeon_bo *bo = (struct radeon_bo*)buf;
|
||||
struct drm_radeon_gem_op args;
|
||||
|
|
@ -329,7 +329,7 @@ out:
|
|||
|
||||
void radeon_bo_destroy(void *winsys, struct pb_buffer_lean *_buf)
|
||||
{
|
||||
struct radeon_bo *bo = radeon_bo((struct pb_buffer*)_buf);
|
||||
struct radeon_bo *bo = radeon_bo((struct pb_buffer_lean*)_buf);
|
||||
struct radeon_drm_winsys *rws = bo->rws;
|
||||
struct drm_gem_close args;
|
||||
|
||||
|
|
@ -339,7 +339,7 @@ void radeon_bo_destroy(void *winsys, struct pb_buffer_lean *_buf)
|
|||
|
||||
mtx_lock(&rws->bo_handles_mutex);
|
||||
/* radeon_winsys_bo_from_handle might have revived the bo */
|
||||
if (pipe_is_referenced(&bo->base.base.reference)) {
|
||||
if (pipe_is_referenced(&bo->base.reference)) {
|
||||
mtx_unlock(&rws->bo_handles_mutex);
|
||||
return;
|
||||
}
|
||||
|
|
@ -351,7 +351,7 @@ void radeon_bo_destroy(void *winsys, struct pb_buffer_lean *_buf)
|
|||
mtx_unlock(&rws->bo_handles_mutex);
|
||||
|
||||
if (bo->u.real.ptr)
|
||||
os_munmap(bo->u.real.ptr, bo->base.base.size);
|
||||
os_munmap(bo->u.real.ptr, bo->base.size);
|
||||
|
||||
if (rws->info.r600_has_virtual_memory) {
|
||||
if (rws->va_unmap_working) {
|
||||
|
|
@ -369,14 +369,14 @@ void radeon_bo_destroy(void *winsys, struct pb_buffer_lean *_buf)
|
|||
sizeof(va)) != 0 &&
|
||||
va.operation == RADEON_VA_RESULT_ERROR) {
|
||||
fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
|
||||
fprintf(stderr, "radeon: size : %"PRIu64" bytes\n", bo->base.base.size);
|
||||
fprintf(stderr, "radeon: size : %"PRIu64" bytes\n", bo->base.size);
|
||||
fprintf(stderr, "radeon: va : 0x%"PRIx64"\n", bo->va);
|
||||
}
|
||||
}
|
||||
|
||||
radeon_bomgr_free_va(&rws->info,
|
||||
bo->va < rws->vm32.end ? &rws->vm32 : &rws->vm64,
|
||||
bo->va, bo->base.base.size);
|
||||
bo->va, bo->base.size);
|
||||
}
|
||||
|
||||
/* Close object. */
|
||||
|
|
@ -386,22 +386,22 @@ void radeon_bo_destroy(void *winsys, struct pb_buffer_lean *_buf)
|
|||
mtx_destroy(&bo->u.real.map_mutex);
|
||||
|
||||
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
|
||||
rws->allocated_vram -= align(bo->base.base.size, rws->info.gart_page_size);
|
||||
rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
|
||||
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
|
||||
rws->allocated_gtt -= align(bo->base.base.size, rws->info.gart_page_size);
|
||||
rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);
|
||||
|
||||
if (bo->u.real.map_count >= 1) {
|
||||
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
|
||||
bo->rws->mapped_vram -= bo->base.base.size;
|
||||
bo->rws->mapped_vram -= bo->base.size;
|
||||
else
|
||||
bo->rws->mapped_gtt -= bo->base.base.size;
|
||||
bo->rws->mapped_gtt -= bo->base.size;
|
||||
bo->rws->num_mapped_buffers--;
|
||||
}
|
||||
|
||||
FREE(bo);
|
||||
}
|
||||
|
||||
static void radeon_bo_destroy_or_cache(void *winsys, struct pb_buffer *_buf)
|
||||
static void radeon_bo_destroy_or_cache(void *winsys, struct pb_buffer_lean *_buf)
|
||||
{
|
||||
struct radeon_drm_winsys *rws = (struct radeon_drm_winsys *)winsys;
|
||||
struct radeon_bo *bo = radeon_bo(_buf);
|
||||
|
|
@ -411,7 +411,7 @@ static void radeon_bo_destroy_or_cache(void *winsys, struct pb_buffer *_buf)
|
|||
if (bo->u.real.use_reusable_pool)
|
||||
pb_cache_add_buffer(&rws->bo_cache, &bo->u.real.cache_entry);
|
||||
else
|
||||
radeon_bo_destroy(NULL, &_buf->base);
|
||||
radeon_bo_destroy(NULL, _buf);
|
||||
}
|
||||
|
||||
void *radeon_bo_do_map(struct radeon_bo *bo)
|
||||
|
|
@ -441,7 +441,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
|
|||
}
|
||||
args.handle = bo->handle;
|
||||
args.offset = 0;
|
||||
args.size = (uint64_t)bo->base.base.size;
|
||||
args.size = (uint64_t)bo->base.size;
|
||||
if (drmCommandWriteRead(bo->rws->fd,
|
||||
DRM_RADEON_GEM_MMAP,
|
||||
&args,
|
||||
|
|
@ -470,9 +470,9 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
|
|||
bo->u.real.map_count = 1;
|
||||
|
||||
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
|
||||
bo->rws->mapped_vram += bo->base.base.size;
|
||||
bo->rws->mapped_vram += bo->base.size;
|
||||
else
|
||||
bo->rws->mapped_gtt += bo->base.base.size;
|
||||
bo->rws->mapped_gtt += bo->base.size;
|
||||
bo->rws->num_mapped_buffers++;
|
||||
|
||||
mtx_unlock(&bo->u.real.map_mutex);
|
||||
|
|
@ -480,7 +480,7 @@ void *radeon_bo_do_map(struct radeon_bo *bo)
|
|||
}
|
||||
|
||||
static void *radeon_bo_map(struct radeon_winsys *rws,
|
||||
struct pb_buffer *buf,
|
||||
struct pb_buffer_lean *buf,
|
||||
struct radeon_cmdbuf *rcs,
|
||||
enum pipe_map_flags usage)
|
||||
{
|
||||
|
|
@ -505,7 +505,7 @@ static void *radeon_bo_map(struct radeon_winsys *rws,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (!radeon_bo_wait(rws, (struct pb_buffer*)bo, 0,
|
||||
if (!radeon_bo_wait(rws, (struct pb_buffer_lean*)bo, 0,
|
||||
RADEON_USAGE_WRITE)) {
|
||||
return NULL;
|
||||
}
|
||||
|
|
@ -516,7 +516,7 @@ static void *radeon_bo_map(struct radeon_winsys *rws,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (!radeon_bo_wait(rws, (struct pb_buffer*)bo, 0,
|
||||
if (!radeon_bo_wait(rws, (struct pb_buffer_lean*)bo, 0,
|
||||
RADEON_USAGE_READWRITE)) {
|
||||
return NULL;
|
||||
}
|
||||
|
|
@ -536,7 +536,7 @@ static void *radeon_bo_map(struct radeon_winsys *rws,
|
|||
cs->flush_cs(cs->flush_data,
|
||||
RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
|
||||
}
|
||||
radeon_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
|
||||
radeon_bo_wait(rws, (struct pb_buffer_lean*)bo, OS_TIMEOUT_INFINITE,
|
||||
RADEON_USAGE_WRITE);
|
||||
} else {
|
||||
/* Mapping for write. */
|
||||
|
|
@ -551,7 +551,7 @@ static void *radeon_bo_map(struct radeon_winsys *rws,
|
|||
}
|
||||
}
|
||||
|
||||
radeon_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
|
||||
radeon_bo_wait(rws, (struct pb_buffer_lean*)bo, OS_TIMEOUT_INFINITE,
|
||||
RADEON_USAGE_READWRITE);
|
||||
}
|
||||
|
||||
|
|
@ -562,7 +562,7 @@ static void *radeon_bo_map(struct radeon_winsys *rws,
|
|||
return radeon_bo_do_map(bo);
|
||||
}
|
||||
|
||||
static void radeon_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *_buf)
|
||||
static void radeon_bo_unmap(struct radeon_winsys *rws, struct pb_buffer_lean *_buf)
|
||||
{
|
||||
struct radeon_bo *bo = (struct radeon_bo*)_buf;
|
||||
|
||||
|
|
@ -584,13 +584,13 @@ static void radeon_bo_unmap(struct radeon_winsys *rws, struct pb_buffer *_buf)
|
|||
return; /* it's been mapped multiple times */
|
||||
}
|
||||
|
||||
os_munmap(bo->u.real.ptr, bo->base.base.size);
|
||||
os_munmap(bo->u.real.ptr, bo->base.size);
|
||||
bo->u.real.ptr = NULL;
|
||||
|
||||
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
|
||||
bo->rws->mapped_vram -= bo->base.base.size;
|
||||
bo->rws->mapped_vram -= bo->base.size;
|
||||
else
|
||||
bo->rws->mapped_gtt -= bo->base.base.size;
|
||||
bo->rws->mapped_gtt -= bo->base.size;
|
||||
bo->rws->num_mapped_buffers--;
|
||||
|
||||
mtx_unlock(&bo->u.real.map_mutex);
|
||||
|
|
@ -645,10 +645,10 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
|
|||
if (!bo)
|
||||
return NULL;
|
||||
|
||||
pipe_reference_init(&bo->base.base.reference, 1);
|
||||
bo->base.base.alignment_log2 = util_logbase2(alignment);
|
||||
bo->base.base.usage = 0;
|
||||
bo->base.base.size = size;
|
||||
pipe_reference_init(&bo->base.reference, 1);
|
||||
bo->base.alignment_log2 = util_logbase2(alignment);
|
||||
bo->base.usage = 0;
|
||||
bo->base.size = size;
|
||||
bo->rws = rws;
|
||||
bo->handle = args.handle;
|
||||
bo->va = 0;
|
||||
|
|
@ -657,7 +657,7 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
|
|||
(void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
|
||||
|
||||
if (heap >= 0) {
|
||||
pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base.base,
|
||||
pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
|
||||
heap);
|
||||
}
|
||||
|
||||
|
|
@ -689,12 +689,12 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
|
|||
fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
|
||||
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
|
||||
fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
|
||||
radeon_bo_destroy(NULL, &bo->base.base);
|
||||
radeon_bo_destroy(NULL, &bo->base);
|
||||
return NULL;
|
||||
}
|
||||
mtx_lock(&rws->bo_handles_mutex);
|
||||
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
|
||||
struct pb_buffer *b = &bo->base;
|
||||
struct pb_buffer_lean *b = &bo->base;
|
||||
struct radeon_bo *old_bo =
|
||||
_mesa_hash_table_u64_search(rws->bo_vas, va.offset);
|
||||
|
||||
|
|
@ -717,22 +717,22 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
|
|||
|
||||
bool radeon_bo_can_reclaim(void *winsys, struct pb_buffer_lean *_buf)
|
||||
{
|
||||
struct radeon_bo *bo = radeon_bo((struct pb_buffer*)_buf);
|
||||
struct radeon_bo *bo = radeon_bo((struct pb_buffer_lean*)_buf);
|
||||
|
||||
if (radeon_bo_is_referenced_by_any_cs(bo))
|
||||
return false;
|
||||
|
||||
return radeon_bo_wait(winsys, (struct pb_buffer*)_buf, 0, RADEON_USAGE_READWRITE);
|
||||
return radeon_bo_wait(winsys, (struct pb_buffer_lean*)_buf, 0, RADEON_USAGE_READWRITE);
|
||||
}
|
||||
|
||||
bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
|
||||
{
|
||||
struct radeon_bo *bo = container_of(entry, struct radeon_bo, u.slab.entry);
|
||||
|
||||
return radeon_bo_can_reclaim(NULL, &bo->base.base);
|
||||
return radeon_bo_can_reclaim(NULL, &bo->base);
|
||||
}
|
||||
|
||||
static void radeon_bo_slab_destroy(void *winsys, struct pb_buffer *_buf)
|
||||
static void radeon_bo_slab_destroy(void *winsys, struct pb_buffer_lean *_buf)
|
||||
{
|
||||
struct radeon_bo *bo = radeon_bo(_buf);
|
||||
|
||||
|
|
@ -762,7 +762,7 @@ struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
|
|||
|
||||
assert(slab->buffer->handle);
|
||||
|
||||
slab->base.num_entries = slab->buffer->base.base.size / entry_size;
|
||||
slab->base.num_entries = slab->buffer->base.size / entry_size;
|
||||
slab->base.num_free = slab->base.num_entries;
|
||||
slab->base.group_index = group_index;
|
||||
slab->base.entry_size = entry_size;
|
||||
|
|
@ -777,9 +777,9 @@ struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
|
|||
for (unsigned i = 0; i < slab->base.num_entries; ++i) {
|
||||
struct radeon_bo *bo = &slab->entries[i];
|
||||
|
||||
bo->base.base.alignment_log2 = util_logbase2(entry_size);
|
||||
bo->base.base.usage = slab->buffer->base.base.usage;
|
||||
bo->base.base.size = entry_size;
|
||||
bo->base.alignment_log2 = util_logbase2(entry_size);
|
||||
bo->base.usage = slab->buffer->base.usage;
|
||||
bo->base.size = entry_size;
|
||||
bo->rws = ws;
|
||||
bo->va = slab->buffer->va + i * entry_size;
|
||||
bo->initial_domain = domains;
|
||||
|
|
@ -846,7 +846,7 @@ static unsigned eg_tile_split_rev(unsigned eg_tile_split)
|
|||
}
|
||||
|
||||
static void radeon_bo_get_metadata(struct radeon_winsys *rws,
|
||||
struct pb_buffer *_buf,
|
||||
struct pb_buffer_lean *_buf,
|
||||
struct radeon_bo_metadata *md,
|
||||
struct radeon_surf *surf)
|
||||
{
|
||||
|
|
@ -904,7 +904,7 @@ static void radeon_bo_get_metadata(struct radeon_winsys *rws,
|
|||
}
|
||||
|
||||
static void radeon_bo_set_metadata(struct radeon_winsys *rws,
|
||||
struct pb_buffer *_buf,
|
||||
struct pb_buffer_lean *_buf,
|
||||
struct radeon_bo_metadata *md,
|
||||
struct radeon_surf *surf)
|
||||
{
|
||||
|
|
@ -974,7 +974,7 @@ static void radeon_bo_set_metadata(struct radeon_winsys *rws,
|
|||
sizeof(args));
|
||||
}
|
||||
|
||||
static struct pb_buffer *
|
||||
static struct pb_buffer_lean *
|
||||
radeon_winsys_bo_create(struct radeon_winsys *rws,
|
||||
uint64_t size,
|
||||
unsigned alignment,
|
||||
|
|
@ -1013,7 +1013,7 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
|
|||
|
||||
bo = container_of(entry, struct radeon_bo, u.slab.entry);
|
||||
|
||||
pipe_reference_init(&bo->base.base.reference, 1);
|
||||
pipe_reference_init(&bo->base.reference, 1);
|
||||
|
||||
return &bo->base;
|
||||
}
|
||||
|
|
@ -1034,7 +1034,7 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
|
|||
heap = radeon_get_heap_index(domain, flags & ~RADEON_FLAG_NO_SUBALLOC);
|
||||
assert(heap >= 0 && heap < RADEON_NUM_HEAPS);
|
||||
|
||||
bo = radeon_bo((struct pb_buffer*)pb_cache_reclaim_buffer(&ws->bo_cache, size,
|
||||
bo = radeon_bo((struct pb_buffer_lean*)pb_cache_reclaim_buffer(&ws->bo_cache, size,
|
||||
alignment, 0, heap));
|
||||
if (bo)
|
||||
return &bo->base;
|
||||
|
|
@ -1060,7 +1060,7 @@ radeon_winsys_bo_create(struct radeon_winsys *rws,
|
|||
return &bo->base;
|
||||
}
|
||||
|
||||
static void radeon_winsys_bo_destroy(struct radeon_winsys *ws, struct pb_buffer *buf)
|
||||
static void radeon_winsys_bo_destroy(struct radeon_winsys *ws, struct pb_buffer_lean *buf)
|
||||
{
|
||||
struct radeon_bo *bo = radeon_bo(buf);
|
||||
|
||||
|
|
@ -1070,7 +1070,7 @@ static void radeon_winsys_bo_destroy(struct radeon_winsys *ws, struct pb_buffer
|
|||
radeon_bo_slab_destroy(ws, buf);
|
||||
}
|
||||
|
||||
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
|
||||
static struct pb_buffer_lean *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
|
||||
void *pointer, uint64_t size,
|
||||
enum radeon_bo_flag flags)
|
||||
{
|
||||
|
|
@ -1106,10 +1106,10 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
|
|||
mtx_lock(&ws->bo_handles_mutex);
|
||||
|
||||
/* Initialize it. */
|
||||
pipe_reference_init(&bo->base.base.reference, 1);
|
||||
pipe_reference_init(&bo->base.reference, 1);
|
||||
bo->handle = args.handle;
|
||||
bo->base.base.alignment_log2 = 0;
|
||||
bo->base.base.size = size;
|
||||
bo->base.alignment_log2 = 0;
|
||||
bo->base.size = size;
|
||||
bo->rws = ws;
|
||||
bo->user_ptr = pointer;
|
||||
bo->va = 0;
|
||||
|
|
@@ -1124,7 +1124,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
    if (ws->info.r600_has_virtual_memory) {
       struct drm_radeon_gem_va va;
 
-      bo->va = radeon_bomgr_find_va64(ws, bo->base.base.size, 1 << 20);
+      bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);
 
       va.handle = bo->handle;
       va.operation = RADEON_VA_MAP;
@@ -1137,12 +1137,12 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
       r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
       if (r && va.operation == RADEON_VA_RESULT_ERROR) {
          fprintf(stderr, "radeon: Failed to assign virtual address space\n");
-         radeon_bo_destroy(NULL, &bo->base.base);
+         radeon_bo_destroy(NULL, &bo->base);
          return NULL;
       }
       mtx_lock(&ws->bo_handles_mutex);
       if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
-         struct pb_buffer *b = &bo->base;
+         struct pb_buffer_lean *b = &bo->base;
          struct radeon_bo *old_bo =
             _mesa_hash_table_u64_search(ws->bo_vas, va.offset);
 
@@ -1155,12 +1155,12 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
       mtx_unlock(&ws->bo_handles_mutex);
    }
 
-   ws->allocated_gtt += align(bo->base.base.size, ws->info.gart_page_size);
+   ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
 
-   return (struct pb_buffer*)bo;
+   return (struct pb_buffer_lean*)bo;
 }
 
-static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
+static struct pb_buffer_lean *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                       struct winsys_handle *whandle,
                                                       unsigned vm_alignment,
                                                       bool is_dri_prime_linear_buffer)
@@ -1195,7 +1195,7 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
 
    if (bo) {
       /* Increase the refcount. */
-      p_atomic_inc(&bo->base.base.reference.count);
+      p_atomic_inc(&bo->base.reference.count);
       goto done;
    }
 
@@ -1235,9 +1235,9 @@ static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
    bo->handle = handle;
 
    /* Initialize it. */
-   pipe_reference_init(&bo->base.base.reference, 1);
-   bo->base.base.alignment_log2 = 0;
-   bo->base.base.size = (unsigned) size;
+   pipe_reference_init(&bo->base.reference, 1);
+   bo->base.alignment_log2 = 0;
+   bo->base.size = (unsigned) size;
    bo->rws = ws;
    bo->va = 0;
    bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
@@ -1254,7 +1254,7 @@ done:
    if (ws->info.r600_has_virtual_memory && !bo->va) {
       struct drm_radeon_gem_va va;
 
-      bo->va = radeon_bomgr_find_va64(ws, bo->base.base.size, vm_alignment);
+      bo->va = radeon_bomgr_find_va64(ws, bo->base.size, vm_alignment);
 
       va.handle = bo->handle;
       va.operation = RADEON_VA_MAP;
@@ -1267,12 +1267,12 @@ done:
       r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
       if (r && va.operation == RADEON_VA_RESULT_ERROR) {
          fprintf(stderr, "radeon: Failed to assign virtual address space\n");
-         radeon_bo_destroy(NULL, &bo->base.base);
+         radeon_bo_destroy(NULL, &bo->base);
          return NULL;
       }
       mtx_lock(&ws->bo_handles_mutex);
       if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
-         struct pb_buffer *b = &bo->base;
+         struct pb_buffer_lean *b = &bo->base;
          struct radeon_bo *old_bo =
             _mesa_hash_table_u64_search(ws->bo_vas, va.offset);
 
@@ -1288,11 +1288,11 @@ done:
    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
 
    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
-      ws->allocated_vram += align(bo->base.base.size, ws->info.gart_page_size);
+      ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
-      ws->allocated_gtt += align(bo->base.base.size, ws->info.gart_page_size);
+      ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
 
-   return (struct pb_buffer*)bo;
+   return (struct pb_buffer_lean*)bo;
 
 fail:
    mtx_unlock(&ws->bo_handles_mutex);
@@ -1300,7 +1300,7 @@ fail:
 }
 
 static bool radeon_winsys_bo_get_handle(struct radeon_winsys *rws,
-                                        struct pb_buffer *buffer,
+                                        struct pb_buffer_lean *buffer,
                                         struct winsys_handle *whandle)
 {
    struct drm_gem_flink flink;
@@ -1340,22 +1340,22 @@ static bool radeon_winsys_bo_get_handle(struct radeon_winsys *rws,
    return true;
 }
 
-static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
+static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer_lean *buf)
 {
    return ((struct radeon_bo*)buf)->user_ptr != NULL;
 }
 
-static bool radeon_winsys_bo_is_suballocated(struct pb_buffer *buf)
+static bool radeon_winsys_bo_is_suballocated(struct pb_buffer_lean *buf)
 {
    return !((struct radeon_bo*)buf)->handle;
 }
 
-static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
+static uint64_t radeon_winsys_bo_va(struct pb_buffer_lean *buf)
 {
    return ((struct radeon_bo*)buf)->va;
 }
 
-static unsigned radeon_winsys_bo_get_reloc_offset(struct pb_buffer *buf)
+static unsigned radeon_winsys_bo_get_reloc_offset(struct pb_buffer_lean *buf)
 {
    struct radeon_bo *bo = radeon_bo(buf);
 
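Note how the getters above cast struct pb_buffer_lean* straight to struct radeon_bo*. That is only valid because base is the first member of struct radeon_bo (see the header hunk below), so the two pointers share an address; the radeon_bo() helper used throughout presumably reduces to this sketch:

   static inline struct radeon_bo *radeon_bo(struct pb_buffer_lean *buf)
   {
      /* Safe only because 'base' is the first member of struct radeon_bo. */
      return (struct radeon_bo *)buf;
   }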
(next file — the winsys BO header with the struct definition, presumably radeon_drm_bo.h)
@@ -13,7 +13,7 @@
 #include "pipebuffer/pb_slab.h"
 
 struct radeon_bo {
-   struct pb_buffer base;
+   struct pb_buffer_lean base;
    union {
      struct {
         struct pb_cache_entry cache_entry;
@@ -70,7 +70,7 @@ static inline void
 radeon_ws_bo_reference(struct radeon_winsys *rws, struct radeon_bo **dst,
                        struct radeon_bo *src)
 {
-   radeon_bo_reference(rws, (struct pb_buffer**)dst, (struct pb_buffer*)src);
+   radeon_bo_reference(rws, (struct pb_buffer_lean**)dst, (struct pb_buffer_lean*)src);
 }
 
 void *radeon_bo_do_map(struct radeon_bo *bo);
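With the vtbl gone, dropping the last reference can no longer dispatch through a per-buffer destroy hook; the winsys has to destroy the BO itself when the count hits zero. A sketch of what radeon_bo_reference presumably does with lean buffers — the real implementation lives in the .c file, and pipe_reference() returning true means the old object must be destroyed:

   static void sketch_bo_reference(struct radeon_winsys *rws,
                                   struct pb_buffer_lean **dst,
                                   struct pb_buffer_lean *src)
   {
      struct pb_buffer_lean *old = *dst;

      /* pipe_reference() bumps src and drops old, NULL-safely. */
      if (pipe_reference(old ? &old->reference : NULL,
                         src ? &src->reference : NULL))
         radeon_bo_destroy(rws, old);   /* last reference went away */
      *dst = src;
   }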
(next file — the command-stream side of the winsys, presumably radeon_drm_cs.c)
@@ -373,7 +373,7 @@ static int radeon_lookup_or_add_slab_buffer(struct radeon_drm_cs *cs,
 }
 
 static unsigned radeon_drm_cs_add_buffer(struct radeon_cmdbuf *rcs,
-                                         struct pb_buffer *buf,
+                                         struct pb_buffer_lean *buf,
                                          unsigned usage,
                                          enum radeon_bo_domain domains)
 {
@@ -415,15 +415,15 @@ static unsigned radeon_drm_cs_add_buffer(struct radeon_cmdbuf *rcs,
       cs->csc->relocs_bo[index].u.real.priority_usage |= priority;
 
    if (added_domains & RADEON_DOMAIN_VRAM)
-      rcs->used_vram_kb += bo->base.base.size / 1024;
+      rcs->used_vram_kb += bo->base.size / 1024;
    else if (added_domains & RADEON_DOMAIN_GTT)
-      rcs->used_gart_kb += bo->base.base.size / 1024;
+      rcs->used_gart_kb += bo->base.size / 1024;
 
    return index;
 }
 
 static int radeon_drm_cs_lookup_buffer(struct radeon_cmdbuf *rcs,
-                                       struct pb_buffer *buf)
+                                       struct pb_buffer_lean *buf)
 {
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
@@ -483,7 +483,7 @@ static unsigned radeon_drm_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
 
    if (list) {
       for (i = 0; i < cs->csc->num_relocs; i++) {
-         list[i].bo_size = cs->csc->relocs_bo[i].bo->base.base.size;
+         list[i].bo_size = cs->csc->relocs_bo[i].bo->base.size;
         list[i].vm_address = cs->csc->relocs_bo[i].bo->va;
         list[i].priority_usage = cs->csc->relocs_bo[i].u.real.priority_usage;
      }
@@ -766,7 +766,7 @@ static void radeon_drm_cs_destroy(struct radeon_cmdbuf *rcs)
 }
 
 static bool radeon_bo_is_referenced(struct radeon_cmdbuf *rcs,
-                                    struct pb_buffer *_buf,
+                                    struct pb_buffer_lean *_buf,
                                     unsigned usage)
 {
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
@@ -796,7 +796,7 @@ static bool radeon_bo_is_referenced(struct radeon_cmdbuf *rcs,
 static struct pipe_fence_handle *radeon_cs_create_fence(struct radeon_cmdbuf *rcs)
 {
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
-   struct pb_buffer *fence;
+   struct pb_buffer_lean *fence;
 
    /* Create a fence, which is a dummy BO. */
    fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1,
@@ -816,7 +816,7 @@ static bool radeon_fence_wait(struct radeon_winsys *ws,
                               struct pipe_fence_handle *fence,
                               uint64_t timeout)
 {
-   return ws->buffer_wait(ws, (struct pb_buffer*)fence, timeout,
+   return ws->buffer_wait(ws, (struct pb_buffer_lean*)fence, timeout,
                           RADEON_USAGE_READWRITE);
 }
 
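The fence paths above rely on the trick the inline comment spells out: a radeon "fence" is just a 1-byte dummy BO created with buffer_create(&ws->base, 1, 1, ...), so fence_wait degenerates to buffer_wait and fence_reference (next hunk) to plain BO refcounting. A sketch of the wait side under that model — it assumes the fence BO was added to a command stream and flushed, which is what makes the kernel report it busy; the timeout constant is an assumption:

   static bool sketch_fence_wait(struct radeon_winsys *ws,
                                 struct pb_buffer_lean *fence)
   {
      /* The kernel keeps the fence BO busy until the submission that
       * referenced it retires, so waiting on the BO waits on the CS. */
      return ws->buffer_wait(ws, fence, UINT64_MAX /* wait forever */,
                             RADEON_USAGE_READWRITE);
   }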
@@ -824,7 +824,7 @@ static void radeon_fence_reference(struct radeon_winsys *ws,
                                   struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src)
 {
-   radeon_bo_reference(ws, (struct pb_buffer**)dst, (struct pb_buffer*)src);
+   radeon_bo_reference(ws, (struct pb_buffer_lean**)dst, (struct pb_buffer_lean*)src);
 }
 
 static struct pipe_fence_handle *radeon_drm_cs_get_next_fence(struct radeon_cmdbuf *rcs)