Mirror of https://gitlab.freedesktop.org/mesa/mesa.git
r300,r600,radeon/winsys: always pass the winsys to radeon_bo_reference
This will allow the removal of pb_cache_entry::mgr.

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26643>
parent 643f390de5
commit 4a078e693e

12 changed files with 78 additions and 66 deletions
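The change is mechanical: every pb_reference(dst, src) on a radeon buffer becomes radeon_bo_reference(rws, dst, src), and internal helpers such as radeon_bo_is_busy, radeon_bo_wait_idle, radeon_cs_context_cleanup, radeon_lookup_buffer, and radeon_bo_slab_fence gain a winsys parameter so call sites can thread it through. As a minimal sketch of the new calling convention, here is the updated inline wrapper from the radeon winsys header (it also appears in the diff below); with the winsys in hand at every release point, the buffer destroy path no longer needs the pb_cache_entry::mgr back-pointer:

   /* The reference helper now takes the winsys explicitly instead of
    * recovering the buffer manager from a per-cache-entry pointer. */
   static inline void
   radeon_ws_bo_reference(struct radeon_winsys *rws, struct radeon_bo **dst,
                          struct radeon_bo *src)
   {
      radeon_bo_reference(rws, (struct pb_buffer**)dst, (struct pb_buffer*)src);
   }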
@@ -64,7 +64,7 @@ static void r300_release_referenced_objects(struct r300_context *r300)
    /* Manually-created vertex buffers. */
    pipe_vertex_buffer_unreference(&r300->dummy_vb);
-   pb_reference(&r300->vbo, NULL);
+   radeon_bo_reference(r300->rws, &r300->vbo, NULL);
 
    r300->context.delete_depth_stencil_alpha_state(&r300->context,
                                                   r300->dsa_decompress_zmask);
@@ -73,9 +73,10 @@ static struct pipe_query *r300_create_query(struct pipe_context *pipe,
 static void r300_destroy_query(struct pipe_context* pipe,
                                struct pipe_query* query)
 {
+   struct r300_context *r300 = r300_context(pipe);
    struct r300_query* q = r300_query(query);
 
-   pb_reference(&q->buf, NULL);
+   radeon_bo_reference(r300->rws, &q->buf, NULL);
    FREE(query);
 }
 
@@ -120,7 +121,7 @@ static bool r300_end_query(struct pipe_context* pipe,
    struct r300_query *q = r300_query(query);
 
    if (q->type == PIPE_QUERY_GPU_FINISHED) {
-      pb_reference(&q->buf, NULL);
+      radeon_bo_reference(r300->rws, &q->buf, NULL);
       r300_flush(pipe, PIPE_FLUSH_ASYNC,
                  (struct pipe_fence_handle**)&q->buf);
       return true;
@@ -951,7 +951,7 @@ static bool r300_render_allocate_vertices(struct vbuf_render* render,
    DBG(r300, DBG_DRAW, "r300: render_allocate_vertices (size: %d)\n", size);
 
    if (!r300->vbo || size + r300->draw_vbo_offset > r300->vbo->size) {
-      pb_reference(&r300->vbo, NULL);
+      radeon_bo_reference(r300->rws, &r300->vbo, NULL);
      r300->vbo = NULL;
      r300render->vbo_ptr = NULL;
@@ -53,17 +53,18 @@ void r300_upload_index_buffer(struct r300_context *r300,
 void r300_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *buf)
 {
+   struct r300_screen *rscreen = r300_screen(screen);
+
    if (buf->target == PIPE_BUFFER) {
       struct r300_resource *rbuf = r300_resource(buf);
 
       align_free(rbuf->malloced_buffer);
 
       if (rbuf->buf)
-         pb_reference(&rbuf->buf, NULL);
+         radeon_bo_reference(rscreen->rws, &rbuf->buf, NULL);
 
       FREE(rbuf);
    } else {
-      struct r300_screen *rscreen = r300_screen(screen);
       struct r300_resource* tex = (struct r300_resource*)buf;
 
       if (tex->tex.cmask_dwords) {
@@ -73,7 +74,7 @@ void r300_resource_destroy(struct pipe_screen *screen,
          }
          mtx_unlock(&rscreen->cmask_mutex);
       }
-      pb_reference(&tex->buf, NULL);
+      radeon_bo_reference(rscreen->rws, &tex->buf, NULL);
       FREE(tex);
    }
 }
@@ -122,7 +123,7 @@ r300_buffer_transfer_map( struct pipe_context *context,
                               RADEON_FLAG_NO_INTERPROCESS_SHARING);
       if (new_buf) {
          /* Discard the old buffer. */
-         pb_reference(&rbuf->buf, NULL);
+         radeon_bo_reference(r300->rws, &rbuf->buf, NULL);
         rbuf->buf = new_buf;
 
         /* We changed the buffer, now we need to bind it where the old one was bound. */
@@ -1109,7 +1109,7 @@ r300_texture_create_object(struct r300_screen *rscreen,
 fail:
    FREE(tex);
    if (buffer)
-      pb_reference(&buffer, NULL);
+      radeon_bo_reference(rscreen->rws, &buffer, NULL);
    return NULL;
 }
 
@@ -188,7 +188,7 @@ bool r600_alloc_resource(struct r600_common_screen *rscreen,
    else
       res->gpu_address = 0;
 
-   pb_reference(&old_buf, NULL);
+   radeon_bo_reference(rscreen->ws, &old_buf, NULL);
 
    util_range_set_empty(&res->valid_buffer_range);
 
@@ -203,12 +203,13 @@ bool r600_alloc_resource(struct r600_common_screen *rscreen,
 
 void r600_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
 {
+   struct r600_screen *rscreen = (struct r600_screen*)screen;
    struct r600_resource *rbuffer = r600_resource(buf);
 
    threaded_resource_deinit(buf);
    util_range_destroy(&rbuffer->valid_buffer_range);
    pipe_resource_reference((struct pipe_resource**)&rbuffer->immed_buffer, NULL);
-   pb_reference(&rbuffer->buf, NULL);
+   radeon_bo_reference(rscreen->b.ws, &rbuffer->buf, NULL);
    FREE(rbuffer);
 }
 
@@ -251,7 +252,7 @@ void r600_replace_buffer_storage(struct pipe_context *ctx,
    struct r600_resource *rsrc = r600_resource(src);
    uint64_t old_gpu_address = rdst->gpu_address;
 
-   pb_reference(&rdst->buf, rsrc->buf);
+   radeon_bo_reference(rctx->ws, &rdst->buf, rsrc->buf);
    rdst->gpu_address = rsrc->gpu_address;
    rdst->b.b.bind = rsrc->b.b.bind;
    rdst->flags = rsrc->flags;
@@ -405,7 +405,7 @@ static void r600_reallocate_texture_inplace(struct r600_common_context *rctx,
 
    /* Replace the structure fields of rtex. */
    rtex->resource.b.b.bind = templ.bind;
-   pb_reference(&rtex->resource.buf, new_tex->resource.buf);
+   radeon_bo_reference(rctx->ws, &rtex->resource.buf, new_tex->resource.buf);
    rtex->resource.gpu_address = new_tex->resource.gpu_address;
    rtex->resource.vram_usage = new_tex->resource.vram_usage;
    rtex->resource.gart_usage = new_tex->resource.gart_usage;
@@ -576,6 +576,7 @@ static bool r600_texture_get_handle(struct pipe_screen* screen,
 
 void r600_texture_destroy(struct pipe_screen *screen, struct pipe_resource *ptex)
 {
+   struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
    struct r600_texture *rtex = (struct r600_texture*)ptex;
    struct r600_resource *resource = &rtex->resource;
 
@@ -585,7 +586,7 @@ void r600_texture_destroy(struct pipe_screen *screen, struct pipe_resource *ptex
    if (rtex->cmask_buffer != &rtex->resource) {
       r600_resource_reference(&rtex->cmask_buffer, NULL);
    }
-   pb_reference(&resource->buf, NULL);
+   radeon_bo_reference(rscreen->ws, &resource->buf, NULL);
    FREE(rtex);
 }
 
@@ -1802,9 +1803,10 @@ static void
 r600_memobj_destroy(struct pipe_screen *screen,
                     struct pipe_memory_object *_memobj)
 {
+   struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
    struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj;
 
-   pb_reference(&memobj->buf, NULL);
+   radeon_bo_reference(rscreen->ws, &memobj->buf, NULL);
    free(memobj);
 }
 
@@ -1870,7 +1872,7 @@ r600_texture_from_memobj(struct pipe_screen *screen,
    /* r600_texture_create_object doesn't increment refcount of
    * memobj->buf, so increment it here.
    */
-   pb_reference(&buf, memobj->buf);
+   radeon_bo_reference(rscreen->ws, &buf, memobj->buf);
 
    rtex->resource.b.is_shared = true;
    rtex->resource.external_usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;
@@ -209,10 +209,10 @@ void rvid_join_surfaces(struct r600_common_context *rctx,
      if (!buffers[i] || !*buffers[i])
         continue;
 
-     pb_reference(buffers[i], pb);
+     radeon_bo_reference(rctx->ws, buffers[i], pb);
    }
 
-   pb_reference(&pb, NULL);
+   radeon_bo_reference(rctx->ws, &pb, NULL);
 }
 
 int rvid_get_video_param(struct pipe_screen *screen,
@@ -48,7 +48,7 @@ static bool radeon_real_bo_is_busy(struct radeon_bo *bo)
                       &args, sizeof(args)) != 0;
 }
 
-static bool radeon_bo_is_busy(struct radeon_bo *bo)
+static bool radeon_bo_is_busy(struct radeon_winsys *rws, struct radeon_bo *bo)
 {
    unsigned num_idle;
    bool busy = false;
@@ -62,7 +62,7 @@ static bool radeon_bo_is_busy(struct radeon_bo *bo)
         busy = true;
         break;
      }
-     radeon_ws_bo_reference(&bo->u.slab.fences[num_idle], NULL);
+     radeon_ws_bo_reference(rws, &bo->u.slab.fences[num_idle], NULL);
    }
    memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
           (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
@@ -81,7 +81,7 @@ static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
                  &args, sizeof(args)) == -EBUSY);
 }
 
-static void radeon_bo_wait_idle(struct radeon_bo *bo)
+static void radeon_bo_wait_idle(struct radeon_winsys *rws, struct radeon_bo *bo)
 {
    if (bo->handle) {
       radeon_real_bo_wait_idle(bo);
@@ -89,7 +89,7 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
      mtx_lock(&bo->rws->bo_fence_lock);
      while (bo->u.slab.num_fences) {
         struct radeon_bo *fence = NULL;
-        radeon_ws_bo_reference(&fence, bo->u.slab.fences[0]);
+        radeon_ws_bo_reference(rws, &fence, bo->u.slab.fences[0]);
         mtx_unlock(&bo->rws->bo_fence_lock);
 
         /* Wait without holding the fence lock. */
@@ -97,12 +97,12 @@ static void radeon_bo_wait_idle(struct radeon_bo *bo)
 
         mtx_lock(&bo->rws->bo_fence_lock);
         if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
-           radeon_ws_bo_reference(&bo->u.slab.fences[0], NULL);
+           radeon_ws_bo_reference(rws, &bo->u.slab.fences[0], NULL);
           memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
                  (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
           bo->u.slab.num_fences--;
        }
-        radeon_ws_bo_reference(&fence, NULL);
+        radeon_ws_bo_reference(rws, &fence, NULL);
      }
      mtx_unlock(&bo->rws->bo_fence_lock);
   }
@@ -117,7 +117,7 @@ static bool radeon_bo_wait(struct radeon_winsys *rws,
 
    /* No timeout. Just query. */
    if (timeout == 0)
-      return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);
+      return !bo->num_active_ioctls && !radeon_bo_is_busy(rws, bo);
 
    abs_timeout = os_time_get_absolute_timeout(timeout);
 
@@ -127,12 +127,12 @@ static bool radeon_bo_wait(struct radeon_winsys *rws,
 
    /* Infinite timeout. */
    if (abs_timeout == OS_TIMEOUT_INFINITE) {
-      radeon_bo_wait_idle(bo);
+      radeon_bo_wait_idle(rws, bo);
      return true;
    }
 
    /* Other timeouts need to be emulated with a loop. */
-   while (radeon_bo_is_busy(bo)) {
+   while (radeon_bo_is_busy(rws, bo)) {
      if (os_time_get_nano() >= abs_timeout)
        return false;
      os_time_sleep(10);
@@ -704,7 +704,7 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
            _mesa_hash_table_u64_search(rws->bo_vas, va.offset);
 
         mtx_unlock(&rws->bo_handles_mutex);
-        pb_reference(&b, &old_bo->base);
+        radeon_bo_reference(&rws->base, &b, &old_bo->base);
         return radeon_bo(b);
      }
 
@@ -804,7 +804,7 @@ struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
    return &slab->base;
 
 fail_buffer:
-   radeon_ws_bo_reference(&slab->buffer, NULL);
+   radeon_ws_bo_reference(&ws->base, &slab->buffer, NULL);
 fail:
    FREE(slab);
    return NULL;
@@ -812,17 +812,18 @@ fail:
 
 void radeon_bo_slab_free(void *priv, struct pb_slab *pslab)
 {
+   struct radeon_winsys *rws = (struct radeon_winsys *)priv;
    struct radeon_slab *slab = (struct radeon_slab *)pslab;
 
    for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct radeon_bo *bo = &slab->entries[i];
      for (unsigned j = 0; j < bo->u.slab.num_fences; ++j)
-        radeon_ws_bo_reference(&bo->u.slab.fences[j], NULL);
+        radeon_ws_bo_reference(rws, &bo->u.slab.fences[j], NULL);
      FREE(bo->u.slab.fences);
    }
 
    FREE(slab->entries);
-   radeon_ws_bo_reference(&slab->buffer, NULL);
+   radeon_ws_bo_reference(rws, &slab->buffer, NULL);
    FREE(slab);
 }
 
@@ -1153,7 +1154,7 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
            _mesa_hash_table_u64_search(ws->bo_vas, va.offset);
 
         mtx_unlock(&ws->bo_handles_mutex);
-        pb_reference(&b, &old_bo->base);
+        radeon_bo_reference(rws, &b, &old_bo->base);
         return b;
      }
 
@@ -1284,7 +1285,7 @@ done:
            _mesa_hash_table_u64_search(ws->bo_vas, va.offset);
 
         mtx_unlock(&ws->bo_handles_mutex);
-        pb_reference(&b, &old_bo->base);
+        radeon_bo_reference(rws, &b, &old_bo->base);
         return b;
      }
 
@@ -66,10 +66,11 @@ struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned group_index);
 void radeon_bo_slab_free(void *priv, struct pb_slab *slab);
 
-static inline
-void radeon_ws_bo_reference(struct radeon_bo **dst, struct radeon_bo *src)
+static inline void
+radeon_ws_bo_reference(struct radeon_winsys *rws, struct radeon_bo **dst,
+                       struct radeon_bo *src)
 {
-   pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
+   radeon_bo_reference(rws, (struct pb_buffer**)dst, (struct pb_buffer*)src);
 }
 
 void *radeon_bo_do_map(struct radeon_bo *bo);
@@ -135,17 +135,18 @@ static bool radeon_init_cs_context(struct radeon_cs_context *csc,
    return true;
 }
 
-static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
+static void radeon_cs_context_cleanup(struct radeon_winsys *rws,
+                                      struct radeon_cs_context *csc)
 {
    unsigned i;
 
    for (i = 0; i < csc->num_relocs; i++) {
      p_atomic_dec(&csc->relocs_bo[i].bo->num_cs_references);
-     radeon_ws_bo_reference(&csc->relocs_bo[i].bo, NULL);
+     radeon_ws_bo_reference(rws, &csc->relocs_bo[i].bo, NULL);
    }
    for (i = 0; i < csc->num_slab_buffers; ++i) {
      p_atomic_dec(&csc->slab_buffers[i].bo->num_cs_references);
-     radeon_ws_bo_reference(&csc->slab_buffers[i].bo, NULL);
+     radeon_ws_bo_reference(rws, &csc->slab_buffers[i].bo, NULL);
    }
 
    csc->num_relocs = 0;
@@ -159,9 +160,9 @@ static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
    }
 }
 
-static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
+static void radeon_destroy_cs_context(struct radeon_winsys *rws, struct radeon_cs_context *csc)
 {
-   radeon_cs_context_cleanup(csc);
+   radeon_cs_context_cleanup(rws, csc);
    FREE(csc->slab_buffers);
    FREE(csc->relocs_bo);
    FREE(csc->relocs);
@@ -201,7 +202,7 @@ radeon_drm_cs_create(struct radeon_cmdbuf *rcs,
      return false;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
-      radeon_destroy_cs_context(&cs->csc1);
+      radeon_destroy_cs_context(&ws->base, &cs->csc1);
      FREE(cs);
      return false;
    }
@@ -227,7 +228,8 @@ static void radeon_drm_cs_set_preamble(struct radeon_cmdbuf *cs, const uint32_t
    radeon_emit_array(cs, preamble_ib, preamble_num_dw);
 }
 
-int radeon_lookup_buffer(struct radeon_cs_context *csc, struct radeon_bo *bo)
+int radeon_lookup_buffer(struct radeon_winsys *rws, struct radeon_cs_context *csc,
+                         struct radeon_bo *bo)
 {
    unsigned hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    struct radeon_bo_item *buffers;
@@ -273,7 +275,7 @@ static unsigned radeon_lookup_or_add_real_buffer(struct radeon_drm_cs *cs,
    unsigned hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
    int i = -1;
 
-   i = radeon_lookup_buffer(csc, bo);
+   i = radeon_lookup_buffer(&cs->ws->base, csc, bo);
 
    if (i >= 0) {
      /* For async DMA, every add_buffer call must add a buffer to the list
@@ -308,7 +310,7 @@ static unsigned radeon_lookup_or_add_real_buffer(struct radeon_drm_cs *cs,
    /* Initialize the new relocation. */
    csc->relocs_bo[csc->num_relocs].bo = NULL;
    csc->relocs_bo[csc->num_relocs].u.real.priority_usage = 0;
-   radeon_ws_bo_reference(&csc->relocs_bo[csc->num_relocs].bo, bo);
+   radeon_ws_bo_reference(&cs->ws->base, &csc->relocs_bo[csc->num_relocs].bo, bo);
    p_atomic_inc(&bo->num_cs_references);
    reloc = &csc->relocs[csc->num_relocs];
    reloc->handle = bo->handle;
@@ -332,7 +334,7 @@ static int radeon_lookup_or_add_slab_buffer(struct radeon_drm_cs *cs,
    int idx;
    int real_idx;
 
-   idx = radeon_lookup_buffer(csc, bo);
+   idx = radeon_lookup_buffer(&cs->ws->base, csc, bo);
    if (idx >= 0)
      return idx;
 
@@ -361,7 +363,7 @@ static int radeon_lookup_or_add_slab_buffer(struct radeon_drm_cs *cs,
 
    item->bo = NULL;
    item->u.slab.real_idx = real_idx;
-   radeon_ws_bo_reference(&item->bo, bo);
+   radeon_ws_bo_reference(&cs->ws->base, &item->bo, bo);
    p_atomic_inc(&bo->num_cs_references);
 
    hash = bo->hash & (ARRAY_SIZE(csc->reloc_indices_hashlist)-1);
@@ -425,7 +427,7 @@ static int radeon_drm_cs_lookup_buffer(struct radeon_cmdbuf *rcs,
 {
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
-   return radeon_lookup_buffer(cs->csc, (struct radeon_bo*)buf);
+   return radeon_lookup_buffer(&cs->ws->base, cs->csc, (struct radeon_bo*)buf);
 }
 
 static bool radeon_drm_cs_validate(struct radeon_cmdbuf *rcs)
@@ -445,7 +447,7 @@ static bool radeon_drm_cs_validate(struct radeon_cmdbuf *rcs)
 
      for (i = cs->csc->num_validated_relocs; i < cs->csc->num_relocs; i++) {
        p_atomic_dec(&cs->csc->relocs_bo[i].bo->num_cs_references);
-       radeon_ws_bo_reference(&cs->csc->relocs_bo[i].bo, NULL);
+       radeon_ws_bo_reference(&cs->ws->base, &cs->csc->relocs_bo[i].bo, NULL);
      }
      cs->csc->num_relocs = cs->csc->num_validated_relocs;
 
@@ -454,7 +456,7 @@ static bool radeon_drm_cs_validate(struct radeon_cmdbuf *rcs)
        cs->flush_cs(cs->flush_data,
                     RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
      } else {
-       radeon_cs_context_cleanup(cs->csc);
+       radeon_cs_context_cleanup(&cs->ws->base, cs->csc);
        rcs->used_vram_kb = 0;
        rcs->used_gart_kb = 0;
 
@@ -491,7 +493,8 @@ static unsigned radeon_drm_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
 
 void radeon_drm_cs_emit_ioctl_oneshot(void *job, void *gdata, int thread_index)
 {
-   struct radeon_cs_context *csc = ((struct radeon_drm_cs*)job)->cst;
+   struct radeon_drm_cs *cs = (struct radeon_drm_cs*)job;
+   struct radeon_cs_context *csc = cs->cst;
    unsigned i;
    int r;
 
@@ -518,7 +521,7 @@ void radeon_drm_cs_emit_ioctl_oneshot(void *job, void *gdata, int thread_index)
    for (i = 0; i < csc->num_slab_buffers; i++)
      p_atomic_dec(&csc->slab_buffers[i].bo->num_active_ioctls);
 
-   radeon_cs_context_cleanup(csc);
+   radeon_cs_context_cleanup(&cs->ws->base, csc);
 }
 
 /*
@@ -544,7 +547,8 @@ void radeon_drm_cs_sync_flush(struct radeon_cmdbuf *rcs)
  * their respective ioctl do not have to be kept, because we know that they
  * will signal earlier.
  */
-static void radeon_bo_slab_fence(struct radeon_bo *bo, struct radeon_bo *fence)
+static void radeon_bo_slab_fence(struct radeon_winsys *rws, struct radeon_bo *bo,
+                                 struct radeon_bo *fence)
 {
    unsigned dst;
 
@@ -557,7 +561,7 @@ static void radeon_bo_slab_fence(struct radeon_bo *bo, struct radeon_bo *fence)
        bo->u.slab.fences[dst] = bo->u.slab.fences[src];
        dst++;
      } else {
-       radeon_ws_bo_reference(&bo->u.slab.fences[src], NULL);
+       radeon_ws_bo_reference(rws, &bo->u.slab.fences[src], NULL);
      }
    }
    bo->u.slab.num_fences = dst;
@@ -579,7 +583,7 @@ static void radeon_bo_slab_fence(struct radeon_bo *bo, struct radeon_bo *fence)
 
    /* Add the new fence */
    bo->u.slab.fences[bo->u.slab.num_fences] = NULL;
-   radeon_ws_bo_reference(&bo->u.slab.fences[bo->u.slab.num_fences], fence);
+   radeon_ws_bo_reference(rws, &bo->u.slab.fences[bo->u.slab.num_fences], fence);
    bo->u.slab.num_fences++;
 }
 
@@ -643,7 +647,7 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
    for (unsigned i = 0; i < cs->csc->num_slab_buffers; ++i) {
      struct radeon_bo *bo = cs->csc->slab_buffers[i].bo;
      p_atomic_inc(&bo->num_active_ioctls);
-     radeon_bo_slab_fence(bo, (struct radeon_bo *)fence);
+     radeon_bo_slab_fence(&cs->ws->base, bo, (struct radeon_bo *)fence);
    }
    mtx_unlock(&cs->ws->bo_fence_lock);
 
@@ -727,7 +731,7 @@ static int radeon_drm_cs_flush(struct radeon_cmdbuf *rcs,
        radeon_drm_cs_emit_ioctl_oneshot(cs, NULL, 0);
      }
    } else {
-      radeon_cs_context_cleanup(cs->cst);
+      radeon_cs_context_cleanup(&cs->ws->base, cs->cst);
    }
 
    /* Prepare a new CS. */
@@ -752,11 +756,11 @@ static void radeon_drm_cs_destroy(struct radeon_cmdbuf *rcs)
 
    radeon_drm_cs_sync_flush(rcs);
    util_queue_fence_destroy(&cs->flush_completed);
-   radeon_cs_context_cleanup(&cs->csc1);
-   radeon_cs_context_cleanup(&cs->csc2);
+   radeon_cs_context_cleanup(&cs->ws->base, &cs->csc1);
+   radeon_cs_context_cleanup(&cs->ws->base, &cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
-   radeon_destroy_cs_context(&cs->csc1);
-   radeon_destroy_cs_context(&cs->csc2);
+   radeon_destroy_cs_context(&cs->ws->base, &cs->csc1);
+   radeon_destroy_cs_context(&cs->ws->base, &cs->csc2);
    radeon_fence_reference(&cs->ws->base, &cs->next_fence, NULL);
    FREE(cs);
 }
@@ -772,7 +776,7 @@ static bool radeon_bo_is_referenced(struct radeon_cmdbuf *rcs,
    if (!bo->num_cs_references)
      return false;
 
-   index = radeon_lookup_buffer(cs->csc, bo);
+   index = radeon_lookup_buffer(&cs->ws->base, cs->csc, bo);
    if (index == -1)
      return false;
 
@@ -820,7 +824,7 @@ static void radeon_fence_reference(struct radeon_winsys *ws,
                                    struct pipe_fence_handle **dst,
                                    struct pipe_fence_handle *src)
 {
-   pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
+   radeon_bo_reference(ws, (struct pb_buffer**)dst, (struct pb_buffer*)src);
 }
 
 static struct pipe_fence_handle *radeon_drm_cs_get_next_fence(struct radeon_cmdbuf *rcs)
@@ -73,7 +73,8 @@ struct radeon_drm_cs {
    struct pipe_fence_handle *next_fence;
 };
 
-int radeon_lookup_buffer(struct radeon_cs_context *csc, struct radeon_bo *bo);
+int radeon_lookup_buffer(struct radeon_winsys *rws, struct radeon_cs_context *csc,
+                         struct radeon_bo *bo);
 
 static inline struct radeon_drm_cs *
 radeon_drm_cs(struct radeon_cmdbuf *rcs)
@@ -87,7 +88,7 @@ radeon_bo_is_referenced_by_cs(struct radeon_drm_cs *cs,
 {
    int num_refs = bo->num_cs_references;
    return num_refs == bo->rws->num_cs ||
-         (num_refs && radeon_lookup_buffer(cs->csc, bo) != -1);
+         (num_refs && radeon_lookup_buffer(&cs->ws->base, cs->csc, bo) != -1);
 }
 
 static inline bool
@@ -99,7 +100,7 @@ radeon_bo_is_referenced_by_cs_for_write(struct radeon_drm_cs *cs,
    if (!bo->num_cs_references)
      return false;
 
-   index = radeon_lookup_buffer(cs->csc, bo);
+   index = radeon_lookup_buffer(&cs->ws->base, cs->csc, bo);
    if (index == -1)
      return false;
 