zink: add back atomics for internal refcounts

in extensive testing, using no atomics at all is a bit too loose for
basic/common cases like sharing a vertex buffer between contexts, readily
leading to unintended behavior

keeping the atomics internal ensures that they remain in the same ccx,
which avoids impacting performance

it does require a little trickery to avoid an extra atomic in the buffer
decrement case, however

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38176>
Mike Blumenkrantz 2025-10-30 10:26:39 -04:00 committed by Marge Bot
parent f48df6f45c
commit e152c80d18
2 changed files with 12 additions and 9 deletions
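
The "trickery" mentioned above hinges on the gfx and compute bind counts being the two 16-bit halves of the 32-bit all_binds word, so a single atomic add both decrements the right counter and reports whether anything is still bound. A minimal sketch of that idea, assuming this packed layout and using hypothetical names (fake_resource, dec_bind_and_check_idle) plus C11 atomics in place of Mesa's p_atomic helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the relevant zink_resource fields: one 32-bit
 * word packs both 16-bit bind counts (low half = gfx, increments of 1;
 * high half = compute, increments of 1 << 16). */
struct fake_resource {
   _Atomic uint32_t all_binds;
   bool deleted;
};

/* Decrement one of the packed counters and learn whether *both* are now
 * zero with a single atomic op, mirroring
 * p_atomic_add_return(&res->all_binds, -incr) in the diff below. */
static bool
dec_bind_and_check_idle(struct fake_resource *res, bool is_compute)
{
   uint32_t incr = is_compute ? 1u << 16 : 1u;
   /* atomic_fetch_sub returns the previous value; subtracting incr again
    * yields the post-decrement value without a second atomic read */
   uint32_t remaining = atomic_fetch_sub(&res->all_binds, incr) - incr;
   return remaining == 0;
}

The single return value from the fetch-and-sub replaces a separate read of all_binds after the decrement, which is the extra atomic the commit message avoids in the buffer decrement case.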


@@ -96,6 +96,3 @@ glx@glx-multithread-buffer-busy-tracking-bug
 glx@glx-query-drawable-glx_width
 glx@glx-query-drawable-glxpixmap-glx_width
 glx@glx-swap-pixmap
-# deqp-egl-x11: ../src/gallium/drivers/zink/zink_context.c:1339: update_res_bind_count: Assertion `res->bind_count[is_compute]' failed.
-x11-dEQP-EGL.functional.sharing.gles2.multithread.simple.buffers.buffersubdata_render

src/gallium/drivers/zink/zink_context.c

@@ -1362,11 +1362,11 @@ update_img_bind_count(struct zink_context *ctx, struct zink_resource *res, bool
    assert(!res->obj->is_buffer);
    if (decrement) {
       assert(res->bind_count[is_compute]);
-      if (!--res->bind_count[is_compute])
+      if (p_atomic_dec_zero(&res->bind_count[is_compute]))
          _mesa_set_remove_key(ctx->need_barriers[is_compute], res);
       check_resource_for_batch_ref(ctx, res);
    } else
-      res->bind_count[is_compute]++;
+      p_atomic_inc(&res->bind_count[is_compute]);
 }

 ALWAYS_INLINE static void
@@ -1375,12 +1375,18 @@ update_buf_bind_count(struct zink_context *ctx, struct zink_resource *res, bool
    assert(res->obj->is_buffer);
    if (decrement) {
       assert(res->bind_count[is_compute]);
-      --res->bind_count[is_compute];
+      /* these are 16bit counters:
+         - the gfx count is 0-65535, increments of 1
+         - the compute count is 65536-UINT32_MAX, increments of (1<<16)
+       */
+      unsigned incr = is_compute ? 1 << 16 : 1;
+      /* avoid extra atomics from checking all_binds in the next line by directly subtracting the 16bit increment */
+      unsigned all_binds = p_atomic_add_return(&res->all_binds, -incr);
       /* if hit while blitting, this will be triggered again after blitting */
-      if (res->deleted && !res->all_binds && !ctx->blitting)
+      if (res->deleted && !all_binds && !ctx->blitting)
          resource_release(ctx, res);
    } else
-      res->bind_count[is_compute]++;
+      p_atomic_inc(&res->bind_count[is_compute]);
 }

 ALWAYS_INLINE static void
@@ -1397,7 +1403,7 @@ zink_resource_release(struct pipe_context *pctx, struct pipe_resource *pres)
 {
    struct zink_resource *res = zink_resource(pres);
-   if (res->all_binds)
+   if (p_atomic_read_relaxed(&res->all_binds))
       res->deleted = true;
    else
       resource_release(zink_context(pctx), res);
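
The release path in the last hunk pairs with that decrement: releasing a still-bound buffer only sets res->deleted, and the unbind that drops the packed count to zero does the real release. A self-contained sketch of the interplay under the same assumed layout, with hypothetical names standing in for resource_release() and the zink context state (ctx->blitting and the barrier bookkeeping are omitted):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_resource {
   _Atomic uint32_t all_binds; /* low half = gfx binds, high half = compute binds */
   bool deleted;
};

static void
real_release(struct fake_resource *res)
{
   /* stand-in for resource_release(ctx, res) in the diff */
   (void)res;
}

/* mirrors the zink_resource_release() hunk: if anything is still bound,
 * defer destruction; otherwise release immediately */
static void
release(struct fake_resource *res)
{
   /* relaxed read, matching p_atomic_read_relaxed() in the hunk above */
   if (atomic_load_explicit(&res->all_binds, memory_order_relaxed))
      res->deleted = true;
   else
      real_release(res);
}

/* mirrors the update_buf_bind_count() decrement path: the unbind that
 * brings the packed counter to zero finishes the deferred release */
static void
unbind(struct fake_resource *res, bool is_compute)
{
   uint32_t incr = is_compute ? 1u << 16 : 1u;
   uint32_t remaining = atomic_fetch_sub(&res->all_binds, incr) - incr;
   if (res->deleted && remaining == 0)
      real_release(res);
}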