gallium/radeon: unify buffer_wait and buffer_is_busy in the winsys interface

The timeout parameter covers both cases.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

parent 8118d3719a
commit 592ce6e2d1

9 changed files with 49 additions and 60 deletions
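The change collapses the two entry points into one call: a timeout of 0 turns
buffer_wait() into a non-blocking busy query, and PIPE_TIMEOUT_INFINITE keeps
the old unconditional-wait behavior. A minimal sketch of the call-site
translation implied by the hunks below (buf and usage stand in for any buffer
and usage flag):

    /* Before: two separate entry points. */
    boolean busy = ws->buffer_is_busy(buf, usage);        /* status only   */
    ws->buffer_wait(buf, usage);                          /* block forever */

    /* After: one entry point; buffer_wait() returns true when idle. */
    bool idle = ws->buffer_wait(buf, 0, usage);           /* status only   */
    ws->buffer_wait(buf, PIPE_TIMEOUT_INFINITE, usage);   /* block forever */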
@@ -146,10 +146,11 @@ static boolean r300_get_query_result(struct pipe_context* pipe,
     if (q->type == PIPE_QUERY_GPU_FINISHED) {
         if (wait) {
-            r300->rws->buffer_wait(q->buf, RADEON_USAGE_READWRITE);
+            r300->rws->buffer_wait(q->buf, PIPE_TIMEOUT_INFINITE,
+                                   RADEON_USAGE_READWRITE);
             vresult->b = TRUE;
         } else {
-            vresult->b = !r300->rws->buffer_is_busy(q->buf, RADEON_USAGE_READWRITE);
+            vresult->b = r300->rws->buffer_wait(q->buf, 0, RADEON_USAGE_READWRITE);
         }
         return vresult->b;
     }

@@ -96,7 +96,7 @@ r300_buffer_transfer_map( struct pipe_context *context,
 
     /* Check if mapping this buffer would cause waiting for the GPU. */
     if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->cs_buf, RADEON_USAGE_READWRITE) ||
-        r300->rws->buffer_is_busy(rbuf->buf, RADEON_USAGE_READWRITE)) {
+        !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
         unsigned i;
         struct pb_buffer *new_buf;
 
@@ -120,7 +120,7 @@ r300_texture_transfer_map(struct pipe_context *ctx,
         referenced_hw = TRUE;
     } else {
         referenced_hw =
-            r300->rws->buffer_is_busy(tex->buf, RADEON_USAGE_READWRITE);
+            !r300->rws->buffer_wait(tex->buf, 0, RADEON_USAGE_READWRITE);
     }
 
     trans = CALLOC_STRUCT(r300_transfer);

@@ -84,7 +84,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
         }
     }
 
-    if (busy || ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+    if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK) {
             return NULL;
         } else {

@@ -274,7 +274,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 
         /* Check if mapping this buffer would cause waiting for the GPU. */
         if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
-            rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+            !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
             rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
         }
         /* At this point, the buffer is always idle. */

@@ -288,7 +288,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx,
 
         /* Check if mapping this buffer would cause waiting for the GPU. */
         if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
-            rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+            !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
             /* Do a wait-free write-only transfer using a temporary buffer. */
             unsigned offset;
             struct r600_resource *staging = NULL;

@@ -505,7 +505,7 @@ static boolean r600_begin_query(struct pipe_context *ctx,
 
     /* Obtain a new buffer if the current one can't be mapped without a stall. */
     if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
-        rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
+        !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
         pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
         rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
     }

@@ -941,7 +941,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx,
         use_staging_texture = TRUE;
     } else if (!(usage & PIPE_TRANSFER_READ) &&
                (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
-                rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) {
+                !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
         /* Use a staging texture for uploads if the underlying BO is busy. */
         use_staging_texture = TRUE;
     }

@@ -398,24 +398,15 @@ struct radeon_winsys {
     void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);
 
     /**
-     * Return TRUE if a buffer object is being used by the GPU.
+     * Wait for the buffer and return true if the buffer is not used
+     * by the device.
      *
-     * \param buf       A winsys buffer object.
-     * \param usage     Only check whether the buffer is busy for the given usage.
+     * The timeout of 0 will only return the status.
+     * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
+     * is idle.
      */
-    boolean (*buffer_is_busy)(struct pb_buffer *buf,
-                              enum radeon_bo_usage usage);
-
-    /**
-     * Wait for a buffer object until it is not used by a GPU. This is
-     * equivalent to a fence placed after the last command using the buffer,
-     * and synchronizing to the fence.
-     *
-     * \param buf       A winsys buffer object to wait for.
-     * \param usage     Only wait until the buffer is idle for the given usage,
-     *                  but may still be busy for some other usage.
-     */
-    void (*buffer_wait)(struct pb_buffer *buf, enum radeon_bo_usage usage);
+    bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout,
+                        enum radeon_bo_usage usage);
 
     /**
      * Return tiling flags describing a memory layout of a buffer object.

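Note the inverted polarity at the call sites: the old code tested
buffer_is_busy() for TRUE, while the new code tests buffer_wait(..., 0, ...)
for false. A finite, nonzero timeout can be emulated on top of the two
supported modes by polling with timeout 0, which is the pattern
radeon_fence_wait() uses in the last hunks below (a sketch, assuming
os_time_get() returns microseconds as in Mesa's os_time.h):

    int64_t start = os_time_get();
    while (!ws->buffer_wait(buf, 0, RADEON_USAGE_READWRITE)) {
        if (os_time_get() - start >= timeout_us)
            return FALSE;  /* timed out, buffer still busy */
        os_time_sleep(10); /* back off between polls */
    }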
@@ -101,33 +101,30 @@ static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
     return bo;
 }
 
-static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
+static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
+                           enum radeon_bo_usage usage)
 {
-    struct radeon_bo *bo = get_radeon_bo(_buf);
-    struct drm_radeon_gem_wait_idle args = {0};
+    struct radeon_bo *bo = get_radeon_bo(_buf);
 
-    while (p_atomic_read(&bo->num_active_ioctls)) {
-        sched_yield();
+    /* Wait if any ioctl is being submitted with this buffer. */
+    if (!os_wait_until_zero(&bo->num_active_ioctls, timeout))
+        return false;
+
+    /* TODO: handle arbitrary timeout */
+    if (!timeout) {
+        struct drm_radeon_gem_busy args = {0};
+
+        args.handle = bo->handle;
+        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
+                                   &args, sizeof(args)) == 0;
+    } else {
+        struct drm_radeon_gem_wait_idle args = {0};
+
+        args.handle = bo->handle;
+        while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
+                               &args, sizeof(args)) == -EBUSY);
+        return true;
     }
-
-    args.handle = bo->handle;
-    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
-                           &args, sizeof(args)) == -EBUSY);
 }
-
-static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
-                                 enum radeon_bo_usage usage)
-{
-    struct radeon_bo *bo = get_radeon_bo(_buf);
-    struct drm_radeon_gem_busy args = {0};
-
-    if (p_atomic_read(&bo->num_active_ioctls)) {
-        return TRUE;
-    }
-
-    args.handle = bo->handle;
-    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
-                               &args, sizeof(args)) != 0;
-}
 
 static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)

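Two details of the new radeon_bo_wait() are worth noting. First,
os_wait_until_zero() (assumed here to follow Mesa's os_time.h contract:
return true if the counter reached zero within the timeout) makes the
pending-ioctl check respect the caller's timeout, so a timeout of 0 can
never block. Second, as the TODO says, arbitrary timeouts are not handled
yet: any nonzero timeout falls through to the DRM_RADEON_GEM_WAIT_IDLE path
and waits indefinitely, so only 0 and PIPE_TIMEOUT_INFINITE behave exactly
as documented.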
@@ -410,8 +407,8 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                 return NULL;
             }
 
-            if (radeon_bo_is_busy((struct pb_buffer*)bo,
-                                  RADEON_USAGE_WRITE)) {
+            if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+                                RADEON_USAGE_WRITE)) {
                 return NULL;
             }
         } else {

@@ -420,8 +417,8 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                 return NULL;
             }
 
-            if (radeon_bo_is_busy((struct pb_buffer*)bo,
-                                  RADEON_USAGE_READWRITE)) {
+            if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+                                RADEON_USAGE_READWRITE)) {
                 return NULL;
             }
         }

@@ -439,7 +436,7 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
             if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                 cs->flush_cs(cs->flush_data, 0, NULL);
             }
-            radeon_bo_wait((struct pb_buffer*)bo,
+            radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                            RADEON_USAGE_WRITE);
         } else {
             /* Mapping for write. */

@@ -453,7 +450,8 @@ static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
             }
         }
 
-        radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
+        radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+                       RADEON_USAGE_READWRITE);
     }
 
     bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;

@@ -644,7 +642,7 @@ static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
         return TRUE;
     }
 
-    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
+    if (!radeon_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
         return TRUE;
     }
 
@@ -1166,7 +1164,6 @@ void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
     ws->base.buffer_map = radeon_bo_map;
     ws->base.buffer_unmap = radeon_bo_unmap;
     ws->base.buffer_wait = radeon_bo_wait;
-    ws->base.buffer_is_busy = radeon_bo_is_busy;
     ws->base.buffer_create = radeon_winsys_bo_create;
     ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
     ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;

@@ -637,7 +637,7 @@ static bool radeon_fence_wait(struct radeon_winsys *ws,
     struct pb_buffer *rfence = (struct pb_buffer*)fence;
 
     if (timeout == 0)
-        return !ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE);
+        return ws->buffer_wait(rfence, 0, RADEON_USAGE_READWRITE);
 
     if (timeout != PIPE_TIMEOUT_INFINITE) {
         int64_t start_time = os_time_get();

@@ -646,7 +646,7 @@ static bool radeon_fence_wait(struct radeon_winsys *ws,
         timeout /= 1000;
 
         /* Wait in a loop. */
-        while (ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE)) {
+        while (!ws->buffer_wait(rfence, 0, RADEON_USAGE_READWRITE)) {
             if (os_time_get() - start_time >= timeout) {
                 return FALSE;
             }

@@ -655,7 +655,7 @@ static bool radeon_fence_wait(struct radeon_winsys *ws,
         return TRUE;
     }
 
-    ws->buffer_wait(rfence, RADEON_USAGE_READWRITE);
+    ws->buffer_wait(rfence, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
     return TRUE;
 }