mesa/src/gallium/drivers/iris/iris_fence.c
Chris Wilson e31b703c42 iris: Place a seqno at the end of every batch
We can use a seqno as the basis for fast userspace fences: we can check a
value directly to test for fence completion without having to query the
kernel. To do so, we need to write a breadcrumb from the batch and track
those writes as the basis for our lightweight fences.

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/3802>
2020-05-01 19:00:02 +00:00

/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_fence.c
 *
 * Fences for driver and IPC serialisation, scheduling and synchronisation.
 */
#include <linux/sync_file.h>
#include "util/u_inlines.h"
#include "intel/common/gen_gem.h"
#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"
#include "iris_screen.h"
static uint32_t
gem_syncobj_create(int fd, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);

   return args.handle;
}

static void
gem_syncobj_destroy(int fd, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   gen_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

/**
 * Make a new sync-point.
 */
struct iris_syncobj *
iris_create_syncobj(struct iris_screen *screen)
{
   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));

   if (!syncobj)
      return NULL;

   syncobj->handle = gem_syncobj_create(screen->fd, 0);
   assert(syncobj->handle);

   pipe_reference_init(&syncobj->ref, 1);

   return syncobj;
}

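/**
 * Destroy a sync-point: release the kernel syncobj and free the wrapper.
 */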
void
iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
{
   gem_syncobj_destroy(screen->fd, syncobj->handle);
   free(syncobj);
}

/**
 * Add a sync-point to the batch, with the given flags.
 *
 * \p flags   One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
                       struct iris_syncobj *syncobj,
                       unsigned flags)
{
   struct drm_i915_gem_exec_fence *fence =
      util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);

   *fence = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj->handle,
      .flags = flags,
   };

   struct iris_syncobj **store =
      util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);

   *store = NULL;
   iris_syncobj_reference(batch->screen, store, syncobj);
}

/* ------------------------------------------------------------------- */
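
/*
 * A pipe_fence_handle gathers one syncobj per batch that still had work
 * outstanding when the fence was created; waiting on the fence then means
 * waiting on all of the recorded syncobjs.
 */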
struct pipe_fence_handle {
   struct pipe_reference ref;
   struct iris_syncobj *syncobj[IRIS_BATCH_COUNT];
   unsigned count;
};

static void
iris_fence_destroy(struct pipe_screen *p_screen,
                   struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   for (unsigned i = 0; i < fence->count; i++)
      iris_syncobj_reference(screen, &fence->syncobj[i], NULL);

   free(fence);
}

static void
iris_fence_reference(struct pipe_screen *p_screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   if (pipe_reference(*dst ? &(*dst)->ref : NULL,
                      src ? &src->ref : NULL))
      iris_fence_destroy(p_screen, *dst);

   *dst = src;
}

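/**
 * Wait on (or, with timeout_nsec == 0, poll) a single syncobj.
 *
 * Note that this returns the raw ioctl result, so "false" means the syncobj
 * signalled within the timeout and "true" means the wait failed or timed
 * out.  iris_fence_flush() below relies on this to skip recording syncobjs
 * that have already signalled.
 */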
bool
iris_wait_syncobj(struct pipe_screen *p_screen,
                  struct iris_syncobj *syncobj,
                  int64_t timeout_nsec)
{
   if (!syncobj)
      return false;

   struct iris_screen *screen = (struct iris_screen *)p_screen;
   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)&syncobj->handle,
      .count_handles = 1,
      .timeout_nsec = timeout_nsec,
   };

   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}

#define CSI "\e["
#define BLUE_HEADER CSI "0;97;44m"
#define NORMAL CSI "0m"
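
/**
 * The pipe_context::flush hook: submit all pending batches, then (if the
 * caller asked for a fence) gather the last-seqno syncobj of each batch
 * that has not already signalled into a new pipe_fence_handle.
 */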
static void
iris_fence_flush(struct pipe_context *ctx,
                 struct pipe_fence_handle **out_fence,
                 unsigned flags)
{
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_context *ice = (struct iris_context *)ctx;

   if (flags & PIPE_FLUSH_END_OF_FRAME) {
      ice->frame++;

      if (INTEL_DEBUG & DEBUG_SUBMIT) {
         fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
                 (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
                 ice->frame, ctx, ' ',
                 (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
      }
   }

   /* XXX PIPE_FLUSH_DEFERRED */
   for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
      iris_batch_flush(&ice->batches[i]);

   if (!out_fence)
      return;

   struct pipe_fence_handle *fence = calloc(1, sizeof(*fence));
   if (!fence)
      return;

   pipe_reference_init(&fence->ref, 1);

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      struct iris_batch *batch = &ice->batches[b];

      if (!iris_wait_syncobj(ctx->screen, batch->last_seqno->syncobj, 0))
         continue;

      iris_syncobj_reference(screen, &fence->syncobj[fence->count++],
                             batch->last_seqno->syncobj);
   }

   iris_fence_reference(ctx->screen, out_fence, NULL);
   *out_fence = fence;
}

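/**
 * The pipe_context::fence_server_sync hook: make every batch in this
 * context wait (via I915_EXEC_FENCE_WAIT at execbuf time) for the given
 * fence before executing any further work.
 */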
static void
iris_fence_await(struct pipe_context *ctx,
                 struct pipe_fence_handle *fence)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
      for (unsigned i = 0; i < fence->count; i++) {
         iris_batch_add_syncobj(&ice->batches[b], fence->syncobj[i],
                                I915_EXEC_FENCE_WAIT);
      }
   }
}

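/* DRM_IOCTL_SYNCOBJ_WAIT takes an absolute CLOCK_MONOTONIC timeout, while
 * gallium's fence_finish passes a relative timeout in nanoseconds, so we
 * convert with rel2abs() below.
 */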
#define NSEC_PER_SEC (1000 * USEC_PER_SEC)
#define USEC_PER_SEC (1000 * MSEC_PER_SEC)
#define MSEC_PER_SEC (1000)

static uint64_t
gettime_ns(void)
{
   struct timespec current;
   clock_gettime(CLOCK_MONOTONIC, &current);
   return (uint64_t)current.tv_sec * NSEC_PER_SEC + current.tv_nsec;
}

static uint64_t
rel2abs(uint64_t timeout)
{
   if (timeout == 0)
      return 0;

   uint64_t current_time = gettime_ns();
   uint64_t max_timeout = (uint64_t) INT64_MAX - current_time;

   timeout = MIN2(max_timeout, timeout);

   return current_time + timeout;
}

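/**
 * The pipe_screen::fence_finish hook: wait until every syncobj recorded in
 * the fence has signalled, or until the (relative) timeout expires.  A fence
 * with no recorded syncobjs was created after all batches had already
 * completed, so it is trivially signalled.
 */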
static bool
iris_fence_finish(struct pipe_screen *p_screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;

   if (!fence->count)
      return true;

   uint32_t handles[ARRAY_SIZE(fence->syncobj)];
   for (unsigned i = 0; i < fence->count; i++)
      handles[i] = fence->syncobj[i]->handle;

   struct drm_syncobj_wait args = {
      .handles = (uintptr_t)handles,
      .count_handles = fence->count,
      .timeout_nsec = rel2abs(timeout),
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
   };

   return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args) == 0;
}

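/* Merge two sync_file fds into one that signals only once both inputs have
 * signalled, consuming (closing) both inputs.  Either input may be -1, in
 * which case the other is returned unchanged.
 */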
static int
sync_merge_fd(int sync_fd, int new_fd)
{
   if (sync_fd == -1)
      return new_fd;

   if (new_fd == -1)
      return sync_fd;

   struct sync_merge_data args = {
      .name = "iris fence",
      .fd2 = new_fd,
      .fence = -1,
   };

   gen_ioctl(sync_fd, SYNC_IOC_MERGE, &args);
   close(new_fd);
   close(sync_fd);

   return args.fence;
}

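/**
 * The pipe_screen::fence_get_fd hook: export the fence as a sync_file fd by
 * exporting each recorded syncobj's current fence and merging the resulting
 * fds into a single one.
 */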
static int
iris_fence_get_fd(struct pipe_screen *p_screen,
                  struct pipe_fence_handle *fence)
{
   struct iris_screen *screen = (struct iris_screen *)p_screen;
   int fd = -1;

   if (fence->count == 0) {
      /* Our fence has no syncobjs recorded.  This means that all of the
       * batches had already completed, their syncobjs had been signalled,
       * and so we didn't bother to record them.  But we're being asked to
       * export such a fence.  So export a dummy already-signalled syncobj.
       */
      struct drm_syncobj_handle args = {
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, .fd = -1,
      };

      args.handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED);
      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      gem_syncobj_destroy(screen->fd, args.handle);

      return args.fd;
   }

   for (unsigned i = 0; i < fence->count; i++) {
      struct drm_syncobj_handle args = {
         .handle = fence->syncobj[i]->handle,
         .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
         .fd = -1,
      };

      gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
      fd = sync_merge_fd(fd, args.fd);
   }

   return fd;
}

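/**
 * The pipe_context::create_fence_fd hook: import a sync_file fd into a new
 * syncobj and wrap it in a pipe_fence_handle so the rest of the driver can
 * treat it like any other fence.
 */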
static void
iris_fence_create_fd(struct pipe_context *ctx,
                     struct pipe_fence_handle **out,
                     int fd,
                     enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);

   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   struct drm_syncobj_handle args = {
      .handle = gem_syncobj_create(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED),
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
      .fd = fd,
   };

   if (gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args) == -1) {
      fprintf(stderr, "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %s\n",
              strerror(errno));
      gem_syncobj_destroy(screen->fd, args.handle);
      *out = NULL;
      return;
   }

   struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
   syncobj->handle = args.handle;
   pipe_reference_init(&syncobj->ref, 1);

   struct pipe_fence_handle *fence = malloc(sizeof(*fence));
   pipe_reference_init(&fence->ref, 1);
   fence->syncobj[0] = syncobj;
   fence->count = 1;

   *out = fence;
}

void
iris_init_screen_fence_functions(struct pipe_screen *screen)
{
   screen->fence_reference = iris_fence_reference;
   screen->fence_finish = iris_fence_finish;
   screen->fence_get_fd = iris_fence_get_fd;
}

void
iris_init_context_fence_functions(struct pipe_context *ctx)
{
   ctx->flush = iris_fence_flush;
   ctx->create_fence_fd = iris_fence_create_fd;
   ctx->fence_server_sync = iris_fence_await;
}