freedreno/drm: Re-indent

clang-format -fallback-style=none --style=file -i src/freedreno/drm/*.[ch]

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10293>
This commit is contained in:
Rob Clark 2021-04-16 10:42:48 -07:00 committed by Marge Bot
parent a1653854f5
commit b94db11708
15 changed files with 2028 additions and 1959 deletions

View file

@ -33,95 +33,97 @@ simple_mtx_t table_lock = _SIMPLE_MTX_INITIALIZER_NP;
void bo_del(struct fd_bo *bo);
/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct fd_bo *bo, uint32_t name)
static void
set_name(struct fd_bo *bo, uint32_t name)
{
bo->name = name;
/* add ourself into the handle table: */
_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
bo->name = name;
/* add ourself into the handle table: */
_mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo);
}
/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(struct hash_table *tbl, uint32_t key)
static struct fd_bo *
lookup_bo(struct hash_table *tbl, uint32_t key)
{
struct fd_bo *bo = NULL;
struct hash_entry *entry = _mesa_hash_table_search(tbl, &key);
if (entry) {
/* found, incr refcnt and return: */
bo = fd_bo_ref(entry->data);
struct fd_bo *bo = NULL;
struct hash_entry *entry = _mesa_hash_table_search(tbl, &key);
if (entry) {
/* found, incr refcnt and return: */
bo = fd_bo_ref(entry->data);
/* don't break the bucket if this bo was found in one */
list_delinit(&bo->list);
}
return bo;
/* don't break the bucket if this bo was found in one */
list_delinit(&bo->list);
}
return bo;
}
/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo * bo_from_handle(struct fd_device *dev,
uint32_t size, uint32_t handle)
static struct fd_bo *
bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
struct fd_bo *bo;
struct fd_bo *bo;
simple_mtx_assert_locked(&table_lock);
simple_mtx_assert_locked(&table_lock);
bo = dev->funcs->bo_from_handle(dev, size, handle);
if (!bo) {
struct drm_gem_close req = {
.handle = handle,
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
return NULL;
}
bo->dev = dev;
bo->size = size;
bo->handle = handle;
bo->iova = bo->funcs->iova(bo);
bo->flags = FD_RELOC_FLAGS_INIT;
bo = dev->funcs->bo_from_handle(dev, size, handle);
if (!bo) {
struct drm_gem_close req = {
.handle = handle,
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
return NULL;
}
bo->dev = dev;
bo->size = size;
bo->handle = handle;
bo->iova = bo->funcs->iova(bo);
bo->flags = FD_RELOC_FLAGS_INIT;
p_atomic_set(&bo->refcnt, 1);
list_inithead(&bo->list);
/* add ourself into the handle table: */
_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
return bo;
p_atomic_set(&bo->refcnt, 1);
list_inithead(&bo->list);
/* add ourself into the handle table: */
_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
return bo;
}
static struct fd_bo *
bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
struct fd_bo_cache *cache)
struct fd_bo_cache *cache)
{
struct fd_bo *bo = NULL;
uint32_t handle;
int ret;
struct fd_bo *bo = NULL;
uint32_t handle;
int ret;
bo = fd_bo_cache_alloc(cache, &size, flags);
if (bo)
return bo;
bo = fd_bo_cache_alloc(cache, &size, flags);
if (bo)
return bo;
ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
if (ret)
return NULL;
ret = dev->funcs->bo_new_handle(dev, size, flags, &handle);
if (ret)
return NULL;
simple_mtx_lock(&table_lock);
bo = bo_from_handle(dev, size, handle);
simple_mtx_unlock(&table_lock);
simple_mtx_lock(&table_lock);
bo = bo_from_handle(dev, size, handle);
simple_mtx_unlock(&table_lock);
VG_BO_ALLOC(bo);
VG_BO_ALLOC(bo);
return bo;
return bo;
}
struct fd_bo *
_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
{
struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
if (bo)
bo->bo_reuse = BO_CACHE;
return bo;
struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache);
if (bo)
bo->bo_reuse = BO_CACHE;
return bo;
}
void
_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
bo->funcs->set_name(bo, fmt, ap);
bo->funcs->set_name(bo, fmt, ap);
}
/* internal function to allocate bo's that use the ringbuffer cache
@ -132,249 +134,262 @@ _fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
struct fd_bo *
fd_bo_new_ring(struct fd_device *dev, uint32_t size)
{
uint32_t flags = DRM_FREEDRENO_GEM_GPUREADONLY;
struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
if (bo) {
bo->bo_reuse = RING_CACHE;
bo->flags |= FD_RELOC_DUMP;
fd_bo_set_name(bo, "cmdstream");
}
return bo;
uint32_t flags = DRM_FREEDRENO_GEM_GPUREADONLY;
struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache);
if (bo) {
bo->bo_reuse = RING_CACHE;
bo->flags |= FD_RELOC_DUMP;
fd_bo_set_name(bo, "cmdstream");
}
return bo;
}
struct fd_bo *
fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
{
struct fd_bo *bo = NULL;
struct fd_bo *bo = NULL;
simple_mtx_lock(&table_lock);
simple_mtx_lock(&table_lock);
bo = lookup_bo(dev->handle_table, handle);
if (bo)
goto out_unlock;
bo = lookup_bo(dev->handle_table, handle);
if (bo)
goto out_unlock;
bo = bo_from_handle(dev, size, handle);
bo = bo_from_handle(dev, size, handle);
VG_BO_ALLOC(bo);
VG_BO_ALLOC(bo);
out_unlock:
simple_mtx_unlock(&table_lock);
simple_mtx_unlock(&table_lock);
return bo;
return bo;
}
struct fd_bo *
fd_bo_from_dmabuf(struct fd_device *dev, int fd)
{
int ret, size;
uint32_t handle;
struct fd_bo *bo;
int ret, size;
uint32_t handle;
struct fd_bo *bo;
simple_mtx_lock(&table_lock);
ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
if (ret) {
simple_mtx_unlock(&table_lock);
return NULL;
}
simple_mtx_lock(&table_lock);
ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
if (ret) {
simple_mtx_unlock(&table_lock);
return NULL;
}
bo = lookup_bo(dev->handle_table, handle);
if (bo)
goto out_unlock;
bo = lookup_bo(dev->handle_table, handle);
if (bo)
goto out_unlock;
/* lseek() to get bo size */
size = lseek(fd, 0, SEEK_END);
lseek(fd, 0, SEEK_CUR);
/* lseek() to get bo size */
size = lseek(fd, 0, SEEK_END);
lseek(fd, 0, SEEK_CUR);
bo = bo_from_handle(dev, size, handle);
bo = bo_from_handle(dev, size, handle);
VG_BO_ALLOC(bo);
VG_BO_ALLOC(bo);
out_unlock:
simple_mtx_unlock(&table_lock);
simple_mtx_unlock(&table_lock);
return bo;
return bo;
}
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
struct fd_bo *
fd_bo_from_name(struct fd_device *dev, uint32_t name)
{
struct drm_gem_open req = {
.name = name,
};
struct fd_bo *bo;
struct drm_gem_open req = {
.name = name,
};
struct fd_bo *bo;
simple_mtx_lock(&table_lock);
simple_mtx_lock(&table_lock);
/* check name table first, to see if bo is already open: */
bo = lookup_bo(dev->name_table, name);
if (bo)
goto out_unlock;
/* check name table first, to see if bo is already open: */
bo = lookup_bo(dev->name_table, name);
if (bo)
goto out_unlock;
if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
ERROR_MSG("gem-open failed: %s", strerror(errno));
goto out_unlock;
}
if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
ERROR_MSG("gem-open failed: %s", strerror(errno));
goto out_unlock;
}
bo = lookup_bo(dev->handle_table, req.handle);
if (bo)
goto out_unlock;
bo = lookup_bo(dev->handle_table, req.handle);
if (bo)
goto out_unlock;
bo = bo_from_handle(dev, req.size, req.handle);
if (bo) {
set_name(bo, name);
VG_BO_ALLOC(bo);
}
bo = bo_from_handle(dev, req.size, req.handle);
if (bo) {
set_name(bo, name);
VG_BO_ALLOC(bo);
}
out_unlock:
simple_mtx_unlock(&table_lock);
simple_mtx_unlock(&table_lock);
return bo;
return bo;
}
void
fd_bo_mark_for_dump(struct fd_bo *bo)
{
bo->flags |= FD_RELOC_DUMP;
bo->flags |= FD_RELOC_DUMP;
}
uint64_t fd_bo_get_iova(struct fd_bo *bo)
uint64_t
fd_bo_get_iova(struct fd_bo *bo)
{
/* ancient kernels did not support this */
assert(bo->iova != 0);
return bo->iova;
/* ancient kernels did not support this */
assert(bo->iova != 0);
return bo->iova;
}
struct fd_bo * fd_bo_ref(struct fd_bo *bo)
struct fd_bo *
fd_bo_ref(struct fd_bo *bo)
{
p_atomic_inc(&bo->refcnt);
return bo;
p_atomic_inc(&bo->refcnt);
return bo;
}
void fd_bo_del(struct fd_bo *bo)
void
fd_bo_del(struct fd_bo *bo)
{
struct fd_device *dev = bo->dev;
struct fd_device *dev = bo->dev;
if (!p_atomic_dec_zero(&bo->refcnt))
return;
if (!p_atomic_dec_zero(&bo->refcnt))
return;
simple_mtx_lock(&table_lock);
simple_mtx_lock(&table_lock);
if ((bo->bo_reuse == BO_CACHE) && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
goto out;
if ((bo->bo_reuse == RING_CACHE) && (fd_bo_cache_free(&dev->ring_cache, bo) == 0))
goto out;
if ((bo->bo_reuse == BO_CACHE) &&
(fd_bo_cache_free(&dev->bo_cache, bo) == 0))
goto out;
if ((bo->bo_reuse == RING_CACHE) &&
(fd_bo_cache_free(&dev->ring_cache, bo) == 0))
goto out;
bo_del(bo);
bo_del(bo);
out:
simple_mtx_unlock(&table_lock);
simple_mtx_unlock(&table_lock);
}
/* Called under table_lock */
void bo_del(struct fd_bo *bo)
void
bo_del(struct fd_bo *bo)
{
VG_BO_FREE(bo);
VG_BO_FREE(bo);
simple_mtx_assert_locked(&table_lock);
simple_mtx_assert_locked(&table_lock);
if (bo->map)
os_munmap(bo->map, bo->size);
if (bo->map)
os_munmap(bo->map, bo->size);
/* TODO probably bo's in bucket list get removed from
* handle table??
*/
/* TODO probably bo's in bucket list get removed from
* handle table??
*/
if (bo->handle) {
struct drm_gem_close req = {
.handle = bo->handle,
};
_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
if (bo->name)
_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
if (bo->handle) {
struct drm_gem_close req = {
.handle = bo->handle,
};
_mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle);
if (bo->name)
_mesa_hash_table_remove_key(bo->dev->name_table, &bo->name);
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
bo->funcs->destroy(bo);
bo->funcs->destroy(bo);
}
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
int
fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
{
if (!bo->name) {
struct drm_gem_flink req = {
.handle = bo->handle,
};
int ret;
if (!bo->name) {
struct drm_gem_flink req = {
.handle = bo->handle,
};
int ret;
ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
if (ret) {
return ret;
}
ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
if (ret) {
return ret;
}
simple_mtx_lock(&table_lock);
set_name(bo, req.name);
simple_mtx_unlock(&table_lock);
bo->bo_reuse = NO_CACHE;
}
simple_mtx_lock(&table_lock);
set_name(bo, req.name);
simple_mtx_unlock(&table_lock);
bo->bo_reuse = NO_CACHE;
}
*name = bo->name;
*name = bo->name;
return 0;
return 0;
}
uint32_t fd_bo_handle(struct fd_bo *bo)
uint32_t
fd_bo_handle(struct fd_bo *bo)
{
bo->bo_reuse = NO_CACHE;
return bo->handle;
bo->bo_reuse = NO_CACHE;
return bo->handle;
}
int fd_bo_dmabuf(struct fd_bo *bo)
int
fd_bo_dmabuf(struct fd_bo *bo)
{
int ret, prime_fd;
int ret, prime_fd;
ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
&prime_fd);
if (ret) {
ERROR_MSG("failed to get dmabuf fd: %d", ret);
return ret;
}
ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &prime_fd);
if (ret) {
ERROR_MSG("failed to get dmabuf fd: %d", ret);
return ret;
}
bo->bo_reuse = NO_CACHE;
bo->bo_reuse = NO_CACHE;
return prime_fd;
return prime_fd;
}
uint32_t fd_bo_size(struct fd_bo *bo)
uint32_t
fd_bo_size(struct fd_bo *bo)
{
return bo->size;
return bo->size;
}
void * fd_bo_map(struct fd_bo *bo)
void *
fd_bo_map(struct fd_bo *bo)
{
if (!bo->map) {
uint64_t offset;
int ret;
if (!bo->map) {
uint64_t offset;
int ret;
ret = bo->funcs->offset(bo, &offset);
if (ret) {
return NULL;
}
ret = bo->funcs->offset(bo, &offset);
if (ret) {
return NULL;
}
bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
bo->dev->fd, offset);
if (bo->map == MAP_FAILED) {
ERROR_MSG("mmap failed: %s", strerror(errno));
bo->map = NULL;
}
}
return bo->map;
bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
bo->dev->fd, offset);
if (bo->map == MAP_FAILED) {
ERROR_MSG("mmap failed: %s", strerror(errno));
bo->map = NULL;
}
}
return bo->map;
}
/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. */
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
int
fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
return bo->funcs->cpu_prep(bo, pipe, op);
return bo->funcs->cpu_prep(bo, pipe, op);
}
void fd_bo_cpu_fini(struct fd_bo *bo)
void
fd_bo_cpu_fini(struct fd_bo *bo)
{
bo->funcs->cpu_fini(bo);
bo->funcs->cpu_fini(bo);
}

View file

@ -33,13 +33,13 @@ extern simple_mtx_t table_lock;
static void
add_bucket(struct fd_bo_cache *cache, int size)
{
unsigned int i = cache->num_buckets;
unsigned int i = cache->num_buckets;
assert(i < ARRAY_SIZE(cache->cache_bucket));
assert(i < ARRAY_SIZE(cache->cache_bucket));
list_inithead(&cache->cache_bucket[i].list);
cache->cache_bucket[i].size = size;
cache->num_buckets++;
list_inithead(&cache->cache_bucket[i].list);
cache->cache_bucket[i].size = size;
cache->num_buckets++;
}
/**
@ -49,165 +49,167 @@ add_bucket(struct fd_bo_cache *cache, int size)
void
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
{
unsigned long size, cache_max_size = 64 * 1024 * 1024;
unsigned long size, cache_max_size = 64 * 1024 * 1024;
/* OK, so power of two buckets was too wasteful of memory.
* Give 3 other sizes between each power of two, to hopefully
* cover things accurately enough. (The alternative is
* probably to just go for exact matching of sizes, and assume
* that for things like composited window resize the tiled
* width/height alignment and rounding of sizes to pages will
* get us useful cache hit rates anyway)
*/
add_bucket(cache, 4096);
add_bucket(cache, 4096 * 2);
if (!coarse)
add_bucket(cache, 4096 * 3);
/* OK, so power of two buckets was too wasteful of memory.
* Give 3 other sizes between each power of two, to hopefully
* cover things accurately enough. (The alternative is
* probably to just go for exact matching of sizes, and assume
* that for things like composited window resize the tiled
* width/height alignment and rounding of sizes to pages will
* get us useful cache hit rates anyway)
*/
add_bucket(cache, 4096);
add_bucket(cache, 4096 * 2);
if (!coarse)
add_bucket(cache, 4096 * 3);
/* Initialize the linked lists for BO reuse cache. */
for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
add_bucket(cache, size);
if (!coarse) {
add_bucket(cache, size + size * 1 / 4);
add_bucket(cache, size + size * 2 / 4);
add_bucket(cache, size + size * 3 / 4);
}
}
/* Initialize the linked lists for BO reuse cache. */
for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
add_bucket(cache, size);
if (!coarse) {
add_bucket(cache, size + size * 1 / 4);
add_bucket(cache, size + size * 2 / 4);
add_bucket(cache, size + size * 3 / 4);
}
}
}
/* Frees older cached buffers. Called under table_lock */
void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
int i;
int i;
if (cache->time == time)
return;
if (cache->time == time)
return;
for (i = 0; i < cache->num_buckets; i++) {
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
struct fd_bo *bo;
for (i = 0; i < cache->num_buckets; i++) {
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
struct fd_bo *bo;
while (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
while (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
/* keep things in cache for at least 1 second: */
if (time && ((time - bo->free_time) <= 1))
break;
/* keep things in cache for at least 1 second: */
if (time && ((time - bo->free_time) <= 1))
break;
VG_BO_OBTAIN(bo);
list_del(&bo->list);
bo_del(bo);
}
}
VG_BO_OBTAIN(bo);
list_del(&bo->list);
bo_del(bo);
}
}
cache->time = time;
cache->time = time;
}
static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
static struct fd_bo_bucket *
get_bucket(struct fd_bo_cache *cache, uint32_t size)
{
int i;
int i;
/* hmm, this is what intel does, but I suppose we could calculate our
* way to the correct bucket size rather than looping..
*/
for (i = 0; i < cache->num_buckets; i++) {
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
if (bucket->size >= size) {
return bucket;
}
}
/* hmm, this is what intel does, but I suppose we could calculate our
* way to the correct bucket size rather than looping..
*/
for (i = 0; i < cache->num_buckets; i++) {
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
if (bucket->size >= size) {
return bucket;
}
}
return NULL;
return NULL;
}
static int is_idle(struct fd_bo *bo)
static int
is_idle(struct fd_bo *bo)
{
return fd_bo_cpu_prep(bo, NULL,
DRM_FREEDRENO_PREP_READ |
DRM_FREEDRENO_PREP_WRITE |
DRM_FREEDRENO_PREP_NOSYNC) == 0;
return fd_bo_cpu_prep(bo, NULL,
DRM_FREEDRENO_PREP_READ | DRM_FREEDRENO_PREP_WRITE |
DRM_FREEDRENO_PREP_NOSYNC) == 0;
}
static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
static struct fd_bo *
find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
{
struct fd_bo *bo = NULL;
struct fd_bo *bo = NULL;
/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
* skip the busy check.. if it is only going to be a render target
* then we probably don't need to stall..
*
* NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
* (MRU, since likely to be in GPU cache), rather than head (LRU)..
*/
simple_mtx_lock(&table_lock);
if (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
/* TODO check for compatible flags? */
if (is_idle(bo)) {
list_del(&bo->list);
} else {
bo = NULL;
}
}
simple_mtx_unlock(&table_lock);
/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
* skip the busy check.. if it is only going to be a render target
* then we probably don't need to stall..
*
* NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
* (MRU, since likely to be in GPU cache), rather than head (LRU)..
*/
simple_mtx_lock(&table_lock);
if (!list_is_empty(&bucket->list)) {
bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
/* TODO check for compatible flags? */
if (is_idle(bo)) {
list_del(&bo->list);
} else {
bo = NULL;
}
}
simple_mtx_unlock(&table_lock);
return bo;
return bo;
}
/* NOTE: size is potentially rounded up to bucket size: */
struct fd_bo *
fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
{
struct fd_bo *bo = NULL;
struct fd_bo_bucket *bucket;
struct fd_bo *bo = NULL;
struct fd_bo_bucket *bucket;
*size = align(*size, 4096);
bucket = get_bucket(cache, *size);
*size = align(*size, 4096);
bucket = get_bucket(cache, *size);
/* see if we can be green and recycle: */
/* see if we can be green and recycle: */
retry:
if (bucket) {
*size = bucket->size;
bo = find_in_bucket(bucket, flags);
if (bo) {
VG_BO_OBTAIN(bo);
if (bo->funcs->madvise(bo, true) <= 0) {
/* we've lost the backing pages, delete and try again: */
simple_mtx_lock(&table_lock);
bo_del(bo);
simple_mtx_unlock(&table_lock);
goto retry;
}
p_atomic_set(&bo->refcnt, 1);
bo->flags = FD_RELOC_FLAGS_INIT;
return bo;
}
}
if (bucket) {
*size = bucket->size;
bo = find_in_bucket(bucket, flags);
if (bo) {
VG_BO_OBTAIN(bo);
if (bo->funcs->madvise(bo, true) <= 0) {
/* we've lost the backing pages, delete and try again: */
simple_mtx_lock(&table_lock);
bo_del(bo);
simple_mtx_unlock(&table_lock);
goto retry;
}
p_atomic_set(&bo->refcnt, 1);
bo->flags = FD_RELOC_FLAGS_INIT;
return bo;
}
}
return NULL;
return NULL;
}
int
fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
{
struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);
/* see if we can be green and recycle: */
if (bucket) {
struct timespec time;
/* see if we can be green and recycle: */
if (bucket) {
struct timespec time;
bo->funcs->madvise(bo, false);
bo->funcs->madvise(bo, false);
clock_gettime(CLOCK_MONOTONIC, &time);
clock_gettime(CLOCK_MONOTONIC, &time);
bo->free_time = time.tv_sec;
VG_BO_RELEASE(bo);
list_addtail(&bo->list, &bucket->list);
fd_bo_cache_cleanup(cache, time.tv_sec);
bo->free_time = time.tv_sec;
VG_BO_RELEASE(bo);
list_addtail(&bo->list, &bucket->list);
fd_bo_cache_cleanup(cache, time.tv_sec);
return 0;
}
return 0;
}
return -1;
return -1;
}

View file

@ -24,142 +24,154 @@
* Rob Clark <robclark@freedesktop.org>
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "util/os_file.h"
#include "freedreno_drmif.h"
#include "freedreno_priv.h"
struct fd_device * kgsl_device_new(int fd);
struct fd_device * msm_device_new(int fd);
struct fd_device *kgsl_device_new(int fd);
struct fd_device *msm_device_new(int fd);
struct fd_device * fd_device_new(int fd)
struct fd_device *
fd_device_new(int fd)
{
struct fd_device *dev;
drmVersionPtr version;
struct fd_device *dev;
drmVersionPtr version;
/* figure out if we are kgsl or msm drm driver: */
version = drmGetVersion(fd);
if (!version) {
ERROR_MSG("cannot get version: %s", strerror(errno));
return NULL;
}
/* figure out if we are kgsl or msm drm driver: */
version = drmGetVersion(fd);
if (!version) {
ERROR_MSG("cannot get version: %s", strerror(errno));
return NULL;
}
if (!strcmp(version->name, "msm")) {
DEBUG_MSG("msm DRM device");
if (version->version_major != 1) {
ERROR_MSG("unsupported version: %u.%u.%u", version->version_major,
version->version_minor, version->version_patchlevel);
dev = NULL;
goto out;
}
if (!strcmp(version->name, "msm")) {
DEBUG_MSG("msm DRM device");
if (version->version_major != 1) {
ERROR_MSG("unsupported version: %u.%u.%u", version->version_major,
version->version_minor, version->version_patchlevel);
dev = NULL;
goto out;
}
dev = msm_device_new(fd);
dev->version = version->version_minor;
dev = msm_device_new(fd);
dev->version = version->version_minor;
#if HAVE_FREEDRENO_KGSL
} else if (!strcmp(version->name, "kgsl")) {
DEBUG_MSG("kgsl DRM device");
dev = kgsl_device_new(fd);
} else if (!strcmp(version->name, "kgsl")) {
DEBUG_MSG("kgsl DRM device");
dev = kgsl_device_new(fd);
#endif
} else {
ERROR_MSG("unknown device: %s", version->name);
dev = NULL;
}
} else {
ERROR_MSG("unknown device: %s", version->name);
dev = NULL;
}
out:
drmFreeVersion(version);
drmFreeVersion(version);
if (!dev)
return NULL;
if (!dev)
return NULL;
p_atomic_set(&dev->refcnt, 1);
dev->fd = fd;
dev->handle_table = _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
dev->name_table = _mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
fd_bo_cache_init(&dev->bo_cache, false);
fd_bo_cache_init(&dev->ring_cache, true);
p_atomic_set(&dev->refcnt, 1);
dev->fd = fd;
dev->handle_table =
_mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
dev->name_table =
_mesa_hash_table_create(NULL, _mesa_hash_u32, _mesa_key_u32_equal);
fd_bo_cache_init(&dev->bo_cache, false);
fd_bo_cache_init(&dev->ring_cache, true);
return dev;
return dev;
}
/* like fd_device_new() but creates it's own private dup() of the fd
* which is close()d when the device is finalized.
*/
struct fd_device * fd_device_new_dup(int fd)
struct fd_device *
fd_device_new_dup(int fd)
{
int dup_fd = os_dupfd_cloexec(fd);
struct fd_device *dev = fd_device_new(dup_fd);
if (dev)
dev->closefd = 1;
else
close(dup_fd);
return dev;
int dup_fd = os_dupfd_cloexec(fd);
struct fd_device *dev = fd_device_new(dup_fd);
if (dev)
dev->closefd = 1;
else
close(dup_fd);
return dev;
}
struct fd_device * fd_device_ref(struct fd_device *dev)
struct fd_device *
fd_device_ref(struct fd_device *dev)
{
p_atomic_inc(&dev->refcnt);
return dev;
p_atomic_inc(&dev->refcnt);
return dev;
}
static void fd_device_del_impl(struct fd_device *dev)
static void
fd_device_del_impl(struct fd_device *dev)
{
int close_fd = dev->closefd ? dev->fd : -1;
int close_fd = dev->closefd ? dev->fd : -1;
simple_mtx_assert_locked(&table_lock);
simple_mtx_assert_locked(&table_lock);
fd_bo_cache_cleanup(&dev->bo_cache, 0);
fd_bo_cache_cleanup(&dev->ring_cache, 0);
_mesa_hash_table_destroy(dev->handle_table, NULL);
_mesa_hash_table_destroy(dev->name_table, NULL);
dev->funcs->destroy(dev);
if (close_fd >= 0)
close(close_fd);
fd_bo_cache_cleanup(&dev->bo_cache, 0);
fd_bo_cache_cleanup(&dev->ring_cache, 0);
_mesa_hash_table_destroy(dev->handle_table, NULL);
_mesa_hash_table_destroy(dev->name_table, NULL);
dev->funcs->destroy(dev);
if (close_fd >= 0)
close(close_fd);
}
void fd_device_del_locked(struct fd_device *dev)
void
fd_device_del_locked(struct fd_device *dev)
{
if (!p_atomic_dec_zero(&dev->refcnt))
return;
fd_device_del_impl(dev);
if (!p_atomic_dec_zero(&dev->refcnt))
return;
fd_device_del_impl(dev);
}
void fd_device_del(struct fd_device *dev)
void
fd_device_del(struct fd_device *dev)
{
if (!p_atomic_dec_zero(&dev->refcnt))
return;
simple_mtx_lock(&table_lock);
fd_device_del_impl(dev);
simple_mtx_unlock(&table_lock);
if (!p_atomic_dec_zero(&dev->refcnt))
return;
simple_mtx_lock(&table_lock);
fd_device_del_impl(dev);
simple_mtx_unlock(&table_lock);
}
int fd_device_fd(struct fd_device *dev)
int
fd_device_fd(struct fd_device *dev)
{
return dev->fd;
return dev->fd;
}
enum fd_version fd_device_version(struct fd_device *dev)
enum fd_version
fd_device_version(struct fd_device *dev)
{
return dev->version;
return dev->version;
}
bool fd_dbg(void)
bool
fd_dbg(void)
{
static int dbg;
static int dbg;
if (!dbg)
dbg = getenv("LIBGL_DEBUG") ? 1 : -1;
if (!dbg)
dbg = getenv("LIBGL_DEBUG") ? 1 : -1;
return dbg == 1;
return dbg == 1;
}
bool fd_has_syncobj(struct fd_device *dev)
bool
fd_has_syncobj(struct fd_device *dev)
{
uint64_t value;
if (drmGetCap(dev->fd, DRM_CAP_SYNCOBJ, &value))
return false;
return value && dev->version >= FD_VERSION_FENCE_FD;
uint64_t value;
if (drmGetCap(dev->fd, DRM_CAP_SYNCOBJ, &value))
return false;
return value && dev->version >= FD_VERSION_FENCE_FD;
}

View file

@ -40,65 +40,65 @@ struct fd_pipe;
struct fd_device;
enum fd_pipe_id {
FD_PIPE_3D = 1,
FD_PIPE_2D = 2,
/* some devices have two 2d blocks.. not really sure how to
* use that yet, so just ignoring the 2nd 2d pipe for now
*/
FD_PIPE_MAX
FD_PIPE_3D = 1,
FD_PIPE_2D = 2,
/* some devices have two 2d blocks.. not really sure how to
* use that yet, so just ignoring the 2nd 2d pipe for now
*/
FD_PIPE_MAX
};
enum fd_param_id {
FD_DEVICE_ID,
FD_GMEM_SIZE,
FD_GMEM_BASE,
FD_GPU_ID,
FD_CHIP_ID,
FD_MAX_FREQ,
FD_TIMESTAMP,
FD_NR_RINGS, /* # of rings == # of distinct priority levels */
FD_PP_PGTABLE, /* are per-process pagetables used for the pipe/ctx */
FD_CTX_FAULTS, /* # of per context faults */
FD_GLOBAL_FAULTS, /* # of global (all context) faults */
FD_DEVICE_ID,
FD_GMEM_SIZE,
FD_GMEM_BASE,
FD_GPU_ID,
FD_CHIP_ID,
FD_MAX_FREQ,
FD_TIMESTAMP,
FD_NR_RINGS, /* # of rings == # of distinct priority levels */
FD_PP_PGTABLE, /* are per-process pagetables used for the pipe/ctx */
FD_CTX_FAULTS, /* # of per context faults */
FD_GLOBAL_FAULTS, /* # of global (all context) faults */
};
/* bo flags: */
#define DRM_FREEDRENO_GEM_TYPE_SMI 0x00000001
#define DRM_FREEDRENO_GEM_TYPE_KMEM 0x00000002
#define DRM_FREEDRENO_GEM_TYPE_MEM_MASK 0x0000000f
#define DRM_FREEDRENO_GEM_CACHE_NONE 0x00000000
#define DRM_FREEDRENO_GEM_CACHE_WCOMBINE 0x00100000
#define DRM_FREEDRENO_GEM_CACHE_WTHROUGH 0x00200000
#define DRM_FREEDRENO_GEM_CACHE_WBACK 0x00400000
#define DRM_FREEDRENO_GEM_CACHE_WBACKWA 0x00800000
#define DRM_FREEDRENO_GEM_CACHE_MASK 0x00f00000
#define DRM_FREEDRENO_GEM_GPUREADONLY 0x01000000
#define DRM_FREEDRENO_GEM_SCANOUT 0x02000000
#define DRM_FREEDRENO_GEM_TYPE_SMI 0x00000001
#define DRM_FREEDRENO_GEM_TYPE_KMEM 0x00000002
#define DRM_FREEDRENO_GEM_TYPE_MEM_MASK 0x0000000f
#define DRM_FREEDRENO_GEM_CACHE_NONE 0x00000000
#define DRM_FREEDRENO_GEM_CACHE_WCOMBINE 0x00100000
#define DRM_FREEDRENO_GEM_CACHE_WTHROUGH 0x00200000
#define DRM_FREEDRENO_GEM_CACHE_WBACK 0x00400000
#define DRM_FREEDRENO_GEM_CACHE_WBACKWA 0x00800000
#define DRM_FREEDRENO_GEM_CACHE_MASK 0x00f00000
#define DRM_FREEDRENO_GEM_GPUREADONLY 0x01000000
#define DRM_FREEDRENO_GEM_SCANOUT 0x02000000
/* bo access flags: (keep aligned to MSM_PREP_x) */
#define DRM_FREEDRENO_PREP_READ 0x01
#define DRM_FREEDRENO_PREP_WRITE 0x02
#define DRM_FREEDRENO_PREP_NOSYNC 0x04
#define DRM_FREEDRENO_PREP_READ 0x01
#define DRM_FREEDRENO_PREP_WRITE 0x02
#define DRM_FREEDRENO_PREP_NOSYNC 0x04
/* device functions:
*/
struct fd_device * fd_device_new(int fd);
struct fd_device * fd_device_new_dup(int fd);
struct fd_device * fd_device_ref(struct fd_device *dev);
struct fd_device *fd_device_new(int fd);
struct fd_device *fd_device_new_dup(int fd);
struct fd_device *fd_device_ref(struct fd_device *dev);
void fd_device_del(struct fd_device *dev);
int fd_device_fd(struct fd_device *dev);
enum fd_version {
FD_VERSION_MADVISE = 1, /* kernel supports madvise */
FD_VERSION_UNLIMITED_CMDS = 1, /* submits w/ >4 cmd buffers (growable ringbuffer) */
FD_VERSION_FENCE_FD = 2, /* submit command supports in/out fences */
FD_VERSION_GMEM_BASE = 3, /* supports querying GMEM base address */
FD_VERSION_SUBMIT_QUEUES = 3, /* submit queues and multiple priority levels */
FD_VERSION_BO_IOVA = 3, /* supports fd_bo_get/put_iova() */
FD_VERSION_SOFTPIN = 4, /* adds softpin, bo name, and dump flag */
FD_VERSION_ROBUSTNESS = 5, /* adds FD_NR_FAULTS and FD_PP_PGTABLE */
FD_VERSION_MEMORY_FD = 2, /* supports shared memory objects */
FD_VERSION_MADVISE = 1, /* kernel supports madvise */
FD_VERSION_UNLIMITED_CMDS = 1, /* submits w/ >4 cmd buffers (growable ringbuffer) */
FD_VERSION_FENCE_FD = 2, /* submit command supports in/out fences */
FD_VERSION_GMEM_BASE = 3, /* supports querying GMEM base address */
FD_VERSION_SUBMIT_QUEUES = 3, /* submit queues and multiple priority levels */
FD_VERSION_BO_IOVA = 3, /* supports fd_bo_get/put_iova() */
FD_VERSION_SOFTPIN = 4, /* adds softpin, bo name, and dump flag */
FD_VERSION_ROBUSTNESS = 5, /* adds FD_NR_FAULTS and FD_PP_PGTABLE */
FD_VERSION_MEMORY_FD = 2, /* supports shared memory objects */
};
enum fd_version fd_device_version(struct fd_device *dev);
@ -107,72 +107,71 @@ bool fd_has_syncobj(struct fd_device *dev);
/* pipe functions:
*/
struct fd_pipe * fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id);
struct fd_pipe * fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio);
struct fd_pipe * fd_pipe_ref(struct fd_pipe *pipe);
struct fd_pipe *fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id);
struct fd_pipe *fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id,
uint32_t prio);
struct fd_pipe *fd_pipe_ref(struct fd_pipe *pipe);
void fd_pipe_del(struct fd_pipe *pipe);
int fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
uint64_t *value);
uint64_t *value);
int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp);
/* timeout in nanosec */
int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
uint64_t timeout);
uint64_t timeout);
/* buffer-object functions:
*/
struct fd_bo * _fd_bo_new(struct fd_device *dev,
uint32_t size, uint32_t flags);
struct fd_bo *_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags);
void _fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap);
static inline void
fd_bo_set_name(struct fd_bo *bo, const char *fmt, ...) _util_printf_format(2, 3);
static inline void fd_bo_set_name(struct fd_bo *bo, const char *fmt, ...)
_util_printf_format(2, 3);
static inline void
fd_bo_set_name(struct fd_bo *bo, const char *fmt, ...)
{
#ifndef NDEBUG
va_list ap;
va_start(ap, fmt);
_fd_bo_set_name(bo, fmt, ap);
va_end(ap);
va_list ap;
va_start(ap, fmt);
_fd_bo_set_name(bo, fmt, ap);
va_end(ap);
#endif
}
static inline struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
const char *fmt, ...) _util_printf_format(4, 5);
static inline struct fd_bo *fd_bo_new(struct fd_device *dev, uint32_t size,
uint32_t flags, const char *fmt, ...)
_util_printf_format(4, 5);
static inline struct fd_bo *
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags,
const char *fmt, ...)
fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags, const char *fmt,
...)
{
struct fd_bo *bo = _fd_bo_new(dev, size, flags);
struct fd_bo *bo = _fd_bo_new(dev, size, flags);
#ifndef NDEBUG
if (fmt) {
va_list ap;
va_start(ap, fmt);
_fd_bo_set_name(bo, fmt, ap);
va_end(ap);
}
if (fmt) {
va_list ap;
va_start(ap, fmt);
_fd_bo_set_name(bo, fmt, ap);
va_end(ap);
}
#endif
return bo;
return bo;
}
struct fd_bo *fd_bo_from_handle(struct fd_device *dev,
uint32_t handle, uint32_t size);
struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name);
struct fd_bo * fd_bo_from_dmabuf(struct fd_device *dev, int fd);
struct fd_bo *fd_bo_from_handle(struct fd_device *dev, uint32_t handle,
uint32_t size);
struct fd_bo *fd_bo_from_name(struct fd_device *dev, uint32_t name);
struct fd_bo *fd_bo_from_dmabuf(struct fd_device *dev, int fd);
void fd_bo_mark_for_dump(struct fd_bo *bo);
uint64_t fd_bo_get_iova(struct fd_bo *bo);
struct fd_bo * fd_bo_ref(struct fd_bo *bo);
struct fd_bo *fd_bo_ref(struct fd_bo *bo);
void fd_bo_del(struct fd_bo *bo);
int fd_bo_get_name(struct fd_bo *bo, uint32_t *name);
uint32_t fd_bo_handle(struct fd_bo *bo);
int fd_bo_dmabuf(struct fd_bo *bo);
uint32_t fd_bo_size(struct fd_bo *bo);
void * fd_bo_map(struct fd_bo *bo);
void *fd_bo_map(struct fd_bo *bo);
int fd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
void fd_bo_cpu_fini(struct fd_bo *bo);

View file

@ -34,67 +34,70 @@
struct fd_pipe *
fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
{
struct fd_pipe *pipe;
uint64_t val;
struct fd_pipe *pipe;
uint64_t val;
if (id > FD_PIPE_MAX) {
ERROR_MSG("invalid pipe id: %d", id);
return NULL;
}
if (id > FD_PIPE_MAX) {
ERROR_MSG("invalid pipe id: %d", id);
return NULL;
}
if ((prio != 1) && (fd_device_version(dev) < FD_VERSION_SUBMIT_QUEUES)) {
ERROR_MSG("invalid priority!");
return NULL;
}
if ((prio != 1) && (fd_device_version(dev) < FD_VERSION_SUBMIT_QUEUES)) {
ERROR_MSG("invalid priority!");
return NULL;
}
pipe = dev->funcs->pipe_new(dev, id, prio);
if (!pipe) {
ERROR_MSG("allocation failed");
return NULL;
}
pipe = dev->funcs->pipe_new(dev, id, prio);
if (!pipe) {
ERROR_MSG("allocation failed");
return NULL;
}
pipe->dev = dev;
pipe->id = id;
p_atomic_set(&pipe->refcnt, 1);
pipe->dev = dev;
pipe->id = id;
p_atomic_set(&pipe->refcnt, 1);
fd_pipe_get_param(pipe, FD_GPU_ID, &val);
pipe->gpu_id = val;
fd_pipe_get_param(pipe, FD_GPU_ID, &val);
pipe->gpu_id = val;
return pipe;
return pipe;
}
struct fd_pipe *
fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
{
return fd_pipe_new2(dev, id, 1);
return fd_pipe_new2(dev, id, 1);
}
struct fd_pipe * fd_pipe_ref(struct fd_pipe *pipe)
struct fd_pipe *
fd_pipe_ref(struct fd_pipe *pipe)
{
p_atomic_inc(&pipe->refcnt);
return pipe;
p_atomic_inc(&pipe->refcnt);
return pipe;
}
void fd_pipe_del(struct fd_pipe *pipe)
void
fd_pipe_del(struct fd_pipe *pipe)
{
if (!p_atomic_dec_zero(&pipe->refcnt))
return;
pipe->funcs->destroy(pipe);
if (!p_atomic_dec_zero(&pipe->refcnt))
return;
pipe->funcs->destroy(pipe);
}
int fd_pipe_get_param(struct fd_pipe *pipe,
enum fd_param_id param, uint64_t *value)
int
fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value)
{
return pipe->funcs->get_param(pipe, param, value);
return pipe->funcs->get_param(pipe, param, value);
}
int fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp)
int
fd_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp)
{
return fd_pipe_wait_timeout(pipe, timestamp, ~0);
return fd_pipe_wait_timeout(pipe, timestamp, ~0);
}
int fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp,
uint64_t timeout)
int
fd_pipe_wait_timeout(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout)
{
return pipe->funcs->wait(pipe, timestamp, timeout);
return pipe->funcs->wait(pipe, timestamp, timeout);
}

View file

@ -27,15 +27,14 @@
#ifndef FREEDRENO_PRIV_H_
#define FREEDRENO_PRIV_H_
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdio.h>
#include <xf86drm.h>
@ -43,10 +42,9 @@
#include "util/list.h"
#include "util/log.h"
#include "util/simple_mtx.h"
#include "util/u_debug.h"
#include "util/u_atomic.h"
#include "util/u_math.h"
#include "util/u_debug.h"
#include "util/u_math.h"
#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"
@ -54,161 +52,173 @@
extern simple_mtx_t table_lock;
struct fd_device_funcs {
int (*bo_new_handle)(struct fd_device *dev, uint32_t size,
uint32_t flags, uint32_t *handle);
struct fd_bo * (*bo_from_handle)(struct fd_device *dev,
uint32_t size, uint32_t handle);
struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
unsigned prio);
void (*destroy)(struct fd_device *dev);
int (*bo_new_handle)(struct fd_device *dev, uint32_t size, uint32_t flags,
uint32_t *handle);
struct fd_bo *(*bo_from_handle)(struct fd_device *dev, uint32_t size,
uint32_t handle);
struct fd_pipe *(*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
unsigned prio);
void (*destroy)(struct fd_device *dev);
};
struct fd_bo_bucket {
uint32_t size;
struct list_head list;
uint32_t size;
struct list_head list;
};
struct fd_bo_cache {
struct fd_bo_bucket cache_bucket[14 * 4];
int num_buckets;
time_t time;
struct fd_bo_bucket cache_bucket[14 * 4];
int num_buckets;
time_t time;
};
struct fd_device {
int fd;
enum fd_version version;
int32_t refcnt;
int fd;
enum fd_version version;
int32_t refcnt;
/* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
*
* handle_table: maps handle to fd_bo
* name_table: maps flink name to fd_bo
*
* We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
* returns a new handle. So we need to figure out if the bo is already
* open in the process first, before calling gem-open.
*/
struct hash_table *handle_table, *name_table;
/* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
*
* handle_table: maps handle to fd_bo
* name_table: maps flink name to fd_bo
*
* We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
* returns a new handle. So we need to figure out if the bo is already
* open in the process first, before calling gem-open.
*/
struct hash_table *handle_table, *name_table;
const struct fd_device_funcs *funcs;
const struct fd_device_funcs *funcs;
struct fd_bo_cache bo_cache;
struct fd_bo_cache ring_cache;
struct fd_bo_cache bo_cache;
struct fd_bo_cache ring_cache;
int closefd; /* call close(fd) upon destruction */
int closefd; /* call close(fd) upon destruction */
/* just for valgrind: */
int bo_size;
/* just for valgrind: */
int bo_size;
};
void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
uint32_t *size, uint32_t flags);
struct fd_bo *fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size,
uint32_t flags);
int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);
/* for where @table_lock is already held: */
void fd_device_del_locked(struct fd_device *dev);
struct fd_pipe_funcs {
struct fd_ringbuffer * (*ringbuffer_new_object)(struct fd_pipe *pipe, uint32_t size);
struct fd_submit * (*submit_new)(struct fd_pipe *pipe);
int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value);
int (*wait)(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout);
void (*destroy)(struct fd_pipe *pipe);
struct fd_ringbuffer *(*ringbuffer_new_object)(struct fd_pipe *pipe,
uint32_t size);
struct fd_submit *(*submit_new)(struct fd_pipe *pipe);
int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param,
uint64_t *value);
int (*wait)(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout);
void (*destroy)(struct fd_pipe *pipe);
};
struct fd_pipe {
struct fd_device *dev;
enum fd_pipe_id id;
uint32_t gpu_id;
int32_t refcnt;
const struct fd_pipe_funcs *funcs;
struct fd_device *dev;
enum fd_pipe_id id;
uint32_t gpu_id;
int32_t refcnt;
const struct fd_pipe_funcs *funcs;
};
struct fd_submit_funcs {
struct fd_ringbuffer * (*new_ringbuffer)(struct fd_submit *submit,
uint32_t size, enum fd_ringbuffer_flags flags);
int (*flush)(struct fd_submit *submit, int in_fence_fd,
int *out_fence_fd, uint32_t *out_fence);
void (*destroy)(struct fd_submit *submit);
struct fd_ringbuffer *(*new_ringbuffer)(struct fd_submit *submit,
uint32_t size,
enum fd_ringbuffer_flags flags);
int (*flush)(struct fd_submit *submit, int in_fence_fd, int *out_fence_fd,
uint32_t *out_fence);
void (*destroy)(struct fd_submit *submit);
};
struct fd_submit {
struct fd_pipe *pipe;
const struct fd_submit_funcs *funcs;
struct fd_pipe *pipe;
const struct fd_submit_funcs *funcs;
};
struct fd_bo_funcs {
int (*offset)(struct fd_bo *bo, uint64_t *offset);
int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
void (*cpu_fini)(struct fd_bo *bo);
int (*madvise)(struct fd_bo *bo, int willneed);
uint64_t (*iova)(struct fd_bo *bo);
void (*set_name)(struct fd_bo *bo, const char *fmt, va_list ap);
void (*destroy)(struct fd_bo *bo);
int (*offset)(struct fd_bo *bo, uint64_t *offset);
int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
void (*cpu_fini)(struct fd_bo *bo);
int (*madvise)(struct fd_bo *bo, int willneed);
uint64_t (*iova)(struct fd_bo *bo);
void (*set_name)(struct fd_bo *bo, const char *fmt, va_list ap);
void (*destroy)(struct fd_bo *bo);
};
struct fd_bo {
struct fd_device *dev;
uint32_t size;
uint32_t handle;
uint32_t name;
int32_t refcnt;
uint32_t flags; /* flags like FD_RELOC_DUMP to use for relocs to this BO */
uint64_t iova;
void *map;
const struct fd_bo_funcs *funcs;
struct fd_device *dev;
uint32_t size;
uint32_t handle;
uint32_t name;
int32_t refcnt;
uint32_t flags; /* flags like FD_RELOC_DUMP to use for relocs to this BO */
uint64_t iova;
void *map;
const struct fd_bo_funcs *funcs;
enum {
NO_CACHE = 0,
BO_CACHE = 1,
RING_CACHE = 2,
} bo_reuse;
enum {
NO_CACHE = 0,
BO_CACHE = 1,
RING_CACHE = 2,
} bo_reuse;
struct list_head list; /* bucket-list entry */
time_t free_time; /* time when added to bucket-list */
struct list_head list; /* bucket-list entry */
time_t free_time; /* time when added to bucket-list */
};
struct fd_bo *fd_bo_new_ring(struct fd_device *dev, uint32_t size);
#define enable_debug 0 /* TODO make dynamic */
#define enable_debug 0 /* TODO make dynamic */
bool fd_dbg(void);
#define INFO_MSG(fmt, ...) \
do { if (fd_dbg()) mesa_logi("%s:%d: "fmt, \
__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
#define DEBUG_MSG(fmt, ...) \
do if (enable_debug) { mesa_logd("%s:%d: "fmt, \
__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
#define WARN_MSG(fmt, ...) \
do { mesa_logw("%s:%d: "fmt, \
__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
#define ERROR_MSG(fmt, ...) \
do { mesa_loge("%s:%d: " fmt, \
__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
#define INFO_MSG(fmt, ...) \
do { \
if (fd_dbg()) \
mesa_logi("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
} while (0)
#define DEBUG_MSG(fmt, ...) \
do \
if (enable_debug) { \
mesa_logd("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
} \
while (0)
#define WARN_MSG(fmt, ...) \
do { \
mesa_logw("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
} while (0)
#define ERROR_MSG(fmt, ...) \
do { \
mesa_loge("%s:%d: " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
} while (0)
#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
#if HAVE_VALGRIND
# include <memcheck.h>
#include <memcheck.h>
/*
* For tracking the backing memory (if valgrind enabled, we force a mmap
* for the purposes of tracking)
*/
static inline void VG_BO_ALLOC(struct fd_bo *bo)
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
if (bo && RUNNING_ON_VALGRIND) {
VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
}
if (bo && RUNNING_ON_VALGRIND) {
VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
}
}
static inline void VG_BO_FREE(struct fd_bo *bo)
static inline void
VG_BO_FREE(struct fd_bo *bo)
{
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}
/*
@ -221,32 +231,47 @@ static inline void VG_BO_FREE(struct fd_bo *bo)
* valgrind doesn't squawk about list traversal.
*
*/
static inline void VG_BO_RELEASE(struct fd_bo *bo)
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
if (RUNNING_ON_VALGRIND) {
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}
if (RUNNING_ON_VALGRIND) {
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}
}
static inline void VG_BO_OBTAIN(struct fd_bo *bo)
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
}
if (RUNNING_ON_VALGRIND) {
VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
}
}
#else
static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
static inline void VG_BO_FREE(struct fd_bo *bo) {}
static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
}
static inline void
VG_BO_FREE(struct fd_bo *bo)
{
}
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
}
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
}
#endif
#define FD_DEFINE_CAST(parent, child) \
static inline struct child * to_ ## child (struct parent *x) \
{ return (struct child *)x; }
#define FD_DEFINE_CAST(parent, child) \
static inline struct child *to_##child(struct parent *x) \
{ \
return (struct child *)x; \
}
#endif /* FREEDRENO_PRIV_H_ */

View file

@ -27,42 +27,42 @@
#include <assert.h>
#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"
#include "freedreno_priv.h"
#include "freedreno_ringbuffer.h"
struct fd_submit *
fd_submit_new(struct fd_pipe *pipe)
{
return pipe->funcs->submit_new(pipe);
return pipe->funcs->submit_new(pipe);
}
void
fd_submit_del(struct fd_submit *submit)
{
return submit->funcs->destroy(submit);
return submit->funcs->destroy(submit);
}
int
fd_submit_flush(struct fd_submit *submit, int in_fence_fd, int *out_fence_fd,
uint32_t *out_fence)
uint32_t *out_fence)
{
return submit->funcs->flush(submit, in_fence_fd, out_fence_fd, out_fence);
return submit->funcs->flush(submit, in_fence_fd, out_fence_fd, out_fence);
}
struct fd_ringbuffer *
fd_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
enum fd_ringbuffer_flags flags)
enum fd_ringbuffer_flags flags)
{
debug_assert(!(flags & _FD_RINGBUFFER_OBJECT));
if (flags & FD_RINGBUFFER_STREAMING) {
debug_assert(!(flags & FD_RINGBUFFER_GROWABLE));
debug_assert(!(flags & FD_RINGBUFFER_PRIMARY));
}
return submit->funcs->new_ringbuffer(submit, size, flags);
debug_assert(!(flags & _FD_RINGBUFFER_OBJECT));
if (flags & FD_RINGBUFFER_STREAMING) {
debug_assert(!(flags & FD_RINGBUFFER_GROWABLE));
debug_assert(!(flags & FD_RINGBUFFER_PRIMARY));
}
return submit->funcs->new_ringbuffer(submit, size, flags);
}
struct fd_ringbuffer *
fd_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
{
return pipe->funcs->ringbuffer_new_object(pipe, size);
return pipe->funcs->ringbuffer_new_object(pipe, size);
}

View file

@ -32,9 +32,9 @@
#include "util/u_debug.h"
#include "util/u_dynarray.h"
#include "freedreno_drmif.h"
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#include "freedreno_drmif.h"
#ifdef __cplusplus
extern "C" {
@ -45,40 +45,40 @@ struct fd_ringbuffer;
enum fd_ringbuffer_flags {
/* Primary ringbuffer for a submit, ie. an IB1 level rb
* which kernel must setup RB->IB1 CP_INDIRECT_BRANCH
* packets.
*/
FD_RINGBUFFER_PRIMARY = 0x1,
/* Primary ringbuffer for a submit, ie. an IB1 level rb
* which kernel must setup RB->IB1 CP_INDIRECT_BRANCH
* packets.
*/
FD_RINGBUFFER_PRIMARY = 0x1,
/* Hint that the stateobj will be used for streaming state
* that is used once or a few times and then discarded.
*
* For sub-allocation, non streaming stateobj's should be
* sub-allocated from a page size buffer, so one long lived
* state obj doesn't prevent other pages from being freed.
* (Ie. it would be no worse than allocating a page sized
* bo for each small non-streaming stateobj).
*
* But streaming stateobj's could be sub-allocated from a
* larger buffer to reduce the alloc/del overhead.
*/
FD_RINGBUFFER_STREAMING = 0x2,
/* Hint that the stateobj will be used for streaming state
* that is used once or a few times and then discarded.
*
* For sub-allocation, non streaming stateobj's should be
* sub-allocated from a page size buffer, so one long lived
* state obj doesn't prevent other pages from being freed.
* (Ie. it would be no worse than allocating a page sized
* bo for each small non-streaming stateobj).
*
* But streaming stateobj's could be sub-allocated from a
* larger buffer to reduce the alloc/del overhead.
*/
FD_RINGBUFFER_STREAMING = 0x2,
/* Indicates that "growable" cmdstream can be used,
* consisting of multiple physical cmdstream buffers
*/
FD_RINGBUFFER_GROWABLE = 0x4,
/* Indicates that "growable" cmdstream can be used,
* consisting of multiple physical cmdstream buffers
*/
FD_RINGBUFFER_GROWABLE = 0x4,
/* Internal use only: */
_FD_RINGBUFFER_OBJECT = 0x8,
/* Internal use only: */
_FD_RINGBUFFER_OBJECT = 0x8,
};
/* A submit object manages/tracks all the state buildup for a "submit"
* ioctl to the kernel. Additionally, with the exception of long-lived
* non-STREAMING stateobj rb's, rb's are allocated from the submit.
*/
struct fd_submit * fd_submit_new(struct fd_pipe *pipe);
struct fd_submit *fd_submit_new(struct fd_pipe *pipe);
/* NOTE: all ringbuffer's create from the submit should be unref'd
* before destroying the submit.
@ -86,27 +86,26 @@ struct fd_submit * fd_submit_new(struct fd_pipe *pipe);
void fd_submit_del(struct fd_submit *submit);
/* Allocate a new rb from the submit. */
struct fd_ringbuffer * fd_submit_new_ringbuffer(struct fd_submit *submit,
uint32_t size, enum fd_ringbuffer_flags flags);
struct fd_ringbuffer *fd_submit_new_ringbuffer(struct fd_submit *submit,
uint32_t size,
enum fd_ringbuffer_flags flags);
/* in_fence_fd: -1 for no in-fence, else fence fd
* out_fence_fd: NULL for no output-fence requested, else ptr to return out-fence
*/
int fd_submit_flush(struct fd_submit *submit,
int in_fence_fd, int *out_fence_fd,
uint32_t *out_fence);
int fd_submit_flush(struct fd_submit *submit, int in_fence_fd,
int *out_fence_fd, uint32_t *out_fence);
struct fd_ringbuffer;
struct fd_reloc;
struct fd_ringbuffer_funcs {
void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
void (*emit_reloc)(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc);
uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx);
uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
void (*destroy)(struct fd_ringbuffer *ring);
void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
void (*emit_reloc)(struct fd_ringbuffer *ring, const struct fd_reloc *reloc);
uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx);
uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
void (*destroy)(struct fd_ringbuffer *ring);
};
/* the ringbuffer object is not opaque so that OUT_RING() type stuff
@ -114,67 +113,65 @@ struct fd_ringbuffer_funcs {
* the size of this struct.
*/
struct fd_ringbuffer {
uint32_t *cur, *end, *start;
const struct fd_ringbuffer_funcs *funcs;
uint32_t *cur, *end, *start;
const struct fd_ringbuffer_funcs *funcs;
// size or end coudl probably go away
int size;
int32_t refcnt;
enum fd_ringbuffer_flags flags;
// size or end coudl probably go away
int size;
int32_t refcnt;
enum fd_ringbuffer_flags flags;
};
/* Allocate a new long-lived state object, not associated with
* a submit:
*/
struct fd_ringbuffer * fd_ringbuffer_new_object(struct fd_pipe *pipe,
uint32_t size);
struct fd_ringbuffer *fd_ringbuffer_new_object(struct fd_pipe *pipe,
uint32_t size);
static inline void
fd_ringbuffer_del(struct fd_ringbuffer *ring)
{
if (!p_atomic_dec_zero(&ring->refcnt))
return;
if (!p_atomic_dec_zero(&ring->refcnt))
return;
ring->funcs->destroy(ring);
ring->funcs->destroy(ring);
}
static inline
struct fd_ringbuffer *
static inline struct fd_ringbuffer *
fd_ringbuffer_ref(struct fd_ringbuffer *ring)
{
p_atomic_inc(&ring->refcnt);
return ring;
p_atomic_inc(&ring->refcnt);
return ring;
}
static inline void
fd_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t ndwords)
{
assert(ring->funcs->grow); /* unsupported on kgsl */
assert(ring->funcs->grow); /* unsupported on kgsl */
/* there is an upper bound on IB size, which appears to be 0x100000 */
if (ring->size < 0x100000)
ring->size *= 2;
/* there is an upper bound on IB size, which appears to be 0x100000 */
if (ring->size < 0x100000)
ring->size *= 2;
ring->funcs->grow(ring, ring->size);
ring->funcs->grow(ring, ring->size);
}
static inline void
fd_ringbuffer_emit(struct fd_ringbuffer *ring,
uint32_t data)
fd_ringbuffer_emit(struct fd_ringbuffer *ring, uint32_t data)
{
(*ring->cur++) = data;
(*ring->cur++) = data;
}
struct fd_reloc {
struct fd_bo *bo;
uint64_t iova;
#define FD_RELOC_READ 0x0001
#define FD_RELOC_WRITE 0x0002
#define FD_RELOC_DUMP 0x0004
uint32_t offset;
uint32_t orlo;
int32_t shift;
uint32_t orhi; /* used for a5xx+ */
struct fd_bo *bo;
uint64_t iova;
#define FD_RELOC_READ 0x0001
#define FD_RELOC_WRITE 0x0002
#define FD_RELOC_DUMP 0x0004
uint32_t offset;
uint32_t orlo;
int32_t shift;
uint32_t orhi; /* used for a5xx+ */
};
/* We always mark BOs for write, instead of tracking it across reloc
@ -190,42 +187,42 @@ struct fd_reloc {
/* NOTE: relocs are 2 dwords on a5xx+ */
static inline void
fd_ringbuffer_reloc(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
fd_ringbuffer_reloc(struct fd_ringbuffer *ring, const struct fd_reloc *reloc)
{
ring->funcs->emit_reloc(ring, reloc);
ring->funcs->emit_reloc(ring, reloc);
}
static inline uint32_t
fd_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
{
if (!ring->funcs->cmd_count)
return 1;
return ring->funcs->cmd_count(ring);
if (!ring->funcs->cmd_count)
return 1;
return ring->funcs->cmd_count(ring);
}
static inline uint32_t
fd_ringbuffer_emit_reloc_ring_full(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx)
struct fd_ringbuffer *target,
uint32_t cmd_idx)
{
return ring->funcs->emit_reloc_ring(ring, target, cmd_idx);
return ring->funcs->emit_reloc_ring(ring, target, cmd_idx);
}
static inline uint32_t
offset_bytes(void *end, void *start)
{
return ((char *)end) - ((char *)start);
return ((char *)end) - ((char *)start);
}
static inline uint32_t
fd_ringbuffer_size(struct fd_ringbuffer *ring)
{
/* only really needed for stateobj ringbuffers, and won't really
* do what you expect for growable rb's.. so lets just restrict
* this to stateobj's for now:
*/
debug_assert(!(ring->flags & FD_RINGBUFFER_GROWABLE));
return offset_bytes(ring->cur, ring->start);
/* only really needed for stateobj ringbuffers, and won't really
* do what you expect for growable rb's.. so lets just restrict
* this to stateobj's for now:
*/
debug_assert(!(ring->flags & FD_RINGBUFFER_GROWABLE));
return offset_bytes(ring->cur, ring->start);
}
#define LOG_DWORDS 0
@ -233,11 +230,11 @@ fd_ringbuffer_size(struct fd_ringbuffer *ring)
static inline void
OUT_RING(struct fd_ringbuffer *ring, uint32_t data)
{
if (LOG_DWORDS) {
fprintf(stderr, "ring[%p]: OUT_RING %04x: %08x", ring,
(uint32_t)(ring->cur - ring->start), data);
}
fd_ringbuffer_emit(ring, data);
if (LOG_DWORDS) {
fprintf(stderr, "ring[%p]: OUT_RING %04x: %08x", ring,
(uint32_t)(ring->cur - ring->start), data);
}
fd_ringbuffer_emit(ring, data);
}
/*
@ -245,66 +242,68 @@ OUT_RING(struct fd_ringbuffer *ring, uint32_t data)
*/
#ifndef __cplusplus
static inline void
OUT_RELOC(struct fd_ringbuffer *ring, struct fd_bo *bo,
uint32_t offset, uint64_t or, int32_t shift)
OUT_RELOC(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t offset,
uint64_t or, int32_t shift)
{
if (LOG_DWORDS) {
fprintf(stderr, "ring[%p]: OUT_RELOC %04x: %p+%u << %d", ring,
(uint32_t)(ring->cur - ring->start), bo, offset, shift);
}
debug_assert(offset < fd_bo_size(bo));
if (LOG_DWORDS) {
fprintf(stderr, "ring[%p]: OUT_RELOC %04x: %p+%u << %d", ring,
(uint32_t)(ring->cur - ring->start), bo, offset, shift);
}
debug_assert(offset < fd_bo_size(bo));
uint64_t iova = fd_bo_get_iova(bo) + offset;
uint64_t iova = fd_bo_get_iova(bo) + offset;
if (shift < 0)
iova >>= -shift;
else
iova <<= shift;
if (shift < 0)
iova >>= -shift;
else
iova <<= shift;
iova |= or;
iova |= or ;
fd_ringbuffer_reloc(ring, &(struct fd_reloc){
.bo = bo,
.iova = iova,
.offset = offset,
.orlo = or,
.shift = shift,
.orhi = or >> 32,
});
fd_ringbuffer_reloc(ring, &(struct fd_reloc){
.bo = bo,
.iova = iova,
.offset = offset,
.orlo = or
,
.shift = shift,
.orhi = or >> 32,
});
}
#endif
static inline void
OUT_RB(struct fd_ringbuffer *ring, struct fd_ringbuffer *target)
{
fd_ringbuffer_emit_reloc_ring_full(ring, target, 0);
fd_ringbuffer_emit_reloc_ring_full(ring, target, 0);
}
static inline void BEGIN_RING(struct fd_ringbuffer *ring, uint32_t ndwords)
static inline void
BEGIN_RING(struct fd_ringbuffer *ring, uint32_t ndwords)
{
if (unlikely(ring->cur + ndwords > ring->end))
fd_ringbuffer_grow(ring, ndwords);
if (unlikely(ring->cur + ndwords > ring->end))
fd_ringbuffer_grow(ring, ndwords);
}
static inline void
OUT_PKT0(struct fd_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
BEGIN_RING(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
BEGIN_RING(ring, cnt + 1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt - 1) << 16) | (regindx & 0x7FFF));
}
static inline void
OUT_PKT2(struct fd_ringbuffer *ring)
{
BEGIN_RING(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
BEGIN_RING(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
BEGIN_RING(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
BEGIN_RING(ring, cnt + 1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt - 1) << 16) | ((opcode & 0xFF) << 8));
}
/*
@ -314,47 +313,45 @@ OUT_PKT3(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
static inline unsigned
_odd_parity_bit(unsigned val)
{
/* See: http://graphics.stanford.edu/~seander/bithacks.html#ParityParallel
* note that we want odd parity so 0x6996 is inverted.
*/
val ^= val >> 16;
val ^= val >> 8;
val ^= val >> 4;
val &= 0xf;
return (~0x6996 >> val) & 1;
/* See: http://graphics.stanford.edu/~seander/bithacks.html#ParityParallel
* note that we want odd parity so 0x6996 is inverted.
*/
val ^= val >> 16;
val ^= val >> 8;
val ^= val >> 4;
val &= 0xf;
return (~0x6996 >> val) & 1;
}
static inline void
OUT_PKT4(struct fd_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
BEGIN_RING(ring, cnt+1);
OUT_RING(ring, CP_TYPE4_PKT | cnt |
(_odd_parity_bit(cnt) << 7) |
((regindx & 0x3ffff) << 8) |
((_odd_parity_bit(regindx) << 27)));
BEGIN_RING(ring, cnt + 1);
OUT_RING(ring, CP_TYPE4_PKT | cnt | (_odd_parity_bit(cnt) << 7) |
((regindx & 0x3ffff) << 8) |
((_odd_parity_bit(regindx) << 27)));
}
static inline void
OUT_PKT7(struct fd_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
BEGIN_RING(ring, cnt+1);
OUT_RING(ring, CP_TYPE7_PKT | cnt |
(_odd_parity_bit(cnt) << 15) |
((opcode & 0x7f) << 16) |
((_odd_parity_bit(opcode) << 23)));
BEGIN_RING(ring, cnt + 1);
OUT_RING(ring, CP_TYPE7_PKT | cnt | (_odd_parity_bit(cnt) << 15) |
((opcode & 0x7f) << 16) |
((_odd_parity_bit(opcode) << 23)));
}
static inline void
OUT_WFI(struct fd_ringbuffer *ring)
{
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
}
static inline void
OUT_WFI5(struct fd_ringbuffer *ring)
{
OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
}
#ifdef __cplusplus

View file

@ -26,173 +26,182 @@
#include "msm_priv.h"
static int bo_allocate(struct msm_bo *msm_bo)
static int
bo_allocate(struct msm_bo *msm_bo)
{
struct fd_bo *bo = &msm_bo->base;
if (!msm_bo->offset) {
struct drm_msm_gem_info req = {
.handle = bo->handle,
.info = MSM_INFO_GET_OFFSET,
};
int ret;
struct fd_bo *bo = &msm_bo->base;
if (!msm_bo->offset) {
struct drm_msm_gem_info req = {
.handle = bo->handle,
.info = MSM_INFO_GET_OFFSET,
};
int ret;
/* if the buffer is already backed by pages then this
* doesn't actually do anything (other than giving us
* the offset)
*/
ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_INFO,
&req, sizeof(req));
if (ret) {
ERROR_MSG("alloc failed: %s", strerror(errno));
return ret;
}
/* if the buffer is already backed by pages then this
* doesn't actually do anything (other than giving us
* the offset)
*/
ret =
drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
if (ret) {
ERROR_MSG("alloc failed: %s", strerror(errno));
return ret;
}
msm_bo->offset = req.value;
}
msm_bo->offset = req.value;
}
return 0;
return 0;
}
static int msm_bo_offset(struct fd_bo *bo, uint64_t *offset)
static int
msm_bo_offset(struct fd_bo *bo, uint64_t *offset)
{
struct msm_bo *msm_bo = to_msm_bo(bo);
int ret = bo_allocate(msm_bo);
if (ret)
return ret;
*offset = msm_bo->offset;
return 0;
struct msm_bo *msm_bo = to_msm_bo(bo);
int ret = bo_allocate(msm_bo);
if (ret)
return ret;
*offset = msm_bo->offset;
return 0;
}
static int msm_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
static int
msm_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
{
struct drm_msm_gem_cpu_prep req = {
.handle = bo->handle,
.op = op,
};
struct drm_msm_gem_cpu_prep req = {
.handle = bo->handle,
.op = op,
};
get_abs_timeout(&req.timeout, 5000000000);
get_abs_timeout(&req.timeout, 5000000000);
return drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_PREP, &req, sizeof(req));
return drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_PREP, &req, sizeof(req));
}
static void msm_bo_cpu_fini(struct fd_bo *bo)
static void
msm_bo_cpu_fini(struct fd_bo *bo)
{
struct drm_msm_gem_cpu_fini req = {
.handle = bo->handle,
};
struct drm_msm_gem_cpu_fini req = {
.handle = bo->handle,
};
drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_FINI, &req, sizeof(req));
drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_FINI, &req, sizeof(req));
}
static int msm_bo_madvise(struct fd_bo *bo, int willneed)
static int
msm_bo_madvise(struct fd_bo *bo, int willneed)
{
struct drm_msm_gem_madvise req = {
.handle = bo->handle,
.madv = willneed ? MSM_MADV_WILLNEED : MSM_MADV_DONTNEED,
};
int ret;
struct drm_msm_gem_madvise req = {
.handle = bo->handle,
.madv = willneed ? MSM_MADV_WILLNEED : MSM_MADV_DONTNEED,
};
int ret;
/* older kernels do not support this: */
if (bo->dev->version < FD_VERSION_MADVISE)
return willneed;
/* older kernels do not support this: */
if (bo->dev->version < FD_VERSION_MADVISE)
return willneed;
ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
if (ret)
return ret;
ret =
drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_MADVISE, &req, sizeof(req));
if (ret)
return ret;
return req.retained;
return req.retained;
}
static uint64_t msm_bo_iova(struct fd_bo *bo)
static uint64_t
msm_bo_iova(struct fd_bo *bo)
{
struct drm_msm_gem_info req = {
.handle = bo->handle,
.info = MSM_INFO_GET_IOVA,
};
int ret;
struct drm_msm_gem_info req = {
.handle = bo->handle,
.info = MSM_INFO_GET_IOVA,
};
int ret;
ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
if (ret)
return 0;
ret = drmCommandWriteRead(bo->dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
if (ret)
return 0;
return req.value;
return req.value;
}
static void msm_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
static void
msm_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
{
struct drm_msm_gem_info req = {
.handle = bo->handle,
.info = MSM_INFO_SET_NAME,
};
char buf[32];
int sz;
struct drm_msm_gem_info req = {
.handle = bo->handle,
.info = MSM_INFO_SET_NAME,
};
char buf[32];
int sz;
if (bo->dev->version < FD_VERSION_SOFTPIN)
return;
if (bo->dev->version < FD_VERSION_SOFTPIN)
return;
sz = vsnprintf(buf, sizeof(buf), fmt, ap);
sz = vsnprintf(buf, sizeof(buf), fmt, ap);
req.value = VOID2U64(buf);
req.len = MIN2(sz, sizeof(buf));
req.value = VOID2U64(buf);
req.len = MIN2(sz, sizeof(buf));
drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
}
static void msm_bo_destroy(struct fd_bo *bo)
static void
msm_bo_destroy(struct fd_bo *bo)
{
struct msm_bo *msm_bo = to_msm_bo(bo);
free(msm_bo);
struct msm_bo *msm_bo = to_msm_bo(bo);
free(msm_bo);
}
static const struct fd_bo_funcs funcs = {
.offset = msm_bo_offset,
.cpu_prep = msm_bo_cpu_prep,
.cpu_fini = msm_bo_cpu_fini,
.madvise = msm_bo_madvise,
.iova = msm_bo_iova,
.set_name = msm_bo_set_name,
.destroy = msm_bo_destroy,
.offset = msm_bo_offset,
.cpu_prep = msm_bo_cpu_prep,
.cpu_fini = msm_bo_cpu_fini,
.madvise = msm_bo_madvise,
.iova = msm_bo_iova,
.set_name = msm_bo_set_name,
.destroy = msm_bo_destroy,
};
/* allocate a buffer handle: */
int msm_bo_new_handle(struct fd_device *dev,
uint32_t size, uint32_t flags, uint32_t *handle)
int
msm_bo_new_handle(struct fd_device *dev, uint32_t size, uint32_t flags,
uint32_t *handle)
{
struct drm_msm_gem_new req = {
.size = size,
.flags = MSM_BO_WC, // TODO figure out proper flags..
};
int ret;
struct drm_msm_gem_new req = {
.size = size,
.flags = MSM_BO_WC, // TODO figure out proper flags..
};
int ret;
if (flags & DRM_FREEDRENO_GEM_SCANOUT)
req.flags |= MSM_BO_SCANOUT;
if (flags & DRM_FREEDRENO_GEM_SCANOUT)
req.flags |= MSM_BO_SCANOUT;
if (flags & DRM_FREEDRENO_GEM_GPUREADONLY)
req.flags |= MSM_BO_GPU_READONLY;
if (flags & DRM_FREEDRENO_GEM_GPUREADONLY)
req.flags |= MSM_BO_GPU_READONLY;
ret = drmCommandWriteRead(dev->fd, DRM_MSM_GEM_NEW,
&req, sizeof(req));
if (ret)
return ret;
ret = drmCommandWriteRead(dev->fd, DRM_MSM_GEM_NEW, &req, sizeof(req));
if (ret)
return ret;
*handle = req.handle;
*handle = req.handle;
return 0;
return 0;
}
/* allocate a new buffer object */
struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
uint32_t size, uint32_t handle)
struct fd_bo *
msm_bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
struct msm_bo *msm_bo;
struct fd_bo *bo;
struct msm_bo *msm_bo;
struct fd_bo *bo;
msm_bo = calloc(1, sizeof(*msm_bo));
if (!msm_bo)
return NULL;
msm_bo = calloc(1, sizeof(*msm_bo));
if (!msm_bo)
return NULL;
bo = &msm_bo->base;
bo->funcs = &funcs;
bo = &msm_bo->base;
bo->funcs = &funcs;
return bo;
return bo;
}

View file

@ -24,38 +24,40 @@
* Rob Clark <robclark@freedesktop.org>
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "msm_priv.h"
static void msm_device_destroy(struct fd_device *dev)
static void
msm_device_destroy(struct fd_device *dev)
{
struct msm_device *msm_dev = to_msm_device(dev);
free(msm_dev);
struct msm_device *msm_dev = to_msm_device(dev);
free(msm_dev);
}
static const struct fd_device_funcs funcs = {
.bo_new_handle = msm_bo_new_handle,
.bo_from_handle = msm_bo_from_handle,
.pipe_new = msm_pipe_new,
.destroy = msm_device_destroy,
.bo_new_handle = msm_bo_new_handle,
.bo_from_handle = msm_bo_from_handle,
.pipe_new = msm_pipe_new,
.destroy = msm_device_destroy,
};
struct fd_device * msm_device_new(int fd)
struct fd_device *
msm_device_new(int fd)
{
struct msm_device *msm_dev;
struct fd_device *dev;
struct msm_device *msm_dev;
struct fd_device *dev;
msm_dev = calloc(1, sizeof(*msm_dev));
if (!msm_dev)
return NULL;
msm_dev = calloc(1, sizeof(*msm_dev));
if (!msm_dev)
return NULL;
dev = &msm_dev->base;
dev->funcs = &funcs;
dev = &msm_dev->base;
dev->funcs = &funcs;
dev->bo_size = sizeof(struct msm_bo);
dev->bo_size = sizeof(struct msm_bo);
return dev;
return dev;
}

View file

@ -28,227 +28,232 @@
#include "msm_priv.h"
static int query_param(struct fd_pipe *pipe, uint32_t param,
uint64_t *value)
static int
query_param(struct fd_pipe *pipe, uint32_t param, uint64_t *value)
{
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
struct drm_msm_param req = {
.pipe = msm_pipe->pipe,
.param = param,
};
int ret;
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
struct drm_msm_param req = {
.pipe = msm_pipe->pipe,
.param = param,
};
int ret;
ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_GET_PARAM,
&req, sizeof(req));
if (ret)
return ret;
ret =
drmCommandWriteRead(pipe->dev->fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
if (ret)
return ret;
*value = req.value;
*value = req.value;
return 0;
return 0;
}
static int query_queue_param(struct fd_pipe *pipe, uint32_t param,
uint64_t *value)
static int
query_queue_param(struct fd_pipe *pipe, uint32_t param, uint64_t *value)
{
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
struct drm_msm_submitqueue_query req = {
.data = VOID2U64(value),
.id = msm_pipe->queue_id,
.param = param,
.len = sizeof(*value),
};
int ret;
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
struct drm_msm_submitqueue_query req = {
.data = VOID2U64(value),
.id = msm_pipe->queue_id,
.param = param,
.len = sizeof(*value),
};
int ret;
ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_QUERY,
&req, sizeof(req));
if (ret)
return ret;
ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_QUERY, &req,
sizeof(req));
if (ret)
return ret;
return 0;
return 0;
}
static int msm_pipe_get_param(struct fd_pipe *pipe,
enum fd_param_id param, uint64_t *value)
static int
msm_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
uint64_t *value)
{
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
switch(param) {
case FD_DEVICE_ID: // XXX probably get rid of this..
case FD_GPU_ID:
*value = msm_pipe->gpu_id;
return 0;
case FD_GMEM_SIZE:
*value = msm_pipe->gmem;
return 0;
case FD_GMEM_BASE:
*value = msm_pipe->gmem_base;
return 0;
case FD_CHIP_ID:
*value = msm_pipe->chip_id;
return 0;
case FD_MAX_FREQ:
return query_param(pipe, MSM_PARAM_MAX_FREQ, value);
case FD_TIMESTAMP:
return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
case FD_NR_RINGS:
return query_param(pipe, MSM_PARAM_NR_RINGS, value);
case FD_PP_PGTABLE:
return query_param(pipe, MSM_PARAM_PP_PGTABLE, value);
case FD_CTX_FAULTS:
return query_queue_param(pipe, MSM_SUBMITQUEUE_PARAM_FAULTS, value);
case FD_GLOBAL_FAULTS:
return query_param(pipe, MSM_PARAM_FAULTS, value);
default:
ERROR_MSG("invalid param id: %d", param);
return -1;
}
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
switch (param) {
case FD_DEVICE_ID: // XXX probably get rid of this..
case FD_GPU_ID:
*value = msm_pipe->gpu_id;
return 0;
case FD_GMEM_SIZE:
*value = msm_pipe->gmem;
return 0;
case FD_GMEM_BASE:
*value = msm_pipe->gmem_base;
return 0;
case FD_CHIP_ID:
*value = msm_pipe->chip_id;
return 0;
case FD_MAX_FREQ:
return query_param(pipe, MSM_PARAM_MAX_FREQ, value);
case FD_TIMESTAMP:
return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
case FD_NR_RINGS:
return query_param(pipe, MSM_PARAM_NR_RINGS, value);
case FD_PP_PGTABLE:
return query_param(pipe, MSM_PARAM_PP_PGTABLE, value);
case FD_CTX_FAULTS:
return query_queue_param(pipe, MSM_SUBMITQUEUE_PARAM_FAULTS, value);
case FD_GLOBAL_FAULTS:
return query_param(pipe, MSM_PARAM_FAULTS, value);
default:
ERROR_MSG("invalid param id: %d", param);
return -1;
}
}
static int msm_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp,
uint64_t timeout)
static int
msm_pipe_wait(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout)
{
struct fd_device *dev = pipe->dev;
struct drm_msm_wait_fence req = {
.fence = timestamp,
.queueid = to_msm_pipe(pipe)->queue_id,
};
int ret;
struct fd_device *dev = pipe->dev;
struct drm_msm_wait_fence req = {
.fence = timestamp,
.queueid = to_msm_pipe(pipe)->queue_id,
};
int ret;
get_abs_timeout(&req.timeout, timeout);
get_abs_timeout(&req.timeout, timeout);
ret = drmCommandWrite(dev->fd, DRM_MSM_WAIT_FENCE, &req, sizeof(req));
if (ret && (ret != -ETIMEDOUT)) {
ERROR_MSG("wait-fence failed! %d (%s)", ret, strerror(errno));
}
ret = drmCommandWrite(dev->fd, DRM_MSM_WAIT_FENCE, &req, sizeof(req));
if (ret && (ret != -ETIMEDOUT)) {
ERROR_MSG("wait-fence failed! %d (%s)", ret, strerror(errno));
}
return ret;
return ret;
}
static int open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
static int
open_submitqueue(struct fd_pipe *pipe, uint32_t prio)
{
struct drm_msm_submitqueue req = {
.flags = 0,
.prio = prio,
};
uint64_t nr_rings = 1;
int ret;
struct drm_msm_submitqueue req = {
.flags = 0,
.prio = prio,
};
uint64_t nr_rings = 1;
int ret;
if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES) {
to_msm_pipe(pipe)->queue_id = 0;
return 0;
}
if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES) {
to_msm_pipe(pipe)->queue_id = 0;
return 0;
}
msm_pipe_get_param(pipe, FD_NR_RINGS, &nr_rings);
msm_pipe_get_param(pipe, FD_NR_RINGS, &nr_rings);
req.prio = MIN2(req.prio, MAX2(nr_rings, 1) - 1);
req.prio = MIN2(req.prio, MAX2(nr_rings, 1) - 1);
ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_NEW,
&req, sizeof(req));
if (ret) {
ERROR_MSG("could not create submitqueue! %d (%s)", ret, strerror(errno));
return ret;
}
ret = drmCommandWriteRead(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_NEW, &req,
sizeof(req));
if (ret) {
ERROR_MSG("could not create submitqueue! %d (%s)", ret, strerror(errno));
return ret;
}
to_msm_pipe(pipe)->queue_id = req.id;
return 0;
to_msm_pipe(pipe)->queue_id = req.id;
return 0;
}
static void close_submitqueue(struct fd_pipe *pipe, uint32_t queue_id)
static void
close_submitqueue(struct fd_pipe *pipe, uint32_t queue_id)
{
if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES)
return;
if (fd_device_version(pipe->dev) < FD_VERSION_SUBMIT_QUEUES)
return;
drmCommandWrite(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_CLOSE,
&queue_id, sizeof(queue_id));
drmCommandWrite(pipe->dev->fd, DRM_MSM_SUBMITQUEUE_CLOSE, &queue_id,
sizeof(queue_id));
}
static void msm_pipe_destroy(struct fd_pipe *pipe)
static void
msm_pipe_destroy(struct fd_pipe *pipe)
{
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
close_submitqueue(pipe, msm_pipe->queue_id);
msm_pipe_sp_ringpool_init(msm_pipe);
free(msm_pipe);
struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
close_submitqueue(pipe, msm_pipe->queue_id);
msm_pipe_sp_ringpool_init(msm_pipe);
free(msm_pipe);
}
static const struct fd_pipe_funcs sp_funcs = {
.ringbuffer_new_object = msm_ringbuffer_sp_new_object,
.submit_new = msm_submit_sp_new,
.get_param = msm_pipe_get_param,
.wait = msm_pipe_wait,
.destroy = msm_pipe_destroy,
.ringbuffer_new_object = msm_ringbuffer_sp_new_object,
.submit_new = msm_submit_sp_new,
.get_param = msm_pipe_get_param,
.wait = msm_pipe_wait,
.destroy = msm_pipe_destroy,
};
static const struct fd_pipe_funcs legacy_funcs = {
.ringbuffer_new_object = msm_ringbuffer_new_object,
.submit_new = msm_submit_new,
.get_param = msm_pipe_get_param,
.wait = msm_pipe_wait,
.destroy = msm_pipe_destroy,
.ringbuffer_new_object = msm_ringbuffer_new_object,
.submit_new = msm_submit_new,
.get_param = msm_pipe_get_param,
.wait = msm_pipe_wait,
.destroy = msm_pipe_destroy,
};
static uint64_t get_param(struct fd_pipe *pipe, uint32_t param)
static uint64_t
get_param(struct fd_pipe *pipe, uint32_t param)
{
uint64_t value;
int ret = query_param(pipe, param, &value);
if (ret) {
ERROR_MSG("get-param failed! %d (%s)", ret, strerror(errno));
return 0;
}
return value;
uint64_t value;
int ret = query_param(pipe, param, &value);
if (ret) {
ERROR_MSG("get-param failed! %d (%s)", ret, strerror(errno));
return 0;
}
return value;
}
struct fd_pipe * msm_pipe_new(struct fd_device *dev,
enum fd_pipe_id id, uint32_t prio)
struct fd_pipe *
msm_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
{
static const uint32_t pipe_id[] = {
[FD_PIPE_3D] = MSM_PIPE_3D0,
[FD_PIPE_2D] = MSM_PIPE_2D0,
};
struct msm_pipe *msm_pipe = NULL;
struct fd_pipe *pipe = NULL;
static const uint32_t pipe_id[] = {
[FD_PIPE_3D] = MSM_PIPE_3D0,
[FD_PIPE_2D] = MSM_PIPE_2D0,
};
struct msm_pipe *msm_pipe = NULL;
struct fd_pipe *pipe = NULL;
msm_pipe = calloc(1, sizeof(*msm_pipe));
if (!msm_pipe) {
ERROR_MSG("allocation failed");
goto fail;
}
msm_pipe = calloc(1, sizeof(*msm_pipe));
if (!msm_pipe) {
ERROR_MSG("allocation failed");
goto fail;
}
pipe = &msm_pipe->base;
pipe = &msm_pipe->base;
if (fd_device_version(dev) >= FD_VERSION_SOFTPIN) {
pipe->funcs = &sp_funcs;
} else {
pipe->funcs = &legacy_funcs;
}
if (fd_device_version(dev) >= FD_VERSION_SOFTPIN) {
pipe->funcs = &sp_funcs;
} else {
pipe->funcs = &legacy_funcs;
}
/* initialize before get_param(): */
pipe->dev = dev;
msm_pipe->pipe = pipe_id[id];
/* initialize before get_param(): */
pipe->dev = dev;
msm_pipe->pipe = pipe_id[id];
/* these params should be supported since the first version of drm/msm: */
msm_pipe->gpu_id = get_param(pipe, MSM_PARAM_GPU_ID);
msm_pipe->gmem = get_param(pipe, MSM_PARAM_GMEM_SIZE);
msm_pipe->chip_id = get_param(pipe, MSM_PARAM_CHIP_ID);
/* these params should be supported since the first version of drm/msm: */
msm_pipe->gpu_id = get_param(pipe, MSM_PARAM_GPU_ID);
msm_pipe->gmem = get_param(pipe, MSM_PARAM_GMEM_SIZE);
msm_pipe->chip_id = get_param(pipe, MSM_PARAM_CHIP_ID);
if (fd_device_version(pipe->dev) >= FD_VERSION_GMEM_BASE)
msm_pipe->gmem_base = get_param(pipe, MSM_PARAM_GMEM_BASE);
if (fd_device_version(pipe->dev) >= FD_VERSION_GMEM_BASE)
msm_pipe->gmem_base = get_param(pipe, MSM_PARAM_GMEM_BASE);
if (! msm_pipe->gpu_id)
goto fail;
if (!msm_pipe->gpu_id)
goto fail;
INFO_MSG("Pipe Info:");
INFO_MSG(" GPU-id: %d", msm_pipe->gpu_id);
INFO_MSG(" Chip-id: 0x%08x", msm_pipe->chip_id);
INFO_MSG(" GMEM size: 0x%08x", msm_pipe->gmem);
INFO_MSG("Pipe Info:");
INFO_MSG(" GPU-id: %d", msm_pipe->gpu_id);
INFO_MSG(" Chip-id: 0x%08x", msm_pipe->chip_id);
INFO_MSG(" GMEM size: 0x%08x", msm_pipe->gmem);
if (open_submitqueue(pipe, prio))
goto fail;
if (open_submitqueue(pipe, prio))
goto fail;
msm_pipe_sp_ringpool_init(msm_pipe);
msm_pipe_sp_ringpool_init(msm_pipe);
return pipe;
return pipe;
fail:
if (pipe)
fd_pipe_del(pipe);
return NULL;
if (pipe)
fd_pipe_del(pipe);
return NULL;
}

View file

@ -32,85 +32,89 @@
#include "util/slab.h"
#ifndef __user
# define __user
#define __user
#endif
#include "drm-uapi/msm_drm.h"
struct msm_device {
struct fd_device base;
struct fd_bo_cache ring_cache;
struct fd_device base;
struct fd_bo_cache ring_cache;
};
FD_DEFINE_CAST(fd_device, msm_device);
struct fd_device * msm_device_new(int fd);
struct fd_device *msm_device_new(int fd);
struct msm_pipe {
struct fd_pipe base;
uint32_t pipe;
uint32_t gpu_id;
uint64_t gmem_base;
uint32_t gmem;
uint32_t chip_id;
uint32_t queue_id;
struct slab_parent_pool ring_pool;
struct fd_pipe base;
uint32_t pipe;
uint32_t gpu_id;
uint64_t gmem_base;
uint32_t gmem;
uint32_t chip_id;
uint32_t queue_id;
struct slab_parent_pool ring_pool;
};
FD_DEFINE_CAST(fd_pipe, msm_pipe);
struct fd_pipe * msm_pipe_new(struct fd_device *dev,
enum fd_pipe_id id, uint32_t prio);
struct fd_pipe *msm_pipe_new(struct fd_device *dev, enum fd_pipe_id id,
uint32_t prio);
struct fd_ringbuffer * msm_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size);
struct fd_ringbuffer * msm_ringbuffer_sp_new_object(struct fd_pipe *pipe, uint32_t size);
struct fd_ringbuffer *msm_ringbuffer_new_object(struct fd_pipe *pipe,
uint32_t size);
struct fd_ringbuffer *msm_ringbuffer_sp_new_object(struct fd_pipe *pipe,
uint32_t size);
struct fd_submit * msm_submit_new(struct fd_pipe *pipe);
struct fd_submit * msm_submit_sp_new(struct fd_pipe *pipe);
struct fd_submit *msm_submit_new(struct fd_pipe *pipe);
struct fd_submit *msm_submit_sp_new(struct fd_pipe *pipe);
void msm_pipe_sp_ringpool_init(struct msm_pipe *msm_pipe);
void msm_pipe_sp_ringpool_fini(struct msm_pipe *msm_pipe);
struct msm_bo {
struct fd_bo base;
uint64_t offset;
uint32_t idx;
struct fd_bo base;
uint64_t offset;
uint32_t idx;
};
FD_DEFINE_CAST(fd_bo, msm_bo);
int msm_bo_new_handle(struct fd_device *dev,
uint32_t size, uint32_t flags, uint32_t *handle);
struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
uint32_t size, uint32_t handle);
int msm_bo_new_handle(struct fd_device *dev, uint32_t size, uint32_t flags,
uint32_t *handle);
struct fd_bo *msm_bo_from_handle(struct fd_device *dev, uint32_t size,
uint32_t handle);
static inline void
msm_dump_submit(struct drm_msm_gem_submit *req)
{
for (unsigned i = 0; i < req->nr_bos; i++) {
struct drm_msm_gem_submit_bo *bos = U642VOID(req->bos);
struct drm_msm_gem_submit_bo *bo = &bos[i];
ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
}
for (unsigned i = 0; i < req->nr_cmds; i++) {
struct drm_msm_gem_submit_cmd *cmds = U642VOID(req->cmds);
struct drm_msm_gem_submit_cmd *cmd = &cmds[i];
struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
for (unsigned j = 0; j < cmd->nr_relocs; j++) {
struct drm_msm_gem_submit_reloc *r = &relocs[j];
ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
r->reloc_idx, (uint64_t)r->reloc_offset);
}
}
for (unsigned i = 0; i < req->nr_bos; i++) {
struct drm_msm_gem_submit_bo *bos = U642VOID(req->bos);
struct drm_msm_gem_submit_bo *bo = &bos[i];
ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
}
for (unsigned i = 0; i < req->nr_cmds; i++) {
struct drm_msm_gem_submit_cmd *cmds = U642VOID(req->cmds);
struct drm_msm_gem_submit_cmd *cmd = &cmds[i];
struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
for (unsigned j = 0; j < cmd->nr_relocs; j++) {
struct drm_msm_gem_submit_reloc *r = &relocs[j];
ERROR_MSG(
" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
", reloc_offset=%" PRIu64,
j, r->submit_offset, r->or, r->shift, r->reloc_idx,
(uint64_t)r->reloc_offset);
}
}
}
static inline void get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
static inline void
get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
{
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
tv->tv_sec = t.tv_sec + ns / 1000000000;
tv->tv_nsec = t.tv_nsec + ns % 1000000000;
struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &t);
tv->tv_sec = t.tv_sec + ns / 1000000000;
tv->tv_nsec = t.tv_nsec + ns % 1000000000;
}
/*
@ -120,24 +124,26 @@ static inline void get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
static inline void
grow(void **ptr, uint16_t nr, uint16_t *max, uint16_t sz)
{
if ((nr + 1) > *max) {
if ((*max * 2) < (nr + 1))
*max = nr + 5;
else
*max = *max * 2;
*ptr = realloc(*ptr, *max * sz);
}
if ((nr + 1) > *max) {
if ((*max * 2) < (nr + 1))
*max = nr + 5;
else
*max = *max * 2;
*ptr = realloc(*ptr, *max * sz);
}
}
#define DECLARE_ARRAY(type, name) \
unsigned short nr_ ## name, max_ ## name; \
type * name;
#define DECLARE_ARRAY(type, name) \
unsigned short nr_##name, max_##name; \
type *name;
#define APPEND(x, name, ...) ({ \
grow((void **)&(x)->name, (x)->nr_ ## name, &(x)->max_ ## name, sizeof((x)->name[0])); \
(x)->name[(x)->nr_ ## name] = __VA_ARGS__; \
(x)->nr_ ## name ++; \
})
#define APPEND(x, name, ...) \
({ \
grow((void **)&(x)->name, (x)->nr_##name, &(x)->max_##name, \
sizeof((x)->name[0])); \
(x)->name[(x)->nr_##name] = __VA_ARGS__; \
(x)->nr_##name++; \
})
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

File diff suppressed because it is too large Load diff

View file

@ -38,29 +38,27 @@
* (but still builds a bos table)
*/
#define INIT_SIZE 0x1000
struct msm_submit_sp {
struct fd_submit base;
struct fd_submit base;
DECLARE_ARRAY(struct fd_bo *, bos);
DECLARE_ARRAY(struct fd_bo *, bos);
/* maps fd_bo to idx in bos table: */
struct hash_table *bo_table;
/* maps fd_bo to idx in bos table: */
struct hash_table *bo_table;
struct slab_child_pool ring_pool;
struct slab_child_pool ring_pool;
struct fd_ringbuffer *primary;
struct fd_ringbuffer *primary;
/* Allow for sub-allocation of stateobj ring buffers (ie. sharing
* the same underlying bo)..
*
* We also rely on previous stateobj having been fully constructed
* so we can reclaim extra space at it's end.
*/
struct fd_ringbuffer *suballoc_ring;
/* Allow for sub-allocation of stateobj ring buffers (ie. sharing
* the same underlying bo)..
*
* We also rely on previous stateobj having been fully constructed
* so we can reclaim extra space at it's end.
*/
struct fd_ringbuffer *suballoc_ring;
};
FD_DEFINE_CAST(fd_submit, msm_submit_sp);
@ -69,324 +67,325 @@ FD_DEFINE_CAST(fd_submit, msm_submit_sp);
* it.
*/
struct msm_cmd_sp {
struct fd_bo *ring_bo;
unsigned size;
struct fd_bo *ring_bo;
unsigned size;
};
struct msm_ringbuffer_sp {
struct fd_ringbuffer base;
struct fd_ringbuffer base;
/* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
unsigned offset;
/* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
unsigned offset;
union {
/* for _FD_RINGBUFFER_OBJECT case, the array of BOs referenced from
* this one
*/
struct {
struct fd_pipe *pipe;
DECLARE_ARRAY(struct fd_bo *, reloc_bos);
};
/* for other cases: */
struct {
struct fd_submit *submit;
DECLARE_ARRAY(struct msm_cmd_sp, cmds);
};
} u;
union {
/* for _FD_RINGBUFFER_OBJECT case, the array of BOs referenced from
* this one
*/
struct {
struct fd_pipe *pipe;
DECLARE_ARRAY(struct fd_bo *, reloc_bos);
};
/* for other cases: */
struct {
struct fd_submit *submit;
DECLARE_ARRAY(struct msm_cmd_sp, cmds);
};
} u;
struct fd_bo *ring_bo;
struct fd_bo *ring_bo;
};
FD_DEFINE_CAST(fd_ringbuffer, msm_ringbuffer_sp);
static void finalize_current_cmd(struct fd_ringbuffer *ring);
static struct fd_ringbuffer * msm_ringbuffer_sp_init(
struct msm_ringbuffer_sp *msm_ring,
uint32_t size, enum fd_ringbuffer_flags flags);
static struct fd_ringbuffer *
msm_ringbuffer_sp_init(struct msm_ringbuffer_sp *msm_ring, uint32_t size,
enum fd_ringbuffer_flags flags);
/* add (if needed) bo to submit and return index: */
static uint32_t
msm_submit_append_bo(struct msm_submit_sp *submit, struct fd_bo *bo)
{
struct msm_bo *msm_bo = to_msm_bo(bo);
uint32_t idx;
struct msm_bo *msm_bo = to_msm_bo(bo);
uint32_t idx;
/* NOTE: it is legal to use the same bo on different threads for
* different submits. But it is not legal to use the same submit
* from given threads.
*/
idx = READ_ONCE(msm_bo->idx);
/* NOTE: it is legal to use the same bo on different threads for
* different submits. But it is not legal to use the same submit
* from given threads.
*/
idx = READ_ONCE(msm_bo->idx);
if (unlikely((idx >= submit->nr_bos) ||
(submit->bos[idx] != bo))) {
uint32_t hash = _mesa_hash_pointer(bo);
struct hash_entry *entry;
if (unlikely((idx >= submit->nr_bos) || (submit->bos[idx] != bo))) {
uint32_t hash = _mesa_hash_pointer(bo);
struct hash_entry *entry;
entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
if (entry) {
/* found */
idx = (uint32_t)(uintptr_t)entry->data;
} else {
idx = APPEND(submit, bos, fd_bo_ref(bo));
entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
if (entry) {
/* found */
idx = (uint32_t)(uintptr_t)entry->data;
} else {
idx = APPEND(submit, bos, fd_bo_ref(bo));
_mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
(void *)(uintptr_t)idx);
}
msm_bo->idx = idx;
}
_mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
(void *)(uintptr_t)idx);
}
msm_bo->idx = idx;
}
return idx;
return idx;
}
static void
msm_submit_suballoc_ring_bo(struct fd_submit *submit,
struct msm_ringbuffer_sp *msm_ring, uint32_t size)
struct msm_ringbuffer_sp *msm_ring, uint32_t size)
{
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
unsigned suballoc_offset = 0;
struct fd_bo *suballoc_bo = NULL;
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
unsigned suballoc_offset = 0;
struct fd_bo *suballoc_bo = NULL;
if (msm_submit->suballoc_ring) {
struct msm_ringbuffer_sp *suballoc_ring =
to_msm_ringbuffer_sp(msm_submit->suballoc_ring);
if (msm_submit->suballoc_ring) {
struct msm_ringbuffer_sp *suballoc_ring =
to_msm_ringbuffer_sp(msm_submit->suballoc_ring);
suballoc_bo = suballoc_ring->ring_bo;
suballoc_offset = fd_ringbuffer_size(msm_submit->suballoc_ring) +
suballoc_ring->offset;
suballoc_bo = suballoc_ring->ring_bo;
suballoc_offset =
fd_ringbuffer_size(msm_submit->suballoc_ring) + suballoc_ring->offset;
suballoc_offset = align(suballoc_offset, 0x10);
suballoc_offset = align(suballoc_offset, 0x10);
if ((size + suballoc_offset) > suballoc_bo->size) {
suballoc_bo = NULL;
}
}
if ((size + suballoc_offset) > suballoc_bo->size) {
suballoc_bo = NULL;
}
}
if (!suballoc_bo) {
// TODO possibly larger size for streaming bo?
msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, 0x8000);
msm_ring->offset = 0;
} else {
msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
msm_ring->offset = suballoc_offset;
}
if (!suballoc_bo) {
// TODO possibly larger size for streaming bo?
msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, 0x8000);
msm_ring->offset = 0;
} else {
msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
msm_ring->offset = suballoc_offset;
}
struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;
struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;
msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);
msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);
if (old_suballoc_ring)
fd_ringbuffer_del(old_suballoc_ring);
if (old_suballoc_ring)
fd_ringbuffer_del(old_suballoc_ring);
}
static struct fd_ringbuffer *
msm_submit_sp_new_ringbuffer(struct fd_submit *submit, uint32_t size,
enum fd_ringbuffer_flags flags)
enum fd_ringbuffer_flags flags)
{
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
struct msm_ringbuffer_sp *msm_ring;
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
struct msm_ringbuffer_sp *msm_ring;
msm_ring = slab_alloc(&msm_submit->ring_pool);
msm_ring = slab_alloc(&msm_submit->ring_pool);
msm_ring->u.submit = submit;
msm_ring->u.submit = submit;
/* NOTE: needs to be before _suballoc_ring_bo() since it could
* increment the refcnt of the current ring
*/
msm_ring->base.refcnt = 1;
/* NOTE: needs to be before _suballoc_ring_bo() since it could
* increment the refcnt of the current ring
*/
msm_ring->base.refcnt = 1;
if (flags & FD_RINGBUFFER_STREAMING) {
msm_submit_suballoc_ring_bo(submit, msm_ring, size);
} else {
if (flags & FD_RINGBUFFER_GROWABLE)
size = INIT_SIZE;
if (flags & FD_RINGBUFFER_STREAMING) {
msm_submit_suballoc_ring_bo(submit, msm_ring, size);
} else {
if (flags & FD_RINGBUFFER_GROWABLE)
size = INIT_SIZE;
msm_ring->offset = 0;
msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size);
}
msm_ring->offset = 0;
msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size);
}
if (!msm_ringbuffer_sp_init(msm_ring, size, flags))
return NULL;
if (!msm_ringbuffer_sp_init(msm_ring, size, flags))
return NULL;
if (flags & FD_RINGBUFFER_PRIMARY) {
debug_assert(!msm_submit->primary);
msm_submit->primary = fd_ringbuffer_ref(&msm_ring->base);
}
if (flags & FD_RINGBUFFER_PRIMARY) {
debug_assert(!msm_submit->primary);
msm_submit->primary = fd_ringbuffer_ref(&msm_ring->base);
}
return &msm_ring->base;
return &msm_ring->base;
}
static int
msm_submit_sp_flush(struct fd_submit *submit, int in_fence_fd,
int *out_fence_fd, uint32_t *out_fence)
int *out_fence_fd, uint32_t *out_fence)
{
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
struct drm_msm_gem_submit req = {
.flags = msm_pipe->pipe,
.queueid = msm_pipe->queue_id,
};
int ret;
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
struct drm_msm_gem_submit req = {
.flags = msm_pipe->pipe,
.queueid = msm_pipe->queue_id,
};
int ret;
debug_assert(msm_submit->primary);
finalize_current_cmd(msm_submit->primary);
debug_assert(msm_submit->primary);
finalize_current_cmd(msm_submit->primary);
struct msm_ringbuffer_sp *primary = to_msm_ringbuffer_sp(msm_submit->primary);
struct drm_msm_gem_submit_cmd cmds[primary->u.nr_cmds];
struct msm_ringbuffer_sp *primary =
to_msm_ringbuffer_sp(msm_submit->primary);
struct drm_msm_gem_submit_cmd cmds[primary->u.nr_cmds];
for (unsigned i = 0; i < primary->u.nr_cmds; i++) {
cmds[i].type = MSM_SUBMIT_CMD_BUF;
cmds[i].submit_idx = msm_submit_append_bo(msm_submit,
primary->u.cmds[i].ring_bo);
cmds[i].submit_offset = primary->offset;
cmds[i].size = primary->u.cmds[i].size;
cmds[i].pad = 0;
cmds[i].nr_relocs = 0;
}
for (unsigned i = 0; i < primary->u.nr_cmds; i++) {
cmds[i].type = MSM_SUBMIT_CMD_BUF;
cmds[i].submit_idx =
msm_submit_append_bo(msm_submit, primary->u.cmds[i].ring_bo);
cmds[i].submit_offset = primary->offset;
cmds[i].size = primary->u.cmds[i].size;
cmds[i].pad = 0;
cmds[i].nr_relocs = 0;
}
if (in_fence_fd != -1) {
req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
req.fence_fd = in_fence_fd;
}
if (in_fence_fd != -1) {
req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
req.fence_fd = in_fence_fd;
}
if (out_fence_fd) {
req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
}
if (out_fence_fd) {
req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
}
/* Needs to be after get_cmd() as that could create bos/cmds table:
*
* NOTE allocate on-stack in the common case, but with an upper-
* bound to limit on-stack allocation to 4k:
*/
const unsigned bo_limit = sizeof(struct drm_msm_gem_submit_bo) / 4096;
bool bos_on_stack = msm_submit->nr_bos < bo_limit;
struct drm_msm_gem_submit_bo _submit_bos[bos_on_stack ? msm_submit->nr_bos : 0];
struct drm_msm_gem_submit_bo *submit_bos;
if (bos_on_stack) {
submit_bos = _submit_bos;
} else {
submit_bos = malloc(msm_submit->nr_bos * sizeof(submit_bos[0]));
}
for (unsigned i = 0; i < msm_submit->nr_bos; i++) {
submit_bos[i].flags = msm_submit->bos[i]->flags;
submit_bos[i].handle = msm_submit->bos[i]->handle;
submit_bos[i].presumed = 0;
}
req.bos = VOID2U64(submit_bos),
req.nr_bos = msm_submit->nr_bos;
req.cmds = VOID2U64(cmds),
req.nr_cmds = primary->u.nr_cmds;
/* Needs to be after get_cmd() as that could create bos/cmds table:
*
* NOTE allocate on-stack in the common case, but with an upper-
* bound to limit on-stack allocation to 4k:
*/
const unsigned bo_limit = sizeof(struct drm_msm_gem_submit_bo) / 4096;
bool bos_on_stack = msm_submit->nr_bos < bo_limit;
struct drm_msm_gem_submit_bo
_submit_bos[bos_on_stack ? msm_submit->nr_bos : 0];
struct drm_msm_gem_submit_bo *submit_bos;
if (bos_on_stack) {
submit_bos = _submit_bos;
} else {
submit_bos = malloc(msm_submit->nr_bos * sizeof(submit_bos[0]));
}
for (unsigned i = 0; i < msm_submit->nr_bos; i++) {
submit_bos[i].flags = msm_submit->bos[i]->flags;
submit_bos[i].handle = msm_submit->bos[i]->handle;
submit_bos[i].presumed = 0;
}
req.bos = VOID2U64(submit_bos), req.nr_bos = msm_submit->nr_bos;
req.cmds = VOID2U64(cmds), req.nr_cmds = primary->u.nr_cmds;
DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
&req, sizeof(req));
if (ret) {
ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
msm_dump_submit(&req);
} else if (!ret) {
if (out_fence)
*out_fence = req.fence;
ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT, &req,
sizeof(req));
if (ret) {
ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
msm_dump_submit(&req);
} else if (!ret) {
if (out_fence)
*out_fence = req.fence;
if (out_fence_fd)
*out_fence_fd = req.fence_fd;
}
if (out_fence_fd)
*out_fence_fd = req.fence_fd;
}
if (!bos_on_stack)
free(submit_bos);
if (!bos_on_stack)
free(submit_bos);
return ret;
return ret;
}
/* Tear down a softpin submit: drop ring refs, the bo table, the child
 * ring slab, and every bo reference accumulated for the kernel submit.
 */
static void
msm_submit_sp_destroy(struct fd_submit *submit)
{
   struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);

   if (msm_submit->primary)
      fd_ringbuffer_del(msm_submit->primary);
   if (msm_submit->suballoc_ring)
      fd_ringbuffer_del(msm_submit->suballoc_ring);

   _mesa_hash_table_destroy(msm_submit->bo_table, NULL);

   // TODO it would be nice to have a way to debug_assert() if all
   // rb's haven't been free'd back to the slab, because that is
   // an indication that we are leaking bo's
   slab_destroy_child(&msm_submit->ring_pool);

   /* Drop the reference taken on each bo when it was appended: */
   for (unsigned i = 0; i < msm_submit->nr_bos; i++)
      fd_bo_del(msm_submit->bos[i]);

   free(msm_submit->bos);
   free(msm_submit);
}
/* vtable hooking the softpin submit implementation into the generic
 * fd_submit interface:
 */
static const struct fd_submit_funcs submit_funcs = {
   .new_ringbuffer = msm_submit_sp_new_ringbuffer,
   .flush = msm_submit_sp_flush,
   .destroy = msm_submit_sp_destroy,
};
/* Allocate and initialize a softpin submit for @pipe.  The bo table
 * de-duplicates bo references appended during cmdstream emit; the ring
 * pool is a child of the pipe's slab parent.
 *
 * NOTE(review): calloc result is used unchecked, matching the
 * surrounding code's OOM policy.
 */
struct fd_submit *
msm_submit_sp_new(struct fd_pipe *pipe)
{
   struct msm_submit_sp *msm_submit = calloc(1, sizeof(*msm_submit));
   struct fd_submit *submit;

   msm_submit->bo_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);

   slab_create_child(&msm_submit->ring_pool, &to_msm_pipe(pipe)->ring_pool);

   submit = &msm_submit->base;
   submit->pipe = pipe;
   submit->funcs = &submit_funcs;

   return submit;
}
/* Create the per-pipe slab parent that per-submit ring pools allocate
 * msm_ringbuffer_sp objects from.
 */
void
msm_pipe_sp_ringpool_init(struct msm_pipe *msm_pipe)
{
   // TODO tune size:
   slab_create_parent(&msm_pipe->ring_pool, sizeof(struct msm_ringbuffer_sp),
                      16);
}
/* Destroy the per-pipe slab parent, if it was ever initialized
 * (num_elements is zero for a never-initialized pool).
 */
void
msm_pipe_sp_ringpool_fini(struct msm_pipe *msm_pipe)
{
   if (msm_pipe->ring_pool.num_elements)
      slab_destroy_parent(&msm_pipe->ring_pool);
}
/* Record the current ring contents (bo + emitted size) into the
 * submit's cmds table.  Only valid for growable/primary rings, not
 * stateobj rings.
 */
static void
finalize_current_cmd(struct fd_ringbuffer *ring)
{
   debug_assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
   APPEND(&msm_ring->u, cmds,
          (struct msm_cmd_sp){
             .ring_bo = fd_bo_ref(msm_ring->ring_bo),
             .size = offset_bytes(ring->cur, ring->start),
          });
}
/* Grow a growable ringbuffer: finalize the current bo into the cmds
 * table, then swap in a fresh bo of @size bytes and reset the cursor.
 */
static void
msm_ringbuffer_sp_grow(struct fd_ringbuffer *ring, uint32_t size)
{
   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
   struct fd_pipe *pipe = msm_ring->u.submit->pipe;

   debug_assert(ring->flags & FD_RINGBUFFER_GROWABLE);

   finalize_current_cmd(ring);

   /* finalize_current_cmd() took its own ref on the old bo: */
   fd_bo_del(msm_ring->ring_bo);
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);

   ring->start = fd_bo_map(msm_ring->ring_bo);
   ring->end = &(ring->start[size / 4]);   /* end is in dwords */
   ring->cur = ring->start;
   ring->size = size;
}
#define PTRSZ 64
@ -399,124 +398,124 @@ msm_ringbuffer_sp_grow(struct fd_ringbuffer *ring, uint32_t size)
/* Number of cmd buffers in this ring: previously-finalized cmds plus
 * the in-progress one for growable rings, otherwise always one.
 */
static uint32_t
msm_ringbuffer_sp_cmd_count(struct fd_ringbuffer *ring)
{
   if (ring->flags & FD_RINGBUFFER_GROWABLE)
      return to_msm_ringbuffer_sp(ring)->u.nr_cmds + 1;
   return 1;
}
/* Destroy a ringbuffer.  Stateobj rings own a reloc_bos table and are
 * heap allocated; submit-owned rings own a cmds table and come from the
 * submit's slab pool.
 */
static void
msm_ringbuffer_sp_destroy(struct fd_ringbuffer *ring)
{
   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);

   fd_bo_del(msm_ring->ring_bo);

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
         fd_bo_del(msm_ring->u.reloc_bos[i]);
      }
      free(msm_ring->u.reloc_bos);

      free(msm_ring);
   } else {
      struct fd_submit *submit = msm_ring->u.submit;

      for (unsigned i = 0; i < msm_ring->u.nr_cmds; i++) {
         fd_bo_del(msm_ring->u.cmds[i].ring_bo);
      }
      free(msm_ring->u.cmds);

      slab_free(&to_msm_submit_sp(submit)->ring_pool, msm_ring);
   }
}
/* Ring vtable: submit-owned ring, 32-bit iova emit: */
static const struct fd_ringbuffer_funcs ring_funcs_nonobj_32 = {
   .grow = msm_ringbuffer_sp_grow,
   .emit_reloc = msm_ringbuffer_sp_emit_reloc_nonobj_32,
   .emit_reloc_ring = msm_ringbuffer_sp_emit_reloc_ring_32,
   .cmd_count = msm_ringbuffer_sp_cmd_count,
   .destroy = msm_ringbuffer_sp_destroy,
};
/* Ring vtable: stateobj ring, 32-bit iova emit: */
static const struct fd_ringbuffer_funcs ring_funcs_obj_32 = {
   .grow = msm_ringbuffer_sp_grow,
   .emit_reloc = msm_ringbuffer_sp_emit_reloc_obj_32,
   .emit_reloc_ring = msm_ringbuffer_sp_emit_reloc_ring_32,
   .cmd_count = msm_ringbuffer_sp_cmd_count,
   .destroy = msm_ringbuffer_sp_destroy,
};
/* Ring vtable: submit-owned ring, 64-bit iova emit: */
static const struct fd_ringbuffer_funcs ring_funcs_nonobj_64 = {
   .grow = msm_ringbuffer_sp_grow,
   .emit_reloc = msm_ringbuffer_sp_emit_reloc_nonobj_64,
   .emit_reloc_ring = msm_ringbuffer_sp_emit_reloc_ring_64,
   .cmd_count = msm_ringbuffer_sp_cmd_count,
   .destroy = msm_ringbuffer_sp_destroy,
};
/* Ring vtable: stateobj ring, 64-bit iova emit: */
static const struct fd_ringbuffer_funcs ring_funcs_obj_64 = {
   .grow = msm_ringbuffer_sp_grow,
   .emit_reloc = msm_ringbuffer_sp_emit_reloc_obj_64,
   .emit_reloc_ring = msm_ringbuffer_sp_emit_reloc_ring_64,
   .cmd_count = msm_ringbuffer_sp_cmd_count,
   .destroy = msm_ringbuffer_sp_destroy,
};
static inline struct fd_ringbuffer *
msm_ringbuffer_sp_init(struct msm_ringbuffer_sp *msm_ring, uint32_t size,
enum fd_ringbuffer_flags flags)
enum fd_ringbuffer_flags flags)
{
struct fd_ringbuffer *ring = &msm_ring->base;
struct fd_ringbuffer *ring = &msm_ring->base;
/* We don't do any translation from internal FD_RELOC flags to MSM flags. */
STATIC_ASSERT(FD_RELOC_READ == MSM_SUBMIT_BO_READ);
STATIC_ASSERT(FD_RELOC_WRITE == MSM_SUBMIT_BO_WRITE);
STATIC_ASSERT(FD_RELOC_DUMP == MSM_SUBMIT_BO_DUMP);
/* We don't do any translation from internal FD_RELOC flags to MSM flags. */
STATIC_ASSERT(FD_RELOC_READ == MSM_SUBMIT_BO_READ);
STATIC_ASSERT(FD_RELOC_WRITE == MSM_SUBMIT_BO_WRITE);
STATIC_ASSERT(FD_RELOC_DUMP == MSM_SUBMIT_BO_DUMP);
debug_assert(msm_ring->ring_bo);
debug_assert(msm_ring->ring_bo);
uint8_t *base = fd_bo_map(msm_ring->ring_bo);
ring->start = (void *)(base + msm_ring->offset);
ring->end = &(ring->start[size/4]);
ring->cur = ring->start;
uint8_t *base = fd_bo_map(msm_ring->ring_bo);
ring->start = (void *)(base + msm_ring->offset);
ring->end = &(ring->start[size / 4]);
ring->cur = ring->start;
ring->size = size;
ring->flags = flags;
ring->size = size;
ring->flags = flags;
if (flags & _FD_RINGBUFFER_OBJECT) {
if (msm_ring->u.pipe->gpu_id >= 500) {
ring->funcs = &ring_funcs_obj_64;
} else {
ring->funcs = &ring_funcs_obj_32;
}
} else {
if (msm_ring->u.submit->pipe->gpu_id >= 500) {
ring->funcs = &ring_funcs_nonobj_64;
} else {
ring->funcs = &ring_funcs_nonobj_32;
}
}
if (flags & _FD_RINGBUFFER_OBJECT) {
if (msm_ring->u.pipe->gpu_id >= 500) {
ring->funcs = &ring_funcs_obj_64;
} else {
ring->funcs = &ring_funcs_obj_32;
}
} else {
if (msm_ring->u.submit->pipe->gpu_id >= 500) {
ring->funcs = &ring_funcs_nonobj_64;
} else {
ring->funcs = &ring_funcs_nonobj_32;
}
}
// TODO initializing these could probably be conditional on flags
// since unneed for FD_RINGBUFFER_STAGING case..
msm_ring->u.cmds = NULL;
msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;
// TODO initializing these could probably be conditional on flags
// since unneed for FD_RINGBUFFER_STAGING case..
msm_ring->u.cmds = NULL;
msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;
msm_ring->u.reloc_bos = NULL;
msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;
msm_ring->u.reloc_bos = NULL;
msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;
return ring;
return ring;
}
/* Create a long-lived stateobj ringbuffer (not tied to a submit), with
 * its own dedicated backing bo of @size bytes.
 *
 * NOTE(review): malloc result is used unchecked, matching the
 * surrounding code's OOM policy.
 */
struct fd_ringbuffer *
msm_ringbuffer_sp_new_object(struct fd_pipe *pipe, uint32_t size)
{
   struct msm_ringbuffer_sp *msm_ring = malloc(sizeof(*msm_ring));

   msm_ring->u.pipe = pipe;
   msm_ring->offset = 0;
   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
   msm_ring->base.refcnt = 1;

   return msm_ringbuffer_sp_init(msm_ring, size, _FD_RINGBUFFER_OBJECT);
}

View file

@ -22,119 +22,114 @@
*/
/* This file is #include'd twice with PTRSZ set to 32 then 64; X(n)
 * suffixes each definition so both variants coexist in one TU.
 */
#ifdef X
#undef X
#endif

#if PTRSZ == 32
#define X(n) n##_32
#else
#define X(n) n##_64
#endif
static void
X(emit_reloc_common)(struct fd_ringbuffer *ring, const struct fd_reloc *reloc)
static void X(emit_reloc_common)(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
{
(*ring->cur++) = (uint32_t)reloc->iova;
(*ring->cur++) = (uint32_t)reloc->iova;
#if PTRSZ == 64
(*ring->cur++) = (uint32_t)(reloc->iova >> 32);
(*ring->cur++) = (uint32_t)(reloc->iova >> 32);
#endif
}
static void
X(msm_ringbuffer_sp_emit_reloc_nonobj)(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
static void X(msm_ringbuffer_sp_emit_reloc_nonobj)(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
{
X(emit_reloc_common)(ring, reloc);
X(emit_reloc_common)(ring, reloc);
assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
struct msm_submit_sp *msm_submit =
to_msm_submit_sp(msm_ring->u.submit);
struct msm_submit_sp *msm_submit = to_msm_submit_sp(msm_ring->u.submit);
msm_submit_append_bo(msm_submit, reloc->bo);
msm_submit_append_bo(msm_submit, reloc->bo);
}
static void
X(msm_ringbuffer_sp_emit_reloc_obj)(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
static void X(msm_ringbuffer_sp_emit_reloc_obj)(struct fd_ringbuffer *ring,
const struct fd_reloc *reloc)
{
X(emit_reloc_common)(ring, reloc);
X(emit_reloc_common)(ring, reloc);
assert(ring->flags & _FD_RINGBUFFER_OBJECT);
assert(ring->flags & _FD_RINGBUFFER_OBJECT);
struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
/* Avoid emitting duplicate BO references into the list. Ringbuffer
* objects are long-lived, so this saves ongoing work at draw time in
* exchange for a bit at context setup/first draw. And the number of
* relocs per ringbuffer object is fairly small, so the O(n^2) doesn't
* hurt much.
*/
bool found = false;
for (int i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
if (msm_ring->u.reloc_bos[i] == reloc->bo) {
found = true;
break;
}
}
if (!found) {
APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(reloc->bo));
}
/* Avoid emitting duplicate BO references into the list. Ringbuffer
* objects are long-lived, so this saves ongoing work at draw time in
* exchange for a bit at context setup/first draw. And the number of
* relocs per ringbuffer object is fairly small, so the O(n^2) doesn't
* hurt much.
*/
bool found = false;
for (int i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
if (msm_ring->u.reloc_bos[i] == reloc->bo) {
found = true;
break;
}
}
if (!found) {
APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(reloc->bo));
}
}
static uint32_t
X(msm_ringbuffer_sp_emit_reloc_ring)(struct fd_ringbuffer *ring,
struct fd_ringbuffer *target, uint32_t cmd_idx)
static uint32_t X(msm_ringbuffer_sp_emit_reloc_ring)(
struct fd_ringbuffer *ring, struct fd_ringbuffer *target, uint32_t cmd_idx)
{
struct msm_ringbuffer_sp *msm_target = to_msm_ringbuffer_sp(target);
struct fd_bo *bo;
uint32_t size;
struct msm_ringbuffer_sp *msm_target = to_msm_ringbuffer_sp(target);
struct fd_bo *bo;
uint32_t size;
if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
(cmd_idx < msm_target->u.nr_cmds)) {
bo = msm_target->u.cmds[cmd_idx].ring_bo;
size = msm_target->u.cmds[cmd_idx].size;
} else {
bo = msm_target->ring_bo;
size = offset_bytes(target->cur, target->start);
}
if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
(cmd_idx < msm_target->u.nr_cmds)) {
bo = msm_target->u.cmds[cmd_idx].ring_bo;
size = msm_target->u.cmds[cmd_idx].size;
} else {
bo = msm_target->ring_bo;
size = offset_bytes(target->cur, target->start);
}
if (ring->flags & _FD_RINGBUFFER_OBJECT) {
X(msm_ringbuffer_sp_emit_reloc_obj)(ring, &(struct fd_reloc){
.bo = bo,
.iova = bo->iova + msm_target->offset,
.offset = msm_target->offset,
});
} else {
X(msm_ringbuffer_sp_emit_reloc_nonobj)(ring, &(struct fd_reloc){
.bo = bo,
.iova = bo->iova + msm_target->offset,
.offset = msm_target->offset,
});
}
if (ring->flags & _FD_RINGBUFFER_OBJECT) {
X(msm_ringbuffer_sp_emit_reloc_obj)(ring, &(struct fd_reloc){
.bo = bo,
.iova = bo->iova + msm_target->offset,
.offset = msm_target->offset,
});
} else {
X(msm_ringbuffer_sp_emit_reloc_nonobj)(ring, &(struct fd_reloc){
.bo = bo,
.iova = bo->iova + msm_target->offset,
.offset = msm_target->offset,
});
}
if (!(target->flags & _FD_RINGBUFFER_OBJECT))
return size;
if (!(target->flags & _FD_RINGBUFFER_OBJECT))
return size;
struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
if (ring->flags & _FD_RINGBUFFER_OBJECT) {
for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(msm_target->u.reloc_bos[i]));
}
} else {
// TODO it would be nice to know whether we have already
// seen this target before. But hopefully we hit the
// append_bo() fast path enough for this to not matter:
struct msm_submit_sp *msm_submit = to_msm_submit_sp(msm_ring->u.submit);
if (ring->flags & _FD_RINGBUFFER_OBJECT) {
for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(msm_target->u.reloc_bos[i]));
}
} else {
// TODO it would be nice to know whether we have already
// seen this target before. But hopefully we hit the
// append_bo() fast path enough for this to not matter:
struct msm_submit_sp *msm_submit = to_msm_submit_sp(msm_ring->u.submit);
for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
msm_submit_append_bo(msm_submit, msm_target->u.reloc_bos[i]);
}
}
for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
msm_submit_append_bo(msm_submit, msm_target->u.reloc_bos[i]);
}
}
return size;
return size;
}