panfrost: Avoid direct accesses to some panfrost_bo fields

We are about to delegate some BO-related operations to the pan_kmod
layer, but before we can do that, we need to hide panfrost_bo
internals so we can redirect such accesses to pan_kmod.

Provide panfrost_bo_{size,handle}() accessors and start using them.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26357>
Authored by Boris Brezillon on 2023-11-20 15:06:34 +01:00; committed by Marge Bot
parent d47e01a3c3
commit 70bcdbee6c
12 changed files with 74 additions and 51 deletions
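
For readers skimming the diff, a compact sketch of the indirection this commit prepares for: in this commit the new accessors simply return fields already stored in struct panfrost_bo, but once BO management is delegated to pan_kmod only the accessor bodies need to change, while the call sites converted below stay untouched. The pan_kmod_bo layout and the kmod_bo field used in this sketch are hypothetical illustrations, not part of this commit.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical backing object owned by a future pan_kmod layer
 * (shape assumed purely for illustration). */
struct pan_kmod_bo {
   size_t size;
   uint32_t handle;
};

struct panfrost_bo {
   struct pan_kmod_bo *kmod_bo; /* assumed future field */
   /* ... driver-side state: GPU/CPU pointers, flags, label, ... */
};

/* Call sites keep using these helpers, so redirecting them to the
 * kmod object later is a purely local change. */
static inline size_t
panfrost_bo_size(struct panfrost_bo *bo)
{
   return bo->kmod_bo->size; /* in this commit: bo->size */
}

static inline uint32_t
panfrost_bo_handle(struct panfrost_bo *bo)
{
   return bo->kmod_bo->handle; /* in this commit: bo->gem_handle */
}

The accessors actually added by the hunk below (in the header that defines struct panfrost_bo) return bo->size and bo->gem_handle directly; the redirection above only illustrates the motivation stated in the commit message.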

@@ -1769,7 +1769,7 @@ emit_image_bufs(struct panfrost_batch *batch, enum pipe_shader_type shader,
cfg.type = pan_modifier_to_attr_type(rsrc->image.layout.modifier);
cfg.pointer = rsrc->image.data.bo->ptr.gpu + offset;
cfg.stride = util_format_get_blocksize(image->format);
cfg.size = rsrc->image.data.bo->size - offset;
cfg.size = panfrost_bo_size(rsrc->image.data.bo) - offset;
}
if (is_buffer) {

@@ -149,10 +149,12 @@ jm_submit_jc(struct panfrost_batch *batch, mali_ptr first_job_desc,
* by fragment jobs (the polygon list is coming from this heap).
*/
if (batch->jm.jobs.vtc_jc.first_tiler)
bo_handles[submit.bo_handle_count++] = dev->tiler_heap->gem_handle;
bo_handles[submit.bo_handle_count++] =
panfrost_bo_handle(dev->tiler_heap);
/* Always used on Bifrost, occasionally used on Midgard */
bo_handles[submit.bo_handle_count++] = dev->sample_positions->gem_handle;
bo_handles[submit.bo_handle_count++] =
panfrost_bo_handle(dev->sample_positions);
submit.bo_handles = (u64)(uintptr_t)bo_handles;
if (ctx->is_noop)
@@ -365,10 +367,10 @@ jm_emit_tiler_desc(struct panfrost_batch *batch)
struct panfrost_ptr t = pan_pool_alloc_desc(&batch->pool.base, TILER_HEAP);
pan_pack(t.cpu, TILER_HEAP, heap) {
heap.size = dev->tiler_heap->size;
heap.size = panfrost_bo_size(dev->tiler_heap);
heap.base = dev->tiler_heap->ptr.gpu;
heap.bottom = dev->tiler_heap->ptr.gpu;
heap.top = dev->tiler_heap->ptr.gpu + dev->tiler_heap->size;
heap.top = dev->tiler_heap->ptr.gpu + panfrost_bo_size(dev->tiler_heap);
}
mali_ptr heap = t.gpu;

@@ -300,7 +300,7 @@ panfrost_batch_uses_resource(struct panfrost_batch *batch,
struct panfrost_resource *rsrc)
{
/* A resource is used iff its current BO is used */
uint32_t handle = rsrc->image.data.bo->gem_handle;
uint32_t handle = panfrost_bo_handle(rsrc->image.data.bo);
unsigned size = util_dynarray_num_elements(&batch->bos, pan_bo_access);
/* If out of bounds, certainly not used */
@@ -318,7 +318,8 @@ panfrost_batch_add_bo_old(struct panfrost_batch *batch, struct panfrost_bo *bo,
if (!bo)
return;
pan_bo_access *entry = panfrost_batch_get_bo_access(batch, bo->gem_handle);
pan_bo_access *entry =
panfrost_batch_get_bo_access(batch, panfrost_bo_handle(bo));
pan_bo_access old_flags = *entry;
if (!old_flags) {
@@ -417,7 +418,7 @@ panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
size_per_thread, thread_tls_alloc, core_id_range);
if (batch->scratchpad) {
assert(batch->scratchpad->size >= size);
assert(panfrost_bo_size(batch->scratchpad) >= size);
} else {
batch->scratchpad =
panfrost_batch_create_bo(batch, size, PAN_BO_INVISIBLE,
@@ -434,7 +435,7 @@ panfrost_batch_get_shared_memory(struct panfrost_batch *batch, unsigned size,
unsigned workgroup_count)
{
if (batch->shared_memory) {
assert(batch->shared_memory->size >= size);
assert(panfrost_bo_size(batch->shared_memory) >= size);
} else {
batch->shared_memory = panfrost_batch_create_bo(
batch, size, PAN_BO_INVISIBLE, PIPE_SHADER_VERTEX,

@@ -104,8 +104,8 @@ panfrost_pool_get_bo_handles(struct panfrost_pool *pool, uint32_t *handles)
unsigned idx = 0;
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
assert((*bo)->gem_handle > 0);
handles[idx++] = (*bo)->gem_handle;
assert(panfrost_bo_handle(*bo) > 0);
handles[idx++] = panfrost_bo_handle(*bo);
/* Update the BO access flags so that panfrost_bo_wait() knows
* about all pending accesses.

@@ -194,7 +194,7 @@ panfrost_resource_get_handle(struct pipe_screen *pscreen,
if (handle->type == WINSYS_HANDLE_TYPE_KMS && dev->ro) {
return renderonly_get_handle(scanout, handle);
} else if (handle->type == WINSYS_HANDLE_TYPE_KMS) {
handle->handle = rsrc->image.data.bo->gem_handle;
handle->handle = panfrost_bo_handle(rsrc->image.data.bo);
} else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
int fd = panfrost_bo_export(rsrc->image.data.bo);
@@ -1140,9 +1140,10 @@ panfrost_ptr_map(struct pipe_context *pctx, struct pipe_resource *resource,
/* If we haven't already mmaped, now's the time */
panfrost_bo_mmap(bo);
if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
pandecode_inject_mmap(dev->decode_ctx, bo->ptr.gpu, bo->ptr.cpu, bo->size,
NULL);
if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
pandecode_inject_mmap(dev->decode_ctx, bo->ptr.gpu, bo->ptr.cpu,
panfrost_bo_size(bo), NULL);
}
/* Upgrade writes to uninitialized ranges to UNSYNCHRONIZED */
if ((usage & PIPE_MAP_WRITE) && resource->target == PIPE_BUFFER &&
@@ -1208,12 +1209,16 @@ panfrost_ptr_map(struct pipe_context *pctx, struct pipe_resource *resource,
* importer/exporter wouldn't see the change we're
* doing to it.
*/
if (!(bo->flags & PAN_BO_SHARED))
newbo = panfrost_bo_create(dev, bo->size, flags, bo->label);
if (!(bo->flags & PAN_BO_SHARED)) {
newbo =
panfrost_bo_create(dev, panfrost_bo_size(bo), flags, bo->label);
}
if (newbo) {
if (copy_resource)
memcpy(newbo->ptr.cpu, rsrc->image.data.bo->ptr.cpu, bo->size);
if (copy_resource) {
memcpy(newbo->ptr.cpu, rsrc->image.data.bo->ptr.cpu,
panfrost_bo_size(bo));
}
/* Swap the pointers, dropping a reference to
* the old BO which is no longer referenced from
@@ -1518,7 +1523,7 @@ panfrost_pack_afbc(struct panfrost_context *ctx,
}
unsigned new_size = ALIGN_POT(total_size, 4096); // FIXME
unsigned old_size = prsrc->image.data.bo->size;
unsigned old_size = panfrost_bo_size(prsrc->image.data.bo);
unsigned ratio = 100 * new_size / old_size;
if (ratio > screen->max_afbc_packing_ratio)
@@ -1606,7 +1611,7 @@ panfrost_ptr_unmap(struct pipe_context *pctx, struct pipe_transfer *transfer)
if (panfrost_should_linear_convert(dev, prsrc, transfer)) {
panfrost_resource_setup(dev, prsrc, DRM_FORMAT_MOD_LINEAR,
prsrc->image.layout.format);
if (prsrc->image.layout.data_size > bo->size) {
if (prsrc->image.layout.data_size > panfrost_bo_size(bo)) {
const char *label = bo->label;
panfrost_bo_unreference(bo);
bo = prsrc->image.data.bo = panfrost_bo_create(

@@ -93,7 +93,7 @@ panfrost_bo_alloc(struct panfrost_device *dev, size_t size, uint32_t flags,
static void
panfrost_bo_free(struct panfrost_bo *bo)
{
struct drm_gem_close gem_close = {.handle = bo->gem_handle};
struct drm_gem_close gem_close = {.handle = panfrost_bo_handle(bo)};
int fd = panfrost_device_fd(bo->dev);
int ret;
@@ -116,7 +116,7 @@ bool
panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers)
{
struct drm_panfrost_wait_bo req = {
.handle = bo->gem_handle,
.handle = panfrost_bo_handle(bo),
.timeout_ns = timeout_ns,
};
int ret;
@@ -195,7 +195,7 @@ panfrost_bo_cache_fetch(struct panfrost_device *dev, size_t size,
/* Iterate the bucket looking for something suitable */
list_for_each_entry_safe(struct panfrost_bo, entry, bucket, bucket_link) {
if (entry->size < size || entry->flags != flags)
if (panfrost_bo_size(entry) < size || entry->flags != flags)
continue;
/* If the oldest BO in the cache is busy, likely so is
@@ -204,7 +204,7 @@ panfrost_bo_cache_fetch(struct panfrost_device *dev, size_t size,
break;
struct drm_panfrost_madvise madv = {
.handle = entry->gem_handle,
.handle = panfrost_bo_handle(entry),
.madv = PANFROST_MADV_WILLNEED,
};
int ret;
@@ -268,11 +268,11 @@ panfrost_bo_cache_put(struct panfrost_bo *bo)
/* Must be first */
pthread_mutex_lock(&dev->bo_cache.lock);
struct list_head *bucket = pan_bucket(dev, MAX2(bo->size, 4096));
struct list_head *bucket = pan_bucket(dev, MAX2(panfrost_bo_size(bo), 4096));
struct drm_panfrost_madvise madv;
struct timespec time;
madv.handle = bo->gem_handle;
madv.handle = panfrost_bo_handle(bo);
madv.madv = PANFROST_MADV_DONTNEED;
madv.retained = 0;
@@ -324,7 +324,7 @@ panfrost_bo_cache_evict_all(struct panfrost_device *dev)
void
panfrost_bo_mmap(struct panfrost_bo *bo)
{
struct drm_panfrost_mmap_bo mmap_bo = {.handle = bo->gem_handle};
struct drm_panfrost_mmap_bo mmap_bo = {.handle = panfrost_bo_handle(bo)};
int ret;
if (bo->ptr.cpu)
@@ -337,14 +337,15 @@ panfrost_bo_mmap(struct panfrost_bo *bo)
assert(0);
}
bo->ptr.cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
panfrost_device_fd(bo->dev), mmap_bo.offset);
bo->ptr.cpu =
os_mmap(NULL, panfrost_bo_size(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
panfrost_device_fd(bo->dev), mmap_bo.offset);
if (bo->ptr.cpu == MAP_FAILED) {
bo->ptr.cpu = NULL;
fprintf(stderr,
"mmap failed: result=%p size=0x%llx fd=%i offset=0x%llx %m\n",
bo->ptr.cpu, (long long)bo->size, panfrost_device_fd(bo->dev),
(long long)mmap_bo.offset);
bo->ptr.cpu, (long long)panfrost_bo_size(bo),
panfrost_device_fd(bo->dev), (long long)mmap_bo.offset);
}
}
@@ -354,7 +355,7 @@ panfrost_bo_munmap(struct panfrost_bo *bo)
if (!bo->ptr.cpu)
return;
if (os_munmap((void *)(uintptr_t)bo->ptr.cpu, bo->size)) {
if (os_munmap((void *)(uintptr_t)bo->ptr.cpu, panfrost_bo_size(bo))) {
perror("munmap");
abort();
}
@@ -409,11 +410,11 @@ panfrost_bo_create(struct panfrost_device *dev, size_t size, uint32_t flags,
if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
if (flags & PAN_BO_INVISIBLE)
pandecode_inject_mmap(dev->decode_ctx, bo->ptr.gpu, NULL, bo->size,
NULL);
pandecode_inject_mmap(dev->decode_ctx, bo->ptr.gpu, NULL,
panfrost_bo_size(bo), NULL);
else if (!(flags & PAN_BO_DELAY_MMAP))
pandecode_inject_mmap(dev->decode_ctx, bo->ptr.gpu, bo->ptr.cpu,
bo->size, NULL);
panfrost_bo_size(bo), NULL);
}
return bo;
@@ -451,7 +452,8 @@ panfrost_bo_unreference(struct panfrost_bo *bo)
panfrost_bo_munmap(bo);
if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC))
pandecode_inject_free(dev->decode_ctx, bo->ptr.gpu, bo->size);
pandecode_inject_free(dev->decode_ctx, bo->ptr.gpu,
panfrost_bo_size(bo));
/* Rather than freeing the BO now, we'll cache the BO for later
* allocations if we're allowed to.
@@ -492,7 +494,7 @@ panfrost_bo_import(struct panfrost_device *dev, int fd)
* a nice thing for mmap to try mmap. Be more robust also
* for zero sized maps and fail nicely too
*/
if ((bo->size == 0) || (bo->size == (size_t)-1)) {
if ((panfrost_bo_size(bo) == 0) || (panfrost_bo_size(bo) == (size_t)-1)) {
pthread_mutex_unlock(&dev->bo_map_lock);
return NULL;
}
@@ -524,7 +526,7 @@ int
panfrost_bo_export(struct panfrost_bo *bo)
{
struct drm_prime_handle args = {
.handle = bo->gem_handle,
.handle = panfrost_bo_handle(bo),
.flags = DRM_CLOEXEC,
};

@@ -117,6 +117,18 @@ struct panfrost_bo {
const char *label;
};
static inline size_t
panfrost_bo_size(struct panfrost_bo *bo)
{
return bo->size;
}
static inline uint32_t
panfrost_bo_handle(struct panfrost_bo *bo)
{
return bo->gem_handle;
}
bool panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns,
bool wait_readers);
void panfrost_bo_reference(struct panfrost_bo *bo);
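
As a usage note for the accessors added in the hunk above, a converted call site follows this shape. This is a minimal sketch: record_bo and its parameters are hypothetical, and the include name is assumed; the pattern mirrors the panfrost_pool_get_bo_handles change earlier in this commit.

#include <assert.h>
#include <stdint.h>

#include "pan_bo.h" /* assumed header providing the accessors */

/* Collect a BO's GEM handle through the accessor instead of reading
 * bo->gem_handle directly; size checks likewise go through
 * panfrost_bo_size() rather than bo->size. */
static void
record_bo(struct panfrost_bo *bo, uint32_t *handles, unsigned *count)
{
   assert(panfrost_bo_handle(bo) > 0);
   handles[(*count)++] = panfrost_bo_handle(bo);
}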

@@ -589,7 +589,8 @@ pan_emit_midgard_tiler(const struct panfrost_device *dev,
cfg.polygon_list_size = panfrost_tiler_full_size(
fb->width, fb->height, cfg.hierarchy_mask, hierarchy);
cfg.heap_start = dev->tiler_heap->ptr.gpu;
cfg.heap_end = dev->tiler_heap->ptr.gpu + dev->tiler_heap->size;
cfg.heap_end =
dev->tiler_heap->ptr.gpu + panfrost_bo_size(dev->tiler_heap);
}
cfg.polygon_list = tiler_ctx->midgard.polygon_list->ptr.gpu;

@@ -61,7 +61,7 @@ panvk_pool_alloc_backing(struct panvk_pool *pool, size_t bo_sz)
pool->base.label);
}
if (bo->size == pool->base.slab_size)
if (panfrost_bo_size(bo) == pool->base.slab_size)
util_dynarray_append(&pool->bos, struct panfrost_bo *, bo);
else
util_dynarray_append(&pool->big_bos, struct panfrost_bo *, bo);
@@ -149,7 +149,7 @@ panvk_pool_get_bo_handles(struct panvk_pool *pool, uint32_t *handles)
{
unsigned idx = 0;
util_dynarray_foreach(&pool->bos, struct panfrost_bo *, bo) {
assert((*bo)->gem_handle > 0);
handles[idx++] = (*bo)->gem_handle;
assert(panfrost_bo_handle(*bo) > 0);
handles[idx++] = panfrost_bo_handle(*bo);
}
}

@@ -829,10 +829,10 @@ panvk_per_arch(emit_tiler_context)(const struct panvk_device *dev,
const struct panfrost_device *pdev = &dev->physical_device->pdev;
pan_pack(descs->cpu + pan_size(TILER_CONTEXT), TILER_HEAP, cfg) {
cfg.size = pdev->tiler_heap->size;
cfg.size = panfrost_bo_size(pdev->tiler_heap);
cfg.base = pdev->tiler_heap->ptr.gpu;
cfg.bottom = pdev->tiler_heap->ptr.gpu;
cfg.top = pdev->tiler_heap->ptr.gpu + pdev->tiler_heap->size;
cfg.top = pdev->tiler_heap->ptr.gpu + panfrost_bo_size(pdev->tiler_heap);
}
pan_pack(descs->cpu, TILER_CONTEXT, cfg) {

@@ -253,20 +253,20 @@ panvk_per_arch(queue_submit)(struct vk_queue *vk_queue,
for (unsigned i = 0; i < batch->fb.info->attachment_count; i++) {
const struct pan_image *image = pan_image_view_get_plane(
&batch->fb.info->attachments[i].iview->pview, 0);
bos[bo_idx++] = image->data.bo->gem_handle;
bos[bo_idx++] = panfrost_bo_handle(image->data.bo);
}
}
if (batch->blit.src)
bos[bo_idx++] = batch->blit.src->gem_handle;
bos[bo_idx++] = panfrost_bo_handle(batch->blit.src);
if (batch->blit.dst)
bos[bo_idx++] = batch->blit.dst->gem_handle;
bos[bo_idx++] = panfrost_bo_handle(batch->blit.dst);
if (batch->jc.first_tiler)
bos[bo_idx++] = pdev->tiler_heap->gem_handle;
bos[bo_idx++] = panfrost_bo_handle(pdev->tiler_heap);
bos[bo_idx++] = pdev->sample_positions->gem_handle;
bos[bo_idx++] = panfrost_bo_handle(pdev->sample_positions);
assert(bo_idx == nr_bos);
/* Merge identical BO entries. */

@@ -143,7 +143,7 @@ panvk_per_arch(CreateImageView)(VkDevice _device,
: MALI_ATTRIBUTE_TYPE_3D_INTERLEAVED;
cfg.pointer = image->pimage.data.bo->ptr.gpu + offset;
cfg.stride = util_format_get_blocksize(view->pview.format);
cfg.size = image->pimage.data.bo->size - offset;
cfg.size = panfrost_bo_size(image->pimage.data.bo) - offset;
}
attrib_buf += pan_size(ATTRIBUTE_BUFFER);