asahi: remove agx_bo::dev

Track the device at call sites instead.

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30633>
Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Date:   2024-08-01 07:23:05 -04:00
Commit: df725d4f64 (parent c834758ba7)

 20 files changed, 100 insertions(+), 97 deletions(-)
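For context, the change drops the BO's back-pointer to its owning device and threads the device through every entry point instead. A minimal before/after sketch of the calling convention (function names are from this diff; the surrounding usage is hypothetical):

   /* Hypothetical usage; signatures as declared in this diff. */
   struct agx_bo *bo =
      agx_bo_create(dev, size, AGX_BO_WRITEBACK, "example");

   /* Before: the device was recovered from the now-removed bo->dev. */
   agx_bo_unreference(bo);

   /* After: callers pass the owning device explicitly. */
   agx_bo_unreference(dev, bo);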


@@ -97,9 +97,8 @@ agx_bo_cache_evict_stale_bos(struct agx_device *dev, unsigned tv_sec)
 }
 
 static void
-agx_bo_cache_put_locked(struct agx_bo *bo)
+agx_bo_cache_put_locked(struct agx_device *dev, struct agx_bo *bo)
 {
-   struct agx_device *dev = bo->dev;
    struct list_head *bucket = agx_bucket(dev, bo->size);
    struct timespec time;
@@ -132,15 +131,13 @@ agx_bo_cache_put_locked(struct agx_bo *bo)
 /* Tries to add a BO to the cache. Returns if it was successful */
 static bool
-agx_bo_cache_put(struct agx_bo *bo)
+agx_bo_cache_put(struct agx_device *dev, struct agx_bo *bo)
 {
-   struct agx_device *dev = bo->dev;
-
    if (bo->flags & AGX_BO_SHARED) {
       return false;
    } else {
       simple_mtx_lock(&dev->bo_cache.lock);
-      agx_bo_cache_put_locked(bo);
+      agx_bo_cache_put_locked(dev, bo);
       simple_mtx_unlock(&dev->bo_cache.lock);
 
       return true;
@@ -172,7 +169,7 @@ agx_bo_reference(struct agx_bo *bo)
 }
 
 void
-agx_bo_unreference(struct agx_bo *bo)
+agx_bo_unreference(struct agx_device *dev, struct agx_bo *bo)
 {
    if (!bo)
       return;
@@ -181,8 +178,6 @@ agx_bo_unreference(struct agx_bo *bo)
    if (p_atomic_dec_return(&bo->refcnt))
       return;
 
-   struct agx_device *dev = bo->dev;
-
    pthread_mutex_lock(&dev->bo_map_lock);
 
    /* Someone might have imported this BO while we were waiting for the
@@ -194,7 +189,7 @@ agx_bo_unreference(struct agx_bo *bo)
    if (dev->debug & AGX_DBG_TRACE)
       agxdecode_track_free(dev->agxdecode, bo);
 
-   if (!agx_bo_cache_put(bo))
+   if (!agx_bo_cache_put(dev, bo))
       agx_bo_free(dev, bo);
 }


@@ -83,9 +83,6 @@ struct agx_bo {
    /* Current writer, if any (queue in upper 32 bits, syncobj in lower 32 bits) */
    uint64_t writer;
 
-   /* Owner */
-   struct agx_device *dev;
-
    /* Update atomically */
    int32_t refcnt;
@@ -132,9 +129,9 @@ agx_bo_create(struct agx_device *dev, unsigned size, enum agx_bo_flags flags,
 }
 
 void agx_bo_reference(struct agx_bo *bo);
-void agx_bo_unreference(struct agx_bo *bo);
+void agx_bo_unreference(struct agx_device *dev, struct agx_bo *bo);
 struct agx_bo *agx_bo_import(struct agx_device *dev, int fd);
-int agx_bo_export(struct agx_bo *bo);
+int agx_bo_export(struct agx_device *dev, struct agx_bo *bo);
 
 void agx_bo_free(struct agx_device *dev, struct agx_bo *bo);
 struct agx_bo *agx_bo_cache_fetch(struct agx_device *dev, size_t size,


@@ -175,7 +175,6 @@ agx_bo_alloc(struct agx_device *dev, size_t size, size_t align,
    bo->size = gem_create.size;
    bo->align = MAX2(dev->params.vm_page_size, align);
    bo->flags = flags;
-   bo->dev = dev;
    bo->handle = handle;
    bo->prime_fd = -1;
@@ -207,7 +206,7 @@ agx_bo_alloc(struct agx_device *dev, size_t size, size_t align,
       return NULL;
    }
 
-   dev->ops.bo_mmap(bo);
+   dev->ops.bo_mmap(dev, bo);
 
    if (flags & AGX_BO_LOW_VA)
       bo->ptr.gpu -= dev->shader_base;
@@ -218,7 +217,7 @@ agx_bo_alloc(struct agx_device *dev, size_t size, size_t align,
 }
 
 static void
-agx_bo_mmap(struct agx_bo *bo)
+agx_bo_mmap(struct agx_device *dev, struct agx_bo *bo)
 {
    struct drm_asahi_gem_mmap_offset gem_mmap_offset = {.handle = bo->handle};
    int ret;
@@ -226,20 +225,20 @@ agx_bo_mmap(struct agx_bo *bo)
    if (bo->ptr.cpu)
       return;
 
-   ret =
-      drmIoctl(bo->dev->fd, DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET, &gem_mmap_offset);
+   ret = drmIoctl(dev->fd, DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET, &gem_mmap_offset);
    if (ret) {
       fprintf(stderr, "DRM_IOCTL_ASAHI_MMAP_BO failed: %m\n");
       assert(0);
    }
 
    bo->ptr.cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
-                         bo->dev->fd, gem_mmap_offset.offset);
+                         dev->fd, gem_mmap_offset.offset);
    if (bo->ptr.cpu == MAP_FAILED) {
       bo->ptr.cpu = NULL;
       fprintf(stderr,
               "mmap failed: result=%p size=0x%llx fd=%i offset=0x%llx %m\n",
-              bo->ptr.cpu, (long long)bo->size, bo->dev->fd,
+              bo->ptr.cpu, (long long)bo->size, dev->fd,
               (long long)gem_mmap_offset.offset);
    }
 }
@@ -263,9 +262,9 @@ agx_bo_import(struct agx_device *dev, int fd)
    bo = agx_lookup_bo(dev, gem_handle);
    dev->max_handle = MAX2(dev->max_handle, gem_handle);
 
-   if (!bo->dev) {
-      bo->dev = dev;
+   if (!bo->size) {
       bo->size = lseek(fd, 0, SEEK_END);
+      bo->align = dev->params.vm_page_size;
 
       /* Sometimes this can fail and return -1. size of -1 is not
        * a nice thing for mmap to try mmap. Be more robust also
@@ -345,13 +344,13 @@ error:
 }
 
 int
-agx_bo_export(struct agx_bo *bo)
+agx_bo_export(struct agx_device *dev, struct agx_bo *bo)
 {
    int fd;
 
    assert(bo->flags & AGX_BO_SHAREABLE);
 
-   if (drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &fd))
+   if (drmPrimeHandleToFD(dev->fd, bo->handle, DRM_CLOEXEC, &fd))
       return -1;
 
    if (!(bo->flags & AGX_BO_SHARED)) {
@@ -366,11 +365,11 @@ agx_bo_export(struct agx_bo *bo)
       if (writer) {
          int out_sync_fd = -1;
          int ret = drmSyncobjExportSyncFile(
-            bo->dev->fd, agx_bo_writer_syncobj(writer), &out_sync_fd);
+            dev->fd, agx_bo_writer_syncobj(writer), &out_sync_fd);
          assert(ret >= 0);
         assert(out_sync_fd >= 0);
 
-         ret = agx_import_sync_file(bo->dev, bo, out_sync_fd);
+         ret = agx_import_sync_file(dev, bo, out_sync_fd);
         assert(ret >= 0);
         close(out_sync_fd);
      }
@@ -615,7 +614,7 @@ void
 agx_close_device(struct agx_device *dev)
 {
    ralloc_free((void *)dev->libagx);
-   agx_bo_unreference(dev->helper);
+   agx_bo_unreference(dev, dev->helper);
    agx_bo_cache_evict_all(dev);
    util_sparse_array_finish(&dev->bo_map);
    agxdecode_destroy_context(dev->agxdecode);
@@ -725,7 +724,7 @@ agx_debug_fault(struct agx_device *dev, uint64_t addr)
       if (bo->flags & AGX_BO_LOW_VA)
          bo_addr += dev->shader_base;
 
-      if (!bo->dev || bo_addr > addr)
+      if (!bo->size || bo_addr > addr)
         continue;
 
      if (!best || bo_addr > best->ptr.gpu)


@@ -64,7 +64,7 @@ typedef struct {
                             enum agx_bo_flags flags);
    int (*bo_bind)(struct agx_device *dev, struct agx_bo *bo, uint64_t addr,
                   uint32_t flags);
-   void (*bo_mmap)(struct agx_bo *bo);
+   void (*bo_mmap)(struct agx_device *dev, struct agx_bo *bo);
    ssize_t (*get_params)(struct agx_device *dev, void *buf, size_t size);
    int (*submit)(struct agx_device *dev, struct drm_asahi_submit *submit,
                  uint32_t vbo_res_id);
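Since the ops table can no longer lean on bo->dev, each callback now receives the device as its first argument. A sketch of how a backend wires and invokes the hook; the initializer fragment below is illustrative (the typedef's name is not shown in this diff), while the callback names and the call itself come from the diff:

   /* Backend fills its ops table with the new (dev, bo) signature: */
   .bo_mmap = agx_bo_mmap,

   /* Callers thread the device through explicitly: */
   dev->ops.bo_mmap(dev, bo);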


@@ -122,14 +122,13 @@ agx_virtio_bo_alloc(struct agx_device *dev, size_t size, size_t align,
    bo->size = size;
    bo->align = MAX2(dev->params.vm_page_size, align);
    bo->flags = flags;
-   bo->dev = dev;
    bo->handle = handle;
    bo->prime_fd = -1;
    bo->blob_id = blob_id;
    bo->ptr.gpu = ptr_gpu;
    bo->vbo_res_id = vdrm_handle_to_res_id(dev->vdrm, handle);
 
-   dev->ops.bo_mmap(bo);
+   dev->ops.bo_mmap(dev, bo);
 
    if (flags & AGX_BO_LOW_VA)
       bo->ptr.gpu -= dev->shader_base;
@@ -164,17 +163,17 @@ agx_virtio_bo_bind(struct agx_device *dev, struct agx_bo *bo, uint64_t addr,
 }
 
 static void
-agx_virtio_bo_mmap(struct agx_bo *bo)
+agx_virtio_bo_mmap(struct agx_device *dev, struct agx_bo *bo)
 {
    if (bo->ptr.cpu) {
       return;
    }
 
-   bo->ptr.cpu = vdrm_bo_map(bo->dev->vdrm, bo->handle, bo->size, NULL);
+   bo->ptr.cpu = vdrm_bo_map(dev->vdrm, bo->handle, bo->size, NULL);
    if (bo->ptr.cpu == MAP_FAILED) {
       bo->ptr.cpu = NULL;
       fprintf(stderr, "mmap failed: result=%p size=0x%llx fd=%i\n", bo->ptr.cpu,
-              (long long)bo->size, bo->dev->fd);
+              (long long)bo->size, dev->fd);
    }
 }


@@ -92,7 +92,7 @@ static void
 agx_scratch_realloc(struct agx_scratch *scratch)
 {
    if (scratch->buf)
-      agx_bo_unreference(scratch->buf);
+      agx_bo_unreference(scratch->dev, scratch->buf);
 
    struct spill_size size = agx_scratch_get_spill_size(scratch->size_dwords);
@@ -301,6 +301,6 @@ void
 agx_scratch_fini(struct agx_scratch *scratch)
 {
    if (scratch->buf)
-      agx_bo_unreference(scratch->buf);
+      agx_bo_unreference(scratch->dev, scratch->buf);
    scratch->buf = NULL;
 }


@@ -45,7 +45,7 @@ void
 agx_pool_cleanup(struct agx_pool *pool)
 {
    util_dynarray_foreach(&pool->bos, struct agx_bo *, bo) {
-      agx_bo_unreference(*bo);
+      agx_bo_unreference(pool->dev, *bo);
    }
 
    util_dynarray_fini(&pool->bos);


@@ -48,6 +48,7 @@ static void
 hk_free_resettable_cmd_buffer(struct hk_cmd_buffer *cmd)
 {
    struct hk_cmd_pool *pool = hk_cmd_buffer_pool(cmd);
+   struct hk_device *dev = hk_cmd_pool_device(pool);
 
    hk_descriptor_state_fini(cmd, &cmd->state.gfx.descriptors);
    hk_descriptor_state_fini(cmd, &cmd->state.cs.descriptors);
@@ -61,7 +62,7 @@ hk_free_resettable_cmd_buffer(struct hk_cmd_buffer *cmd)
    }
 
    util_dynarray_foreach(&cmd->large_bos, struct agx_bo *, bo) {
-      agx_bo_unreference(*bo);
+      agx_bo_unreference(&dev->dev, *bo);
    }
 
    util_dynarray_clear(&cmd->large_bos);


@@ -38,7 +38,8 @@ hk_cmd_bo_create(struct hk_cmd_pool *pool, bool usc, struct hk_cmd_bo **bo_out)
 static void
 hk_cmd_bo_destroy(struct hk_cmd_pool *pool, struct hk_cmd_bo *bo)
 {
-   agx_bo_unreference(bo->bo);
+   struct hk_device *dev = hk_cmd_pool_device(pool);
+   agx_bo_unreference(&dev->dev, bo->bo);
    vk_free(&pool->vk.alloc, bo);
 }


@@ -419,7 +419,7 @@ hk_destroy_descriptor_pool(struct hk_device *dev,
       hk_descriptor_set_destroy(dev, pool, set);
 
    util_vma_heap_finish(&pool->heap);
-   agx_bo_unreference(pool->bo);
+   agx_bo_unreference(&dev->dev, pool->bo);
 
    vk_object_free(&dev->vk, pAllocator, pool);
 }


@@ -88,7 +88,7 @@ void
 hk_descriptor_table_finish(struct hk_device *dev,
                            struct hk_descriptor_table *table)
 {
-   agx_bo_unreference(table->bo);
+   agx_bo_unreference(&dev->dev, table->bo);
    vk_free(&dev->vk.alloc, table->free_table);
    simple_mtx_destroy(&table->mutex);
 }


@@ -440,7 +440,7 @@ fail_mem_cache:
 fail_queue:
    hk_queue_finish(dev, &dev->queue);
 fail_rodata:
-   agx_bo_unreference(dev->rodata.bo);
+   agx_bo_unreference(&dev->dev, dev->rodata.bo);
 fail_bg_eot:
    agx_bg_eot_cleanup(&dev->bg_eot);
 fail_internal_shaders_2:
@@ -487,8 +487,8 @@ hk_DestroyDevice(VkDevice _device, const VkAllocationCallbacks *pAllocator)
    hk_destroy_sampler_heap(dev, &dev->samplers);
    hk_descriptor_table_finish(dev, &dev->images);
    hk_descriptor_table_finish(dev, &dev->occlusion_queries);
-   agx_bo_unreference(dev->rodata.bo);
-   agx_bo_unreference(dev->heap);
+   agx_bo_unreference(&dev->dev, dev->rodata.bo);
+   agx_bo_unreference(&dev->dev, dev->heap);
    agx_bg_eot_cleanup(&dev->bg_eot);
    agx_close_device(&dev->dev);
    vk_free(&dev->vk.alloc, dev);


@@ -80,7 +80,7 @@ hk_GetMemoryFdPropertiesKHR(VkDevice device,
    pMemoryFdProperties->memoryTypeBits = type_bits;
 
-   agx_bo_unreference(bo);
+   agx_bo_unreference(&dev->dev, bo);
 
    return VK_SUCCESS;
 }
@@ -190,7 +190,7 @@ hk_FreeMemory(VkDevice device, VkDeviceMemory _mem,
    struct hk_memory_heap *heap = &pdev->mem_heaps[type->heapIndex];
    p_atomic_add(&heap->used, -((int64_t)mem->bo->size));
 
-   agx_bo_unreference(mem->bo);
+   agx_bo_unreference(&dev->dev, mem->bo);
 
    vk_device_memory_destroy(&dev->vk, pAllocator, &mem->vk);
 }
@@ -312,7 +312,7 @@ hk_GetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR *pGetFdInfo,
    switch (pGetFdInfo->handleType) {
    case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
    case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
-      *pFD = agx_bo_export(memory->bo);
+      *pFD = agx_bo_export(&dev->dev, memory->bo);
       return VK_SUCCESS;
    default:
       assert(!"unsupported handle type");


@@ -52,7 +52,7 @@ hk_DestroyEvent(VkDevice device, VkEvent _event,
    if (!event)
       return;
 
-   agx_bo_unreference(event->bo);
+   agx_bo_unreference(&dev->dev, event->bo);
 
    vk_object_free(&dev->vk, pAllocator, event);
 }


@@ -140,7 +140,7 @@ hk_DestroyQueryPool(VkDevice device, VkQueryPool queryPool,
       hk_descriptor_table_remove(dev, &dev->occlusion_queries, oq_index[i]);
    }
 
-   agx_bo_unreference(pool->bo);
+   agx_bo_unreference(&dev->dev, pool->bo);
    vk_query_pool_destroy(&dev->vk, pAllocator, &pool->vk);
 }


@@ -869,30 +869,30 @@ hk_compile_nir(struct hk_device *dev, const VkAllocationCallbacks *pAllocator,
 static const struct vk_shader_ops hk_shader_ops;
 
 static void
-hk_destroy_linked_shader(struct hk_linked_shader *linked)
+hk_destroy_linked_shader(struct hk_device *dev, struct hk_linked_shader *linked)
 {
-   agx_bo_unreference(linked->b.bo);
+   agx_bo_unreference(&dev->dev, linked->b.bo);
    ralloc_free(linked);
 }
 
-static void
-hk_destroy_linked_shader_ht(struct hash_entry *he)
-{
-   hk_destroy_linked_shader(he->data);
-}
-
 static void
-hk_shader_destroy(struct hk_shader *s)
+hk_shader_destroy(struct hk_device *dev, struct hk_shader *s)
 {
    free((void *)s->code_ptr);
    free((void *)s->data_ptr);
-   agx_bo_unreference(s->bo);
+   agx_bo_unreference(&dev->dev, s->bo);
    simple_mtx_destroy(&s->linked.lock);
 
-   _mesa_hash_table_destroy(s->linked.ht, hk_destroy_linked_shader_ht);
-
    if (s->only_linked)
-      hk_destroy_linked_shader(s->only_linked);
+      hk_destroy_linked_shader(dev, s->only_linked);
+
+   if (s->linked.ht) {
+      hash_table_foreach(s->linked.ht, entry) {
+         hk_destroy_linked_shader(dev, entry->data);
+      }
+      _mesa_hash_table_destroy(s->linked.ht, NULL);
+   }
 }
@@ -904,7 +904,7 @@ hk_api_shader_destroy(struct vk_device *vk_dev, struct vk_shader *vk_shader,
       container_of(vk_shader, struct hk_api_shader, vk);
 
    hk_foreach_variant(obj, shader) {
-      hk_shader_destroy(shader);
+      hk_shader_destroy(dev, shader);
    }
 
    vk_shader_free(&dev->vk, pAllocator, &obj->vk);


@@ -136,7 +136,7 @@ agx_batch_init(struct agx_context *ctx,
    batch->initialized = false;
    batch->draws = 0;
    batch->incoherent_writes = false;
-   agx_bo_unreference(batch->sampler_heap.bo);
+   agx_bo_unreference(dev, batch->sampler_heap.bo);
    batch->sampler_heap.bo = NULL;
    batch->sampler_heap.count = 0;
    batch->vs_scratch = false;
@@ -347,7 +347,7 @@ agx_batch_cleanup(struct agx_context *ctx, struct agx_batch *batch, bool reset)
          /* We should write no buffers if this is an empty batch */
          assert(agx_writer_get(ctx, handle) != batch);
 
-         agx_bo_unreference(agx_lookup_bo(dev, handle));
+         agx_bo_unreference(dev, agx_lookup_bo(dev, handle));
       }
    } else {
       int handle;
@@ -363,12 +363,12 @@ agx_batch_cleanup(struct agx_context *ctx, struct agx_batch *batch, bool reset)
          p_atomic_cmpxchg(&bo->writer,
                           agx_bo_writer(ctx->queue_id, batch->syncobj), 0);
 
-         agx_bo_unreference(agx_lookup_bo(dev, handle));
+         agx_bo_unreference(dev, agx_lookup_bo(dev, handle));
       }
    }
 
-   agx_bo_unreference(batch->vdm.bo);
-   agx_bo_unreference(batch->cdm.bo);
+   agx_bo_unreference(dev, batch->vdm.bo);
+   agx_bo_unreference(dev, batch->cdm.bo);
    agx_pool_cleanup(&batch->pool);
    agx_pool_cleanup(&batch->pipeline_pool);


@@ -281,7 +281,7 @@ agx_resource_get_handle(struct pipe_screen *pscreen, struct pipe_context *ctx,
       handle->handle = rsrc->bo->handle;
    } else if (handle->type == WINSYS_HANDLE_TYPE_FD) {
-      int fd = agx_bo_export(rsrc->bo);
+      int fd = agx_bo_export(dev, rsrc->bo);
 
       if (fd < 0)
          return false;
@@ -631,7 +631,7 @@ agx_resource_destroy(struct pipe_screen *screen, struct pipe_resource *prsrc)
    if (rsrc->scanout)
       renderonly_scanout_destroy(rsrc->scanout, agx_screen->dev.ro);
 
-   agx_bo_unreference(rsrc->bo);
+   agx_bo_unreference(&agx_screen->dev, rsrc->bo);
    FREE(rsrc);
 }
@@ -717,7 +717,7 @@ agx_shadow(struct agx_context *ctx, struct agx_resource *rsrc, bool needs_copy)
    }
 
    /* Swap the pointers, dropping a reference */
-   agx_bo_unreference(rsrc->bo);
+   agx_bo_unreference(dev, rsrc->bo);
    rsrc->bo = new_;
 
    /* Reemit descriptors using this resource */
@@ -1000,11 +1000,11 @@ agx_transfer_map(struct pipe_context *pctx, struct pipe_resource *resource,
          agx_sync_writer(ctx, staging, "GPU read staging blit");
       }
 
-      dev->ops.bo_mmap(staging->bo);
+      dev->ops.bo_mmap(dev, staging->bo);
       return staging->bo->ptr.cpu;
    }
 
-   dev->ops.bo_mmap(rsrc->bo);
+   dev->ops.bo_mmap(dev, rsrc->bo);
 
    if (ail_is_level_twiddled_uncompressed(&rsrc->layout, level)) {
       /* Should never happen for buffers, and it's not safe */
@@ -1791,7 +1791,7 @@ agx_destroy_context(struct pipe_context *pctx)
    agx_bg_eot_cleanup(&ctx->bg_eot);
    agx_destroy_meta_shaders(ctx);
 
-   agx_bo_unreference(ctx->result_buf);
+   agx_bo_unreference(dev, ctx->result_buf);
 
    /* Lock around the syncobj destruction, to avoid racing
    * command submission in another context.


@@ -48,6 +48,7 @@ is_timer(struct agx_query *query)
 struct agx_oq_heap {
    /* The GPU allocation itself */
+   struct agx_device *dev;
    struct agx_bo *bo;
 
    /* Bitset of query indices that are in use */
@@ -58,7 +59,7 @@ static void
 agx_destroy_oq_heap(void *heap_)
 {
    struct agx_oq_heap *heap = heap_;
-   agx_bo_unreference(heap->bo);
+   agx_bo_unreference(heap->dev, heap->bo);
 }
 
 static struct agx_oq_heap *
@@ -67,8 +68,9 @@ agx_alloc_oq_heap(struct agx_context *ctx)
    struct agx_oq_heap *heap = rzalloc(ctx, struct agx_oq_heap);
    ralloc_set_destructor(heap, agx_destroy_oq_heap);
 
-   heap->bo = agx_bo_create(agx_device(ctx->base.screen),
-                            AGX_MAX_OCCLUSION_QUERIES * sizeof(uint64_t),
+   heap->dev = agx_device(ctx->base.screen);
+   heap->bo =
+      agx_bo_create(heap->dev, AGX_MAX_OCCLUSION_QUERIES * sizeof(uint64_t),
                     AGX_BO_WRITEBACK, "Occlusion query heap");
 
    /* At the start, everything is available */
@@ -216,6 +218,7 @@ agx_destroy_query(struct pipe_context *pctx, struct pipe_query *pquery)
 {
    struct agx_context *ctx = agx_context(pctx);
    struct agx_query *query = (struct agx_query *)pquery;
+   struct agx_device *dev = agx_device(pctx->screen);
 
    /* We don't reference count the occlusion query allocations, so we need to
    * sync writers when destroying so we can freely write from the CPU after
@@ -228,7 +231,7 @@ agx_destroy_query(struct pipe_context *pctx, struct pipe_query *pquery)
       sync_query_writers(ctx, query, "Occlusion query destroy");
       agx_free_oq(ctx, query);
    } else {
-      agx_bo_unreference(query->bo);
+      agx_bo_unreference(dev, query->bo);
    }
 
    free(pquery);


@@ -2463,34 +2463,35 @@ agx_bind_cs_state(struct pipe_context *pctx, void *cso)
 }
 
 /* Forward declare because of the recursion hit with geometry shaders */
-static void agx_delete_uncompiled_shader(struct agx_uncompiled_shader *so);
+static void agx_delete_uncompiled_shader(struct agx_device *dev,
+                                         struct agx_uncompiled_shader *so);
 
 static void
-agx_delete_compiled_shader_internal(struct agx_compiled_shader *so)
+agx_delete_compiled_shader(struct agx_device *dev,
+                           struct agx_compiled_shader *so)
 {
    if (so->gs_count)
-      agx_delete_compiled_shader_internal(so->gs_count);
+      agx_delete_compiled_shader(dev, so->gs_count);
 
    if (so->pre_gs)
-      agx_delete_compiled_shader_internal(so->pre_gs);
+      agx_delete_compiled_shader(dev, so->pre_gs);
 
    if (so->gs_copy)
-      agx_delete_compiled_shader_internal(so->gs_copy);
+      agx_delete_compiled_shader(dev, so->gs_copy);
 
-   agx_bo_unreference(so->bo);
+   agx_bo_unreference(dev, so->bo);
    FREE(so);
 }
 
 static void
-agx_delete_compiled_shader(struct hash_entry *ent)
+agx_delete_uncompiled_shader(struct agx_device *dev,
+                             struct agx_uncompiled_shader *so)
 {
-   agx_delete_compiled_shader_internal(ent->data);
-}
+   hash_table_foreach(so->variants, ent) {
+      agx_delete_compiled_shader(dev, ent->data);
+   }
 
-static void
-agx_delete_uncompiled_shader(struct agx_uncompiled_shader *so)
-{
-   _mesa_hash_table_destroy(so->variants, agx_delete_compiled_shader);
+   _mesa_hash_table_destroy(so->variants, NULL);
    blob_finish(&so->serialized_nir);
    blob_finish(&so->early_serialized_nir);
@@ -2498,14 +2499,15 @@ agx_delete_uncompiled_shader(struct agx_uncompiled_shader *so)
       for (unsigned j = 0; j < 3; ++j) {
          for (unsigned k = 0; k < 2; ++k) {
             if (so->passthrough_progs[i][j][k])
-               agx_delete_uncompiled_shader(so->passthrough_progs[i][j][k]);
+               agx_delete_uncompiled_shader(dev,
+                                            so->passthrough_progs[i][j][k]);
         }
      }
   }
 
   for (unsigned i = 0; i < ARRAY_SIZE(so->passthrough_tcs); ++i) {
      if (so->passthrough_tcs[i])
-         agx_delete_uncompiled_shader(so->passthrough_tcs[i]);
+         agx_delete_uncompiled_shader(dev, so->passthrough_tcs[i]);
   }
 
   ralloc_free(so);
@@ -2514,7 +2516,8 @@ agx_delete_uncompiled_shader(struct agx_uncompiled_shader *so)
 static void
 agx_delete_shader_state(struct pipe_context *ctx, void *cso)
 {
-   agx_delete_uncompiled_shader(cso);
+   struct agx_device *dev = agx_device(ctx->screen);
+   agx_delete_uncompiled_shader(dev, cso);
 }
 
 struct agx_generic_meta_key {
@@ -2552,7 +2555,12 @@ agx_init_meta_shaders(struct agx_context *ctx)
 void
 agx_destroy_meta_shaders(struct agx_context *ctx)
 {
-   _mesa_hash_table_destroy(ctx->generic_meta, agx_delete_compiled_shader);
+   struct agx_device *dev = agx_device(ctx->base.screen);
+
+   hash_table_foreach(ctx->generic_meta, ent) {
+      agx_delete_compiled_shader(dev, ent->data);
+   }
+
+   _mesa_hash_table_destroy(ctx->generic_meta, NULL);
 }
 
 static struct agx_compiled_shader *
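One consequence worth noting in the shader teardown above: _mesa_hash_table_destroy's delete callback receives only the struct hash_entry, so it cannot carry the device. That is why these paths switch from a destructor callback to an explicit walk followed by a callback-free destroy. A minimal sketch of the pattern, with hypothetical helper names (delete_entry, destroy, destroy_with_dev):

   /* Before: context-free destructor callback. */
   static void
   delete_entry(struct hash_entry *ent)
   {
      destroy(ent->data); /* no way to reach dev from here */
   }
   _mesa_hash_table_destroy(ht, delete_entry);

   /* After: iterate with dev in scope, then free the table itself. */
   hash_table_foreach(ht, ent) {
      destroy_with_dev(dev, ent->data);
   }
   _mesa_hash_table_destroy(ht, NULL);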