asahi: always pass align parameter for BOs
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30633>
parent 7ff31a4bf8
commit 5c7dd392d3

20 changed files with 31 additions and 37 deletions
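In short: agx_bo_create_aligned is renamed to agx_bo_create, the old
three-argument inline wrapper is removed, and every caller now spells out the
alignment, with 0 requesting the default (the same value the removed wrapper
passed). A minimal sketch of the post-commit calling convention -- the helper
name is hypothetical, and dev/size stand in for whatever the surrounding
driver code provides:

#include "agx_bo.h" /* agx_bo_create(), enum agx_bo_flags */

/* Hypothetical call site illustrating the new convention. Before this
 * commit the same allocation read:
 *    agx_bo_create(dev, size, AGX_BO_WRITEBACK, "Example");
 * The align parameter (third argument) is now explicit; 0 keeps the
 * default alignment, matching the old inline wrapper's behaviour. */
static struct agx_bo *
alloc_example_bo(struct agx_device *dev, unsigned size)
{
   return agx_bo_create(dev, size, 0, AGX_BO_WRITEBACK, "Example");
}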
@@ -197,8 +197,8 @@ agx_bo_unreference(struct agx_device *dev, struct agx_bo *bo)
 }
 
 struct agx_bo *
-agx_bo_create_aligned(struct agx_device *dev, unsigned size, unsigned align,
-                      enum agx_bo_flags flags, const char *label)
+agx_bo_create(struct agx_device *dev, unsigned size, unsigned align,
+              enum agx_bo_flags flags, const char *label)
 {
    struct agx_bo *bo;
    assert(size > 0);

@@ -103,15 +103,9 @@ agx_bo_writer(uint32_t queue, uint32_t syncobj)
    return (((uint64_t)queue) << 32) | syncobj;
 }
 
-struct agx_bo *agx_bo_create_aligned(struct agx_device *dev, unsigned size,
-                                     unsigned align, enum agx_bo_flags flags,
-                                     const char *label);
-static inline struct agx_bo *
-agx_bo_create(struct agx_device *dev, unsigned size, enum agx_bo_flags flags,
-              const char *label)
-{
-   return agx_bo_create_aligned(dev, size, 0, flags, label);
-}
+struct agx_bo *agx_bo_create(struct agx_device *dev, unsigned size,
+                             unsigned align, enum agx_bo_flags flags,
+                             const char *label);
 
 void agx_bo_reference(struct agx_bo *bo);
 void agx_bo_unreference(struct agx_device *dev, struct agx_bo *bo);

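Note how the header change above folds the _aligned variant into the primary
entry point rather than keeping both: alignment is now visible at every call
site. If a convenience form were ever wanted back, it would look like the
inline the commit deletes -- a hypothetical sketch, not part of the new API:

/* Hypothetical wrapper mirroring the removed inline; the commit
 * deliberately drops this so that call sites state their alignment. */
static inline struct agx_bo *
agx_bo_create_default(struct agx_device *dev, unsigned size,
                      enum agx_bo_flags flags, const char *label)
{
   /* 0 = default alignment, exactly what the old wrapper passed */
   return agx_bo_create(dev, size, 0, flags, label);
}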
@@ -115,7 +115,7 @@ agx_fast_link(struct agx_linked_shader *linked, struct agx_device *dev,
 
    assert(size > 0 && "must stop");
 
-   linked->bo = agx_bo_create(dev, size, AGX_BO_EXEC | AGX_BO_LOW_VA,
+   linked->bo = agx_bo_create(dev, size, 0, AGX_BO_EXEC | AGX_BO_LOW_VA,
                               "Linked executable");
 
    size_t offset = 0;

@@ -33,7 +33,7 @@ struct agx_bo *
 agx_build_helper(struct agx_device *dev)
 {
    struct agx_bo *bo = agx_bo_create(
-      dev, sizeof(libagx_g13_helper),
+      dev, sizeof(libagx_g13_helper), 0,
       AGX_BO_READONLY | AGX_BO_EXEC | AGX_BO_LOW_VA, "Helper shader");
    assert(bo);
    memcpy(bo->ptr.cpu, libagx_g13_helper, sizeof(libagx_g13_helper));

@@ -130,8 +130,8 @@ agx_scratch_realloc(struct agx_scratch *scratch)
 #ifdef SCRATCH_DEBUG
    flags = AGX_BO_WRITEBACK;
 #endif
-   scratch->buf = agx_bo_create_aligned(scratch->dev, total_alloc,
-                                        block_size_bytes, flags, "Scratch");
+   scratch->buf = agx_bo_create(scratch->dev, total_alloc, block_size_bytes,
+                                flags, "Scratch");
    memset(scratch->buf->ptr.cpu, 0, blocks_off);
 
    struct agx_helper_header *hdr = scratch->buf->ptr.cpu;

@@ -19,7 +19,7 @@ static struct agx_bo *
 agx_pool_alloc_backing(struct agx_pool *pool, size_t bo_sz)
 {
    struct agx_bo *bo =
-      agx_bo_create(pool->dev, bo_sz, pool->create_flags, "Pool");
+      agx_bo_create(pool->dev, bo_sz, 0, pool->create_flags, "Pool");
 
    util_dynarray_append(&pool->bos, struct agx_bo *, bo);
    pool->transient_bo = bo;

@@ -181,7 +181,7 @@ hk_pool_alloc_internal(struct hk_cmd_buffer *cmd, uint32_t size,
    if (size > HK_CMD_BO_SIZE) {
       uint32_t flags = usc ? AGX_BO_LOW_VA : 0;
       struct agx_bo *bo =
-         agx_bo_create(&dev->dev, size, flags, "Large pool allocation");
+         agx_bo_create(&dev->dev, size, 0, flags, "Large pool allocation");
 
       util_dynarray_append(&cmd->large_bos, struct agx_bo *, bo);
       return bo->ptr;

@@ -871,7 +871,7 @@ hk_geometry_state(struct hk_cmd_buffer *cmd)
    /* We tie heap allocation to geometry state allocation, so allocate now. */
    if (unlikely(!dev->heap)) {
       size_t size = 128 * 1024 * 1024;
-      dev->heap = agx_bo_create(&dev->dev, size, 0, "Geometry heap");
+      dev->heap = agx_bo_create(&dev->dev, size, 0, 0, "Geometry heap");
 
       /* The geometry state buffer is initialized here and then is treated by
        * the CPU as rodata, even though the GPU uses it for scratch internally.

@@ -22,7 +22,7 @@ hk_cmd_bo_create(struct hk_cmd_pool *pool, bool usc, struct hk_cmd_bo **bo_out)
    if (bo == NULL)
       return vk_error(pool, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   bo->bo = agx_bo_create(&dev->dev, HK_CMD_BO_SIZE, usc ? AGX_BO_LOW_VA : 0,
+   bo->bo = agx_bo_create(&dev->dev, HK_CMD_BO_SIZE, 0, usc ? AGX_BO_LOW_VA : 0,
                           "Command pool");
    if (bo->bo == NULL) {
       vk_free(&pool->vk.alloc, bo);

@@ -483,7 +483,7 @@ hk_CreateDescriptorPool(VkDevice _device,
    bo_size += HK_MIN_UBO_ALIGNMENT * pCreateInfo->maxSets;
 
    if (bo_size) {
-      pool->bo = agx_bo_create(&dev->dev, bo_size, 0, "Descriptor pool");
+      pool->bo = agx_bo_create(&dev->dev, bo_size, 0, 0, "Descriptor pool");
       if (!pool->bo) {
          hk_destroy_descriptor_pool(dev, pAllocator, pool);
         return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

@@ -23,7 +23,7 @@ hk_descriptor_table_grow_locked(struct hk_device *dev,
    assert(new_alloc > table->alloc && new_alloc <= table->max_alloc);
 
    const uint32_t new_bo_size = new_alloc * table->desc_size;
-   new_bo = agx_bo_create(&dev->dev, new_bo_size, 0, "Descriptor table");
+   new_bo = agx_bo_create(&dev->dev, new_bo_size, 0, 0, "Descriptor table");
 
    if (new_bo == NULL) {
       return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,

@@ -43,7 +43,7 @@ static VkResult
 hk_upload_rodata(struct hk_device *dev)
 {
    dev->rodata.bo =
-      agx_bo_create(&dev->dev, AGX_SAMPLER_LENGTH, 0, "Read only data");
+      agx_bo_create(&dev->dev, AGX_SAMPLER_LENGTH, 0, 0, "Read only data");
 
    if (!dev->rodata.bo)
       return VK_ERROR_OUT_OF_HOST_MEMORY;

@@ -139,7 +139,7 @@ hk_AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
    if (handle_types)
       flags |= AGX_BO_SHAREABLE;
 
-   mem->bo = agx_bo_create(&dev->dev, aligned_size, flags, "App memory");
+   mem->bo = agx_bo_create(&dev->dev, aligned_size, 0, flags, "App memory");
    if (!mem->bo) {
       result = vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
       goto fail_alloc;

@@ -31,7 +31,7 @@ hk_CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
     * XXX
     */
    event->bo =
-      agx_bo_create(&dev->dev, HK_EVENT_MEM_SIZE, AGX_BO_WRITEBACK, "Event");
+      agx_bo_create(&dev->dev, HK_EVENT_MEM_SIZE, 0, AGX_BO_WRITEBACK, "Event");
    event->status = event->bo->ptr.cpu;
    event->addr = event->bo->ptr.gpu;
 

@@ -93,7 +93,7 @@ hk_CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
    bo_size += pool->query_stride * pool->vk.query_count;
 
    pool->bo =
-      agx_bo_create(&dev->dev, bo_size, AGX_BO_WRITEBACK, "Query pool");
+      agx_bo_create(&dev->dev, bo_size, 0, AGX_BO_WRITEBACK, "Query pool");
    if (!pool->bo) {
       hk_DestroyQueryPool(device, hk_query_pool_to_handle(pool), pAllocator);
       return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

@@ -671,8 +671,8 @@ hk_upload_shader(struct hk_device *dev, struct hk_shader *shader)
       size_t size = shader->b.binary_size - offs;
       assert(size > 0);
 
-      shader->bo = agx_bo_create(&dev->dev, size, AGX_BO_EXEC | AGX_BO_LOW_VA,
-                                 "Preamble");
+      shader->bo = agx_bo_create(&dev->dev, size, 0,
+                                 AGX_BO_EXEC | AGX_BO_LOW_VA, "Preamble");
       memcpy(shader->bo->ptr.cpu, shader->b.binary + offs, size);
       shader->preamble_addr = shader->bo->ptr.gpu;
    }

@@ -79,7 +79,7 @@ agx_batch_mark_complete(struct agx_batch *batch)
 struct agx_encoder
 agx_encoder_allocate(struct agx_batch *batch, struct agx_device *dev)
 {
-   struct agx_bo *bo = agx_bo_create(dev, 0x80000, 0, "Encoder");
+   struct agx_bo *bo = agx_bo_create(dev, 0x80000, 0, 0, "Encoder");
 
    return (struct agx_encoder){
       .bo = bo,

@@ -107,12 +107,12 @@ read_shader(struct agx_screen *screen, struct blob_reader *blob,
       blob_copy_bytes(blob, binary->b.binary, binary->b.binary_size);
 
       if (size) {
-         binary->bo = agx_bo_create(&screen->dev, size,
+         binary->bo = agx_bo_create(&screen->dev, size, 0,
                                     AGX_BO_EXEC | AGX_BO_LOW_VA, "Executable");
          memcpy(binary->bo->ptr.cpu, binary->b.binary, size);
       }
    } else if (size) {
-      binary->bo = agx_bo_create(&screen->dev, size,
+      binary->bo = agx_bo_create(&screen->dev, size, 0,
                                  AGX_BO_EXEC | AGX_BO_LOW_VA, "Executable");
       blob_copy_bytes(blob, binary->bo->ptr.cpu, size);
    }

@@ -599,7 +599,7 @@ agx_resource_create_with_modifiers(struct pipe_screen *screen,
       create_flags |= AGX_BO_SHAREABLE;
 
    nresource->bo =
-      agx_bo_create(dev, nresource->layout.size_B, create_flags, label);
+      agx_bo_create(dev, nresource->layout.size_B, 0, create_flags, label);
 
    if (!nresource->bo) {
       FREE(nresource);

@@ -702,7 +702,7 @@ agx_shadow(struct agx_context *ctx, struct agx_resource *rsrc, bool needs_copy)
    if (needs_copy)
       flags |= AGX_BO_WRITEBACK;
 
-   struct agx_bo *new_ = agx_bo_create(dev, size, flags, old->label);
+   struct agx_bo *new_ = agx_bo_create(dev, size, 0, flags, old->label);
 
    /* If allocation failed, we can fallback on a flush gracefully*/
    if (new_ == NULL)

@@ -1922,7 +1922,7 @@ agx_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
 
    ctx->result_buf =
       agx_bo_create(agx_device(screen),
-                    (2 * sizeof(union agx_batch_result)) * AGX_MAX_BATCHES,
+                    (2 * sizeof(union agx_batch_result)) * AGX_MAX_BATCHES, 0,
                     AGX_BO_WRITEBACK, "Batch result buffer");
    assert(ctx->result_buf);
 

@@ -70,7 +70,7 @@ agx_alloc_oq_heap(struct agx_context *ctx)
 
    heap->dev = agx_device(ctx->base.screen);
    heap->bo =
-      agx_bo_create(heap->dev, AGX_MAX_OCCLUSION_QUERIES * sizeof(uint64_t),
+      agx_bo_create(heap->dev, AGX_MAX_OCCLUSION_QUERIES * sizeof(uint64_t), 0,
                     AGX_BO_WRITEBACK, "Occlusion query heap");
 
    /* At the start, everything is available */

@@ -166,7 +166,7 @@ agx_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
     * tracking / reference counting to deal with lifetimes.
     */
    query->bo = agx_bo_create(agx_device(ctx->screen), sizeof(uint64_t) * 2,
-                             AGX_BO_WRITEBACK, "Query");
+                             0, AGX_BO_WRITEBACK, "Query");
    query->ptr = query->bo->ptr;
 }
 

@@ -1590,7 +1590,7 @@ agx_compile_nir(struct agx_device *dev, nir_shader *nir,
    agx_compile_shader_nir(nir, &key, debug, &compiled->b);
 
    if (compiled->b.binary_size && !secondary) {
-      compiled->bo = agx_bo_create(dev, compiled->b.binary_size,
+      compiled->bo = agx_bo_create(dev, compiled->b.binary_size, 0,
                                    AGX_BO_EXEC | AGX_BO_LOW_VA, "Executable");
 
       memcpy(compiled->bo->ptr.cpu, compiled->b.binary,

@@ -2799,7 +2799,7 @@ agx_sampler_heap_add(struct agx_device *dev, struct agx_sampler_heap *heap,
    /* Allocate (maximally sized) BO if we haven't already */
    if (!heap->bo) {
       heap->bo = agx_bo_create(dev, AGX_SAMPLER_HEAP_SIZE * AGX_SAMPLER_LENGTH,
-                               AGX_BO_WRITEBACK, "Sampler heap");
+                               0, AGX_BO_WRITEBACK, "Sampler heap");
 
       assert(heap->count == 0);
    }