util/vma: Add "nospan"

Add a way to request that allocations do not span a specified power-of-two
(PoT) boundary.  This can be used, for example, to ensure that allocations
do not span 4GB boundaries to work around hardware/firmware bugs.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20263>
This commit is contained in:
Rob Clark 2022-12-07 15:04:29 -08:00 committed by Marge Bot
parent 793741c936
commit 7d0d82f25f
2 changed files with 42 additions and 1 deletions

View file

@ -24,6 +24,7 @@
#include <stdlib.h>
#include <inttypes.h>
#include "util/macros.h"
#include "util/u_math.h"
#include "util/vma.h"
@ -52,6 +53,9 @@ util_vma_heap_init(struct util_vma_heap *heap,
/* Default to using high addresses */
heap->alloc_high = true;
/* Default to not having a nospan alignment */
heap->nospan_shift = 0;
}
void
@ -158,6 +162,14 @@ util_vma_heap_alloc(struct util_vma_heap *heap,
util_vma_heap_validate(heap);
/* The requested alignment should not be stronger than the block/nospan
* alignment.
*/
if (heap->nospan_shift) {
assert(ALIGN(BITFIELD64_BIT(heap->nospan_shift), alignment) ==
BITFIELD64_BIT(heap->nospan_shift));
}
if (heap->alloc_high) {
util_vma_foreach_hole_safe(hole, heap) {
if (size > hole->size)
@ -171,6 +183,18 @@ util_vma_heap_alloc(struct util_vma_heap *heap,
*/
uint64_t offset = (hole->size - size) + hole->offset;
if (heap->nospan_shift) {
uint64_t end = offset + size - 1;
if ((end >> heap->nospan_shift) != (offset >> heap->nospan_shift)) {
/* can we shift the offset down and still fit in the current hole? */
end &= ~BITFIELD64_MASK(heap->nospan_shift);
assert(end >= size);
offset -= size;
if (offset < hole->offset)
continue;
}
}
/* Align the offset. We align down and not up because we are
* allocating from the top of the hole and not the bottom.
*/
@ -200,6 +224,16 @@ util_vma_heap_alloc(struct util_vma_heap *heap,
offset += pad;
}
if (heap->nospan_shift) {
uint64_t end = offset + size - 1;
if ((end >> heap->nospan_shift) != (offset >> heap->nospan_shift)) {
/* can we shift the offset up and still fit in the current hole? */
offset = end & ~BITFIELD64_MASK(heap->nospan_shift);
if ((offset + size) > (hole->offset + hole->size))
continue;
}
}
util_vma_hole_alloc(heap, hole, offset, size);
util_vma_heap_validate(heap);
return offset;
@ -325,7 +359,7 @@ util_vma_heap_print(struct util_vma_heap *heap, FILE *fp,
uint64_t total_free = 0;
util_vma_foreach_hole(hole, heap) {
fprintf(fp, "%s hole: offset = %"PRIu64" (0x%"PRIx64", "
fprintf(fp, "%s hole: offset = %"PRIu64" (0x%"PRIx64"), "
"size = %"PRIu64" (0x%"PRIx64")\n",
tab, hole->offset, hole->offset, hole->size, hole->size);
total_free += hole->size;

View file

@ -44,6 +44,13 @@ struct util_vma_heap {
* Default is true.
*/
bool alloc_high;
/**
* If non-zero, util_vma_heap_alloc will avoid allocating regions which
* span (1 << nospan_shift) ranges. For example, to avoid allocations
which straddle 4GB boundaries, use nospan_shift=log2(4GB).
*/
unsigned nospan_shift;
};
void util_vma_heap_init(struct util_vma_heap *heap,