anv,hasvk: migrate to align64 from util

Signed-off-by: Rohan Garg <rohan.garg@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20104>
Authored by Rohan Garg on 2022-12-02 14:37:31 +05:30; committed by Marge Bot
parent a06f751ec8
commit 4504188508
15 changed files with 30 additions and 42 deletions
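
For context: this commit drops anv's private align_u64 helper in favour of the equivalent align64 from util, so every call site below is a mechanical rename. A minimal sketch of the two helpers; the util body is an assumption written to match the removed one, and both are valid only for power-of-two alignments:

   /* The helper this commit deletes from anv_private.h. */
   static inline uint64_t
   align_u64(uint64_t v, uint64_t a)
   {
      return align_down_u64(v + a - 1, a);   /* == (v + a - 1) & ~(a - 1) */
   }

   /* Sketch of util's align64 (assumed body): the same round-up, shared
    * across drivers instead of duplicated in each one. */
   static inline uint64_t
   align64(uint64_t value, uint64_t alignment)
   {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
      return (value + alignment - 1) & ~(alignment - 1);
   }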


@@ -1436,19 +1436,19 @@ anv_device_alloc_bo(struct anv_device *device,
    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));

    /* The kernel is going to give us whole pages anyway */
-   size = align_u64(size, 4096);
+   size = align64(size, 4096);

    uint64_t ccs_size = 0;
    if (device->info->has_aux_map && (alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS)) {
       /* Align the size up to the next multiple of 64K so we don't have any
        * AUX-TT entries pointing from a 64K page to itself.
        */
-      size = align_u64(size, 64 * 1024);
+      size = align64(size, 64 * 1024);

       /* See anv_bo::_ccs_size */
       uint64_t aux_ratio =
          intel_aux_get_main_to_aux_ratio(device->aux_map_ctx);
-      ccs_size = align_u64(DIV_ROUND_UP(size, aux_ratio), 4096);
+      ccs_size = align64(DIV_ROUND_UP(size, aux_ratio), 4096);
    }

    uint32_t gem_handle;
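
To see the CCS sizing above with concrete numbers, a worked example; the 256:1 main-to-aux ratio is a hypothetical value for illustration:

   uint64_t size = 100 * 1024;                /* requested BO size            */
   size = align64(size, 4096);                /* 102400: already page-sized   */
   size = align64(size, 64 * 1024);           /* 131072: two whole 64K pages  */
   uint64_t ccs_size =
      align64(DIV_ROUND_UP(size, 256), 4096); /* 512, padded to one 4K page   */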


@@ -548,8 +548,8 @@ anv_image_init_from_gralloc(struct anv_device *device,
                                          &mem_reqs);

    VkDeviceSize aligned_image_size =
-      align_u64(mem_reqs.memoryRequirements.size,
-                mem_reqs.memoryRequirements.alignment);
+      align64(mem_reqs.memoryRequirements.size,
+              mem_reqs.memoryRequirements.alignment);

    if (bo->size < aligned_image_size) {
       result = vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,


@@ -295,9 +295,9 @@ anv_cmd_buffer_set_ray_query_buffer(struct anv_cmd_buffer *cmd_buffer,
    struct anv_device *device = cmd_buffer->device;

    uint64_t ray_shadow_size =
-      align_u64(brw_rt_ray_queries_shadow_stacks_size(device->info,
-                                                      pipeline->ray_queries),
-                4096);
+      align64(brw_rt_ray_queries_shadow_stacks_size(device->info,
+                                                    pipeline->ray_queries),
+              4096);
    if (ray_shadow_size > 0 &&
        (!cmd_buffer->state.ray_query_shadow_bo ||
         cmd_buffer->state.ray_query_shadow_bo->size < ray_shadow_size)) {


@@ -1558,7 +1558,7 @@ anv_descriptor_set_write_buffer(struct anv_device *device,
     */
    if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
        type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
-      bind_range = align_u64(bind_range, ANV_UBO_ALIGNMENT);
+      bind_range = align64(bind_range, ANV_UBO_ALIGNMENT);

    if (data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
       struct anv_address_range_descriptor desc_data = {


@@ -3967,7 +3967,7 @@ VkResult anv_AllocateMemory(
    assert(pAllocateInfo->allocationSize > 0);

    VkDeviceSize aligned_alloc_size =
-      align_u64(pAllocateInfo->allocationSize, 4096);
+      align64(pAllocateInfo->allocationSize, 4096);

    if (aligned_alloc_size > MAX_MEMORY_ALLOCATION_SIZE)
       return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
@@ -4397,7 +4397,7 @@ VkResult anv_MapMemory(
    uint64_t map_size = (offset + size) - map_offset;

    /* Let's map whole pages */
-   map_size = align_u64(map_size, 4096);
+   map_size = align64(map_size, 4096);

    void *map;
    VkResult result = anv_device_map_bo(device, mem->bo, map_offset,
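
A worked example of the page rounding in anv_MapMemory, with hypothetical numbers (map_offset is rounded down to a page boundary just above this hunk):

   uint64_t offset = 5000, size = 3000;
   uint64_t map_offset = offset & ~4095ull;          /* 4096                 */
   uint64_t map_size = (offset + size) - map_offset; /* 8000 - 4096 = 3904   */
   map_size = align64(map_size, 4096);               /* 4096: one whole page */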
@@ -4656,7 +4656,7 @@ anv_get_buffer_memory_requirements(struct anv_device *device,
    if (device->robust_buffer_access &&
        (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
         usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
-      pMemoryRequirements->memoryRequirements.size = align_u64(size, 4);
+      pMemoryRequirements->memoryRequirements.size = align64(size, 4);

    pMemoryRequirements->memoryRequirements.memoryTypeBits = memory_types;


@@ -133,7 +133,7 @@ image_binding_grow(const struct anv_device *device,
       &image->bindings[binding].memory_range;

    if (has_implicit_offset) {
-      offset = align_u64(container->offset + container->size, alignment);
+      offset = align64(container->offset + container->size, alignment);
    } else {
       /* Offset must be validated because it comes from
        * VkImageDrmFormatModifierExplicitCreateInfoEXT.


@@ -314,12 +314,6 @@ align_down_u64(uint64_t v, uint64_t a)
    return v & ~(a - 1);
 }

-static inline uint64_t
-align_u64(uint64_t v, uint64_t a)
-{
-   return align_down_u64(v + a - 1, a);
-}
-
 static inline int32_t
 align_i32(int32_t v, int32_t a)
 {
@@ -2491,7 +2485,7 @@ anv_gfx8_9_vb_cache_range_needs_workaround(struct anv_vb_cache_range *bound,

    /* Align everything to a cache line */
    bound->start &= ~(64ull - 1ull);
-   bound->end = align_u64(bound->end, 64);
+   bound->end = align64(bound->end, 64);

    /* Compute the dirty range */
    dirty->start = MIN2(dirty->start, bound->start);
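
The workaround above needs whole cache lines, so the start of the bound range is aligned down and the end aligned up; a quick check with hypothetical values:

   uint64_t start = 100 & ~(64ull - 1ull); /* 64              */
   uint64_t end   = align64(5000, 64);     /* 5056 == 79 * 64 */
   /* [64, 5056) covers the original [100, 5000) in whole cache lines. */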


@@ -168,7 +168,7 @@ get_gpu_size_estimate(const VkAccelerationStructureBuildGeometryInfoKHR *pInfo,
    struct MKSizeEstimate est = {};

    uint64_t size = sizeof(BVHBase);
-   size = align_u64(size, 64);
+   size = align64(size, 64);

    /* Must immediately follow BVHBase because we use fixed offset to nodes. */
    est.node_data_start = size;
@@ -259,25 +259,25 @@ get_gpu_size_estimate(const VkAccelerationStructureBuildGeometryInfoKHR *pInfo,
       unreachable("Unsupported acceleration structure type");
    }

-   size = align_u64(size, 64);
+   size = align64(size, 64);
    est.instance_descs_start = size;
    size += sizeof(struct InstanceDesc) * num_instances;

    est.geo_meta_data_start = size;
    size += sizeof(struct GeoMetaData) * pInfo->geometryCount;
-   size = align_u64(size, 64);
+   size = align64(size, 64);

-   assert(size == align_u64(size, 64));
+   assert(size == align64(size, 64));
    est.back_pointer_start = size;

    const bool alloc_backpointers = false; /* RT TODO */
    if (alloc_backpointers) {
       size += est.max_inner_nodes * sizeof(uint32_t);
-      size = align_u64(size, 64);
+      size = align64(size, 64);
    }

    assert(size < UINT32_MAX);
-   est.sizeTotal = align_u64(size, 64);
+   est.sizeTotal = align64(size, 64);

    return est;
 }
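
The estimator above lays out the BVH with a running offset, snapping each section start to 64 bytes; a stripped-down sketch of the pattern, with hypothetical section sizes:

   uint64_t size = 120;                 /* hypothetical header size       */
   size = align64(size, 64);            /* 128                            */
   uint64_t instance_descs_start = size;
   size += 64 * 10;                     /* ten hypothetical InstanceDescs */
   uint64_t geo_meta_data_start = size; /* 768                            */
   size += 24 * 4;                      /* four hypothetical GeoMetaData  */
   size = align64(size, 64);            /* 864 rounded up to 896          */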


@@ -536,7 +536,7 @@ anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,

    /* Aux operates 64K at a time */
    start_offset_B = align_down_u64(start_offset_B, 64 * 1024);
-   end_offset_B = align_u64(end_offset_B, 64 * 1024);
+   end_offset_B = align64(end_offset_B, 64 * 1024);

    for (uint64_t offset = start_offset_B;
         offset < end_offset_B; offset += 64 * 1024) {
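
For the loop above, a hypothetical byte range shows how aligning outward to 64K sets the iteration bounds:

   uint64_t start = align_down_u64(70000, 64 * 1024); /*  65536 */
   uint64_t end   = align64(200000, 64 * 1024);       /* 262144 */
   /* The loop then visits three 64K granules: 65536, 131072, 196608. */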


@@ -1589,7 +1589,7 @@ anv_device_alloc_bo(struct anv_device *device,
    assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));

    /* The kernel is going to give us whole pages anyway */
-   size = align_u64(size, 4096);
+   size = align64(size, 4096);

    uint32_t gem_handle = anv_gem_create(device, size);
    if (gem_handle == 0)


@@ -548,8 +548,8 @@ anv_image_init_from_gralloc(struct anv_device *device,
                                          &mem_reqs);

    VkDeviceSize aligned_image_size =
-      align_u64(mem_reqs.memoryRequirements.size,
-                mem_reqs.memoryRequirements.alignment);
+      align64(mem_reqs.memoryRequirements.size,
+              mem_reqs.memoryRequirements.alignment);

    if (bo->size < aligned_image_size) {
       result = vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,


@@ -1590,7 +1590,7 @@ anv_descriptor_set_write_buffer(struct anv_device *device,
     */
    if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
        type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
-      bind_range = align_u64(bind_range, ANV_UBO_ALIGNMENT);
+      bind_range = align64(bind_range, ANV_UBO_ALIGNMENT);

    if (data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
       struct anv_address_range_descriptor desc_data = {


@@ -3237,7 +3237,7 @@ VkResult anv_AllocateMemory(
    assert(pAllocateInfo->allocationSize > 0);

    VkDeviceSize aligned_alloc_size =
-      align_u64(pAllocateInfo->allocationSize, 4096);
+      align64(pAllocateInfo->allocationSize, 4096);

    if (aligned_alloc_size > MAX_MEMORY_ALLOCATION_SIZE)
       return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
@@ -3643,7 +3643,7 @@ VkResult anv_MapMemory(
    uint64_t map_size = (offset + size) - map_offset;

    /* Let's map whole pages */
-   map_size = align_u64(map_size, 4096);
+   map_size = align64(map_size, 4096);

    void *map;
    VkResult result = anv_device_map_bo(device, mem->bo, map_offset,
@@ -3900,7 +3900,7 @@ anv_get_buffer_memory_requirements(struct anv_device *device,
    if (device->robust_buffer_access &&
        (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT ||
         usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT))
-      pMemoryRequirements->memoryRequirements.size = align_u64(size, 4);
+      pMemoryRequirements->memoryRequirements.size = align64(size, 4);

    pMemoryRequirements->memoryRequirements.memoryTypeBits = memory_types;


@@ -126,7 +126,7 @@ image_binding_grow(const struct anv_device *device,
       &image->bindings[binding].memory_range;

    if (has_implicit_offset) {
-      offset = align_u64(container->offset + container->size, alignment);
+      offset = align64(container->offset + container->size, alignment);
    } else {
       /* Offset must be validated because it comes from
        * VkImageDrmFormatModifierExplicitCreateInfoEXT.


@@ -298,12 +298,6 @@ align_down_u64(uint64_t v, uint64_t a)
    return v & ~(a - 1);
 }

-static inline uint64_t
-align_u64(uint64_t v, uint64_t a)
-{
-   return align_down_u64(v + a - 1, a);
-}
-
 static inline int32_t
 align_i32(int32_t v, int32_t a)
 {
@@ -2415,7 +2409,7 @@ anv_gfx8_9_vb_cache_range_needs_workaround(struct anv_vb_cache_range *bound,

    /* Align everything to a cache line */
    bound->start &= ~(64ull - 1ull);
-   bound->end = align_u64(bound->end, 64);
+   bound->end = align64(bound->end, 64);

    /* Compute the dirty range */
    dirty->start = MIN2(dirty->start, bound->start);