mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-05 16:08:04 +02:00
nvk: Flush descriptor tables and heap maps on submit
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33959>
This commit is contained in:
parent
4d04baba7d
commit
01e56f408b
6 changed files with 68 additions and 2 deletions
|
|
@@ -121,6 +121,7 @@ nvk_descriptor_table_write_locked(struct nvk_descriptor_table *table,
|
|||
|
||||
assert(desc_size == table->desc_size);
|
||||
memcpy(map, desc_data, table->desc_size);
|
||||
nvk_mem_arena_set_map_dirty(&table->arena);
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@@ -130,6 +131,7 @@ nvk_descriptor_table_clear_locked(struct nvk_descriptor_table *table,
|
|||
void *map = nvk_descriptor_table_map_locked(table, index);
|
||||
|
||||
memset(map, 0, table->desc_size);
|
||||
nvk_mem_arena_set_map_dirty(&table->arena);
|
||||
}
|
||||
|
||||
static VkResult
|
||||
|
|
|
|||
|
|
@@ -55,6 +55,13 @@ void nvk_descriptor_table_remove(struct nvk_device *dev,
|
|||
struct nvk_descriptor_table *table,
|
||||
uint32_t index);
|
||||
|
||||
static void
|
||||
nvk_descriptor_table_flush_map(struct nvk_device *dev,
|
||||
struct nvk_descriptor_table *table)
|
||||
{
|
||||
nvk_mem_arena_flush_map(dev, &table->arena);
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
nvk_descriptor_table_base_address(struct nvk_descriptor_table *table)
|
||||
{
|
||||
|
|
|
|||
|
|
@@ -35,6 +35,12 @@ VkResult nvk_heap_upload(struct nvk_device *dev, struct nvk_heap *heap,
|
|||
void nvk_heap_free(struct nvk_device *dev, struct nvk_heap *heap,
|
||||
uint64_t addr, uint64_t size);
|
||||
|
||||
static void
|
||||
nvk_heap_flush_maps(struct nvk_device *dev, struct nvk_heap *heap)
|
||||
{
|
||||
nvk_mem_arena_flush_map(dev, &heap->arena);
|
||||
}
|
||||
|
||||
static inline uint64_t
|
||||
nvk_heap_contiguous_base_address(struct nvk_heap *heap)
|
||||
{
|
||||
|
|
|
|||
|
|
@@ -158,8 +158,33 @@ nvk_mem_arena_map(const struct nvk_mem_arena *arena,
|
|||
return mem->mem->map + mem_offset_B;
|
||||
}
|
||||
|
||||
/* Flush all dirty CPU maps of the arena to the GPU.
 *
 * Must be called with arena->mutex held (see nvk_mem_arena_flush_map()).
 *
 * The dirty flag is cleared with an atomic exchange *before* flushing so
 * that a nvk_mem_arena_set_map_dirty() racing in after the exchange will
 * re-arm the flag and the next flush will pick up those writes.  If the
 * flag was already clear, there is nothing to flush and we return early.
 *
 * NOTE(review): dev is not used in the body; presumably kept for API
 * symmetry with other nvkmd entry points — confirm.
 */
static void
nvk_mem_arena_flush_map_locked(struct nvk_device *dev,
                               struct nvk_mem_arena *arena)
{
   if (p_atomic_xchg(&arena->map_dirty, 0) == 0)
      return;

   const uint32_t mem_count = nvk_mem_arena_mem_count(arena);

   /* No per-range dirty tracking: each backing BO is synced in full. */
   for (uint32_t mem_idx = 0; mem_idx < mem_count; mem_idx++) {
      const struct nvk_arena_mem *mem = &arena->mem[mem_idx];
      const uint64_t mem_size_B = nvk_mem_arena_mem_size_B(mem_idx);
      nvkmd_mem_sync_map_to_gpu(mem->mem, 0, mem_size_B);
   }
}
|
||||
|
||||
/* Flush all dirty CPU maps of the arena to the GPU.
 *
 * Thread-safe wrapper: takes arena->mutex around
 * nvk_mem_arena_flush_map_locked().
 */
void
nvk_mem_arena_flush_map(struct nvk_device *dev,
                        struct nvk_mem_arena *arena)
{
   simple_mtx_lock(&arena->mutex);
   nvk_mem_arena_flush_map_locked(dev, arena);
   simple_mtx_unlock(&arena->mutex);
}
|
||||
|
||||
void
|
||||
nvk_mem_arena_copy_to_gpu(struct nvk_mem_arena *arena,
|
||||
uint64_t dst_addr, const void *src, size_t size_B)
|
||||
{
|
||||
assert(nvk_mem_arena_is_mapped(arena));
|
||||
|
|
@@ -182,4 +207,6 @@ nvk_mem_arena_copy_to_gpu(const struct nvk_mem_arena *arena,
|
|||
src += copy_size_B;
|
||||
size_B -= copy_size_B;
|
||||
}
|
||||
|
||||
nvk_mem_arena_set_map_dirty(arena);
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -72,6 +72,9 @@ struct nvk_mem_arena {
|
|||
*/
|
||||
uint32_t mem_count;
|
||||
|
||||
/* Non-zero if any maps of this arena are dirty */
|
||||
uint32_t map_dirty;
|
||||
|
||||
struct nvk_arena_mem mem[NVK_MEM_ARENA_MAX_MEM_COUNT];
|
||||
};
|
||||
|
||||
|
|
@@ -188,7 +191,23 @@ nvk_contiguous_mem_arena_map_offset(const struct nvk_mem_arena *arena,
|
|||
void *nvk_mem_arena_map(const struct nvk_mem_arena *arena,
|
||||
uint64_t addr, size_t map_range_B);
|
||||
|
||||
void nvk_mem_arena_copy_to_gpu(const struct nvk_mem_arena *arena,
|
||||
/** Mark the arena map dirty
|
||||
*
|
||||
* This should be called after writing data into the arena via a pointer
|
||||
* returned by nvk_mem_arena_map(). It must be called after the write, not
|
||||
* before, to ensure that the next call to nvk_mem_arena_flush_map() will
|
||||
* flush out the new writes.
|
||||
*/
|
||||
static inline void
|
||||
nvk_mem_arena_set_map_dirty(struct nvk_mem_arena *arena)
|
||||
{
|
||||
return p_atomic_set(&arena->map_dirty, 1);
|
||||
}
|
||||
|
||||
void nvk_mem_arena_flush_map(struct nvk_device *dev,
|
||||
struct nvk_mem_arena *arena);
|
||||
|
||||
void nvk_mem_arena_copy_to_gpu(struct nvk_mem_arena *arena,
|
||||
uint64_t dst_addr,
|
||||
const void *src, size_t size_B);
|
||||
|
||||
|
|
|
|||
|
|
@@ -213,6 +213,11 @@ nvk_queue_submit_exec(struct nvk_queue *queue,
|
|||
VkResult result;
|
||||
|
||||
if (submit->command_buffer_count > 0) {
|
||||
nvk_descriptor_table_flush_map(dev, &dev->images);
|
||||
nvk_descriptor_table_flush_map(dev, &dev->samplers);
|
||||
nvk_heap_flush_maps(dev, &dev->shader_heap);
|
||||
assert(dev->event_heap.arena.mem_flags & NVKMD_MEM_COHERENT);
|
||||
|
||||
result = nvk_queue_state_update(queue, &queue->state);
|
||||
if (result != VK_SUCCESS)
|
||||
return result;
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue