mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-04-27 00:10:40 +02:00
iris/xe: Add VMs to error dump
To ask the Xe KMD to dump BO/VMA content to the error state, it is necessary to set DRM_XE_VM_BIND_FLAG_DUMPABLE during VM bind, so places that were setting 'bo.real.capture = true' after the VM bind were useless for the Xe KMD. To fix that, BO_ALLOC_CAPTURE was added; alloc_fresh_bo() will set 'bo.real.capture = true' automatically for new BOs before the VM bind. alloc_bo_from_cache() was also required to check that the capture state matches between the requested BO and the BO in the cache. Only slabs might be left out of the error dump if DEBUG_CAPTURE_ALL is not set, but that was already happening for i915. Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com> Signed-off-by: José Roberto de Souza <jose.souza@intel.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27661>
This commit is contained in:
parent
c26663eede
commit
6f9a5f4811
5 changed files with 24 additions and 13 deletions
|
|
@ -426,8 +426,8 @@ create_batch(struct iris_batch *batch)
|
|||
/* TODO: We probably could suballocate batches... */
|
||||
batch->bo = iris_bo_alloc(bufmgr, "command buffer",
|
||||
BATCH_SZ + BATCH_RESERVED, 8,
|
||||
IRIS_MEMZONE_OTHER, BO_ALLOC_NO_SUBALLOC);
|
||||
iris_get_backing_bo(batch->bo)->real.capture = true;
|
||||
IRIS_MEMZONE_OTHER,
|
||||
BO_ALLOC_NO_SUBALLOC | BO_ALLOC_CAPTURE);
|
||||
batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
|
||||
batch->map_next = batch->map;
|
||||
|
||||
|
|
|
|||
|
|
@ -1008,6 +1008,9 @@ alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
|
|||
if (match_zone && memzone != iris_memzone_for_address(cur->address))
|
||||
continue;
|
||||
|
||||
if (cur->real.capture != !!(flags & BO_ALLOC_CAPTURE))
|
||||
continue;
|
||||
|
||||
/* If the last BO in the cache is busy, there are no idle BOs. Bail,
|
||||
* either falling back to a non-matching memzone, or if that fails,
|
||||
* allocating a fresh buffer.
|
||||
|
|
@ -1126,6 +1129,7 @@ alloc_fresh_bo(struct iris_bufmgr *bufmgr, uint64_t bo_size, unsigned flags)
|
|||
bo->size = bo_size;
|
||||
bo->idle = true;
|
||||
bo->zeroed = true;
|
||||
bo->real.capture = (flags & BO_ALLOC_CAPTURE) != 0;
|
||||
|
||||
return bo;
|
||||
}
|
||||
|
|
@ -1174,6 +1178,12 @@ iris_bo_alloc(struct iris_bufmgr *bufmgr,
|
|||
if (memzone != IRIS_MEMZONE_OTHER || (flags & BO_ALLOC_COHERENT))
|
||||
flags |= BO_ALLOC_NO_SUBALLOC;
|
||||
|
||||
/* By default, capture all driver-internal buffers like shader kernels,
|
||||
* surface states, dynamic states, border colors, and so on.
|
||||
*/
|
||||
if (memzone < IRIS_MEMZONE_OTHER || INTEL_DEBUG(DEBUG_CAPTURE_ALL))
|
||||
flags |= BO_ALLOC_CAPTURE;
|
||||
|
||||
bo = alloc_bo_from_slabs(bufmgr, name, size, alignment, flags);
|
||||
|
||||
if (bo)
|
||||
|
|
@ -1227,12 +1237,6 @@ iris_bo_alloc(struct iris_bufmgr *bufmgr,
|
|||
bo->index = -1;
|
||||
bo->real.prime_fd = -1;
|
||||
|
||||
/* By default, capture all driver-internal buffers like shader kernels,
|
||||
* surface states, dynamic states, border colors, and so on.
|
||||
*/
|
||||
if (memzone < IRIS_MEMZONE_OTHER || INTEL_DEBUG(DEBUG_CAPTURE_ALL))
|
||||
bo->real.capture = true;
|
||||
|
||||
assert(bo->real.map == NULL || bo->real.mmap_mode == mmap_mode);
|
||||
bo->real.mmap_mode = mmap_mode;
|
||||
|
||||
|
|
@ -2162,7 +2166,7 @@ intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
|
|||
unsigned int page_size = getpagesize();
|
||||
size = MAX2(ALIGN(size, page_size), page_size);
|
||||
|
||||
struct iris_bo *bo = alloc_fresh_bo(bufmgr, size, 0);
|
||||
struct iris_bo *bo = alloc_fresh_bo(bufmgr, size, BO_ALLOC_CAPTURE);
|
||||
if (!bo) {
|
||||
free(buf);
|
||||
return NULL;
|
||||
|
|
@ -2182,7 +2186,6 @@ intel_aux_map_buffer_alloc(void *driver_ctx, uint32_t size)
|
|||
bo->name = "aux-map";
|
||||
p_atomic_set(&bo->refcount, 1);
|
||||
bo->index = -1;
|
||||
bo->real.capture = true;
|
||||
bo->real.mmap_mode = heap_to_mmap_mode(bufmgr, bo->real.heap);
|
||||
bo->real.prime_fd = -1;
|
||||
|
||||
|
|
|
|||
|
|
@ -326,7 +326,10 @@ struct iris_bo {
|
|||
/** Boolean of whether this buffer is protected (HW encryption) */
|
||||
bool protected;
|
||||
|
||||
/** Boolean of whether this buffer needs to be captured in error dump */
|
||||
/** Boolean of whether this buffer needs to be captured in error dump.
|
||||
* Xe KMD requires this to be set before vm bind while i915 needs
|
||||
* this set before batch_submit().
|
||||
*/
|
||||
bool capture;
|
||||
} real;
|
||||
struct {
|
||||
|
|
@ -345,6 +348,7 @@ struct iris_bo {
|
|||
#define BO_ALLOC_LMEM (1<<5)
|
||||
#define BO_ALLOC_PROTECTED (1<<6)
|
||||
#define BO_ALLOC_SHARED (1<<7)
|
||||
#define BO_ALLOC_CAPTURE (1<<8)
|
||||
|
||||
/**
|
||||
* Allocate a buffer object.
|
||||
|
|
|
|||
|
|
@ -743,7 +743,6 @@ iris_init_identifier_bo(struct iris_screen *screen)
|
|||
|
||||
assert(iris_bo_is_real(screen->workaround_bo));
|
||||
|
||||
screen->workaround_bo->real.capture = true;
|
||||
screen->workaround_address = (struct iris_address) {
|
||||
.bo = screen->workaround_bo,
|
||||
.offset = ALIGN(
|
||||
|
|
@ -814,7 +813,7 @@ iris_screen_create(int fd, const struct pipe_screen_config *config)
|
|||
|
||||
screen->workaround_bo =
|
||||
iris_bo_alloc(screen->bufmgr, "workaround", 4096, 4096,
|
||||
IRIS_MEMZONE_OTHER, BO_ALLOC_NO_SUBALLOC);
|
||||
IRIS_MEMZONE_OTHER, BO_ALLOC_NO_SUBALLOC | BO_ALLOC_CAPTURE);
|
||||
if (!screen->workaround_bo)
|
||||
return NULL;
|
||||
|
||||
|
|
|
|||
|
|
@ -114,6 +114,7 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
|
|||
.flags = DRM_XE_SYNC_FLAG_SIGNAL,
|
||||
};
|
||||
uint64_t range, obj_offset = 0;
|
||||
uint32_t flags = 0;
|
||||
int ret, fd;
|
||||
|
||||
fd = iris_bufmgr_get_fd(bufmgr);
|
||||
|
|
@ -134,6 +135,9 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
|
|||
if (op != DRM_XE_VM_BIND_OP_UNMAP)
|
||||
pat_index = iris_heap_to_pat_entry(devinfo, bo->real.heap)->index;
|
||||
|
||||
if (bo->real.capture)
|
||||
flags |= DRM_XE_VM_BIND_FLAG_DUMPABLE;
|
||||
|
||||
struct drm_xe_vm_bind args = {
|
||||
.vm_id = iris_bufmgr_get_global_vm_id(bufmgr),
|
||||
.num_syncs = 1,
|
||||
|
|
@ -145,6 +149,7 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
|
|||
.bind.addr = intel_48b_address(bo->address),
|
||||
.bind.op = op,
|
||||
.bind.pat_index = pat_index,
|
||||
.bind.flags = flags,
|
||||
};
|
||||
|
||||
xe_sync.timeline_value = intel_bind_timeline_bind_begin(bind_timeline);
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue