Revert "amdgpu: clean up non list code path for vamgr"

This reverts commit 41b94a3fb6.

It caused crashes with radeonsi in at least glxgears and Xorg.
This commit is contained in:
Michel Dänzer 2018-02-08 09:50:53 +01:00 committed by Michel Dänzer
parent 09642c073e
commit fa35b51f63
2 changed files with 80 additions and 43 deletions

View file

@@ -53,6 +53,8 @@ struct amdgpu_bo_va_hole {
 };
 
 struct amdgpu_bo_va_mgr {
+	/* the start virtual address */
+	uint64_t va_offset;
 	uint64_t va_max;
 	struct list_head va_holes;
 	pthread_mutex_t bo_va_mutex;

View file

@@ -48,19 +48,12 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
 drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
 				   uint64_t max, uint64_t alignment)
 {
-	struct amdgpu_bo_va_hole *n;
-
+	mgr->va_offset = start;
 	mgr->va_max = max;
 	mgr->va_alignment = alignment;
 
 	list_inithead(&mgr->va_holes);
 	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
-
-	pthread_mutex_lock(&mgr->bo_va_mutex);
-	n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-	n->size = mgr->va_max;
-	n->offset = start;
-	list_add(&n->list, &mgr->va_holes);
-	pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
 
 drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
@@ -129,14 +122,41 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
 		}
 	}
 
+	if (base_required) {
+		if (base_required < mgr->va_offset) {
+			pthread_mutex_unlock(&mgr->bo_va_mutex);
+			return AMDGPU_INVALID_VA_ADDRESS;
+		}
+		offset = mgr->va_offset;
+		waste = base_required - mgr->va_offset;
+	} else {
+		offset = mgr->va_offset;
+		waste = offset % alignment;
+		waste = waste ? alignment - waste : 0;
+	}
+
+	if (offset + waste + size > mgr->va_max) {
+		pthread_mutex_unlock(&mgr->bo_va_mutex);
+		return AMDGPU_INVALID_VA_ADDRESS;
+	}
+
+	if (waste) {
+		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+		n->size = waste;
+		n->offset = offset;
+		list_add(&n->list, &mgr->va_holes);
+	}
+
+	offset += waste;
+	mgr->va_offset += size + waste;
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
-	return AMDGPU_INVALID_VA_ADDRESS;
+	return offset;
 }
 
 static drm_private void
 amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 {
-	struct amdgpu_bo_va_hole *hole, *next;
+	struct amdgpu_bo_va_hole *hole;
 
 	if (va == AMDGPU_INVALID_VA_ADDRESS)
 		return;
@@ -144,6 +164,20 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 	size = ALIGN(size, mgr->va_alignment);
 
 	pthread_mutex_lock(&mgr->bo_va_mutex);
+	if ((va + size) == mgr->va_offset) {
+		mgr->va_offset = va;
+		/* Delete uppermost hole if it reaches the new top */
+		if (!LIST_IS_EMPTY(&mgr->va_holes)) {
+			hole = container_of(mgr->va_holes.next, hole, list);
+			if ((hole->offset + hole->size) == va) {
+				mgr->va_offset = hole->offset;
+				list_del(&hole->list);
+				free(hole);
+			}
+		}
+	} else {
+		struct amdgpu_bo_va_hole *next;
+
 	hole = container_of(&mgr->va_holes, hole, list);
 	LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
 		if (next->offset < va)
@@ -164,6 +198,7 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 				list_del(&hole->list);
 				free(hole);
 			}
+			goto out;
 		}
 	}
@@ -183,7 +218,7 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 			next->offset = va;
 			list_add(&next->list, &hole->list);
 		}
-
+	}
 out:
 	pthread_mutex_unlock(&mgr->bo_va_mutex);
 }