Compatibility code for kernels 2.6.15-2.6.18. It is ugly, but a small comfort is
that it will go away in the mainline kernel.
Some bugfixes, mainly in error paths.
This commit is contained in:
Thomas Hellstrom 2006-10-11 22:21:01 +02:00
parent f2db76e2f2
commit 3070389367
7 changed files with 435 additions and 46 deletions

View file

@ -2624,7 +2624,8 @@ int drmBOCreate(int fd, void *ttm, unsigned long start, unsigned long size,
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
int ret;
memset(buf, 0, sizeof(*buf));
memset(&arg, 0, sizeof(arg));
req->mask = mask;
@ -2650,7 +2651,11 @@ int drmBOCreate(int fd, void *ttm, unsigned long start, unsigned long size,
}
req->op = drm_bo_create;
if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
do {
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return -errno;
if (!arg.handled) {
return -EFAULT;

View file

@ -67,14 +67,23 @@ static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict)
{
drm_device_t *dev = buf->dev;
drm_buffer_manager_t *bm = &dev->bm;
int ret;
BUG_ON(!buf->tt);
mutex_lock(&dev->struct_mutex);
if (evict)
drm_evict_ttm(buf->ttm);
ret = drm_evict_ttm(buf->ttm);
else
drm_unbind_ttm(buf->ttm);
ret = drm_unbind_ttm(buf->ttm);
if (ret) {
mutex_unlock(&dev->struct_mutex);
if (ret == -EAGAIN)
schedule();
return ret;
}
drm_mm_put_block(&bm->tt_manager, buf->tt);
buf->tt = NULL;
@ -126,13 +135,31 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
list_del_init(&bo->tt_lru);
list_del_init(&bo->vram_lru);
if (bo->tt) {
if (bo->ttm) {
unsigned long _end = jiffies + DRM_HZ;
int ret;
/*
* This temporarily unlocks struct_mutex.
*/
do {
ret = drm_unbind_ttm(bo->ttm);
if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
schedule();
mutex_lock(&dev->struct_mutex);
}
} while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
if (ret) {
DRM_ERROR("Couldn't unbind buffer. "
"Bad. Continuing anyway\n");
}
}
if (bo->tt) {
drm_unbind_ttm(bo->ttm);
drm_mm_put_block(&bm->tt_manager, bo->tt);
bo->tt = NULL;
}
@ -435,6 +462,9 @@ static int drm_bo_evict(drm_buffer_object_t * bo, int tt, int no_wait)
ret = drm_move_vram_to_local(bo);
}
#endif
if (ret)
goto out;
mutex_lock(&dev->struct_mutex);
list_del_init((tt) ? &bo->tt_lru : &bo->vram_lru);
if (list_empty((tt) ? &bo->vram_lru : &bo->tt_lru))
@ -442,7 +472,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, int tt, int no_wait)
mutex_unlock(&dev->struct_mutex);
DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
_DRM_BO_FLAG_EVICTED);
out:
out:
return ret;
}
@ -521,14 +551,18 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
if (ret)
return ret;
DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->tt->start);
mutex_lock(&dev->struct_mutex);
ret = drm_bind_ttm(bo->ttm, bo->tt->start);
if (ret) {
drm_mm_put_block(&bm->tt_manager, bo->tt);
bo->tt = NULL;
}
mutex_unlock(&dev->struct_mutex);
if (ret)
if (ret) {
return ret;
}
be = bo->ttm->be;
if (be->needs_cache_adjust(be))
@ -1296,6 +1330,7 @@ int drm_buffer_object_create(drm_file_t * priv,
}
bo->priv_flags = 0;
bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
atomic_inc(&bm->count);
ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
1, &new_flags, &bo->mask);
if (ret)
@ -1311,12 +1346,11 @@ int drm_buffer_object_create(drm_file_t * priv,
mutex_unlock(&bo->mutex);
*buf_obj = bo;
atomic_inc(&bm->count);
return 0;
out_err:
out_err:
mutex_unlock(&bo->mutex);
drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
drm_bo_usage_deref_unlocked(dev, bo);
return ret;
}

View file

@ -183,3 +183,239 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
}
#endif
#ifdef DRM_ODD_MM_COMPAT
/*
 * One tracked process mm for a ttm.  refcount counts the vmas of this mm
 * beyond the first (it is set to 0 on creation in drm_ttm_add_vma()), and
 * 'locked' flags whether drm_ttm_lock_mm() currently holds this mm's
 * mmap_sem for writing.
 */
typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
	int locked;
} p_mm_entry_t;

/* One vma currently mapping a ttm; linked on ttm->vma_list. */
typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;
/*
 * Compat nopage handler for TTM mappings on pre-2.6.19 kernels
 * (DRM_ODD_MM_COMPAT).  Looks up -- and on first touch allocates --
 * the backing page for the faulting address.
 *
 * Returns the page with an elevated refcount, NOPAGE_OOM on a missing
 * map / allocation failure / page-count limit, or NOPAGE_SIGBUS for an
 * out-of-range address.
 */
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
			       unsigned long address, int *type)
{
	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	drm_ttm_t *ttm;
	drm_buffer_manager_t *bm;
	drm_device_t *dev;

	/*
	 * FIXME: Check can't map aperture flag.
	 */

	if (type)
		*type = VM_FAULT_MINOR;

	if (!map)
		return NOPAGE_OOM;

	/*
	 * '>=' rather than '>': an address equal to vm_end is already one
	 * page past the mapping and would index one slot past the end of
	 * ttm->pages[] below.
	 */
	if (address >= vma->vm_end)
		return NOPAGE_SIGBUS;

	ttm = (drm_ttm_t *) map->offset;
	dev = ttm->dev;

	/* struct_mutex protects the page array and bm page accounting. */
	mutex_lock(&dev->struct_mutex);
	drm_fixup_ttm_caching(ttm);
	BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);

	bm = &dev->bm;
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = ttm->pages[page_offset];

	if (!page) {
		if (bm->cur_pages >= bm->max_pages) {
			DRM_ERROR("Maximum locked page count exceeded\n");
			page = NOPAGE_OOM;
			goto out;
		}
		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
		if (!page) {
			page = NOPAGE_OOM;
			goto out;
		}
		/* Account the page only after the allocation succeeded. */
		++bm->cur_pages;
		SetPageLocked(page);
	}

	get_page(page);
      out:
	mutex_unlock(&dev->struct_mutex);
	return page;
}
/*
 * Remap a vma for a bound, uncached ttm using io_remap_pfn_range().
 * Cached ttms need no remapping and return 0 immediately.  Call with
 * the ttm mutex held and the relevant mmap_sem locked.
 */
int drm_ttm_map_bound(struct vm_area_struct *vma)
{
	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
	unsigned long first_pfn;

	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
		return 0;

	first_pfn = ttm->aper_offset + (ttm->be->aperture_base >> PAGE_SHIFT);
	return io_remap_pfn_range(vma, vma->vm_start, first_pfn,
				  vma->vm_end - vma->vm_start,
				  drm_io_prot(ttm->be->drm_map_type, vma));
}
/*
 * Register @vma with @ttm: add it to the ttm's vma list and make sure
 * its mm_struct is present (refcounted) on the ttm's process mm list,
 * which is kept sorted by mm pointer value.  Needs the ttm mutex.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n_entry;
	vma_entry_t *v_entry;
	drm_local_map_t *map = (drm_local_map_t *)
		vma->vm_private_data;
	struct mm_struct *mm = vma->vm_mm;

	v_entry = drm_alloc(sizeof(*v_entry), DRM_MEM_TTM);
	if (!v_entry) {
		DRM_ERROR("Allocation of vma pointer entry failed\n");
		return -ENOMEM;
	}
	v_entry->vma = vma;
	map->handle = (void *) v_entry;
	list_add_tail(&v_entry->head, &ttm->vma_list);

	/*
	 * Walk the sorted mm list: bump the refcount if this mm is already
	 * tracked, otherwise stop at the insertion point.
	 */
	list_for_each_entry(entry, &ttm->p_mm_list, head) {
		if (mm == entry->mm) {
			atomic_inc(&entry->refcount);
			return 0;
		} else if ((unsigned long)mm < (unsigned long)entry->mm)
			break;	/* was a bare ';': the comparison had no effect */
	}

	n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM);
	if (!n_entry) {
		/*
		 * NOTE(review): v_entry stays on vma_list here and is only
		 * reclaimed by drm_ttm_delete_vma() when the vma closes --
		 * TODO confirm callers unwind through vma close on error.
		 */
		DRM_ERROR("Allocation of process mm pointer entry failed\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&n_entry->head);
	n_entry->mm = mm;
	n_entry->locked = 0;
	/* refcount counts additional vmas; the first one is implicit. */
	atomic_set(&n_entry->refcount, 0);
	/* Insert before 'entry' (or at the tail if the loop ran off the end). */
	list_add_tail(&n_entry->head, &entry->head);
	return 0;
}
/*
 * Unregister @vma from @ttm: free its vma_entry and drop one reference
 * on the matching process mm entry, freeing that too when the last
 * reference goes away.  Counterpart of drm_ttm_add_vma(); needs the
 * ttm mutex.  BUGs if the vma or its mm was never registered.
 */
void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	/* Find and free the vma_entry for this vma. */
	list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
			break;
		}
	}
	BUG_ON(!found);

	/*
	 * refcount starts at 0 for the first vma (see drm_ttm_add_vma()),
	 * so a negative result means this was the mm's last vma.
	 */
	list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				/* Never free an entry whose mmap_sem is held. */
				BUG_ON(entry->locked);
				drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
			}
			return;
		}
	}
	BUG_ON(1);
}
/*
 * Try to take the mmap_sem (write) of every process mm mapping this ttm,
 * without releasing the ttm mutex.  On any trylock failure, every
 * semaphore taken so far is rolled back and -EAGAIN is returned; the
 * caller shall release the ttm mutex, schedule() and retry.  Returns 0
 * with all semaphores held on success.
 */
int drm_ttm_lock_mm(drm_ttm_t * ttm)
{
	p_mm_entry_t *entry;
	p_mm_entry_t *failed = NULL;

	list_for_each_entry(entry, &ttm->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			failed = entry;
			break;
		}
		entry->locked = 1;
	}

	if (!failed)
		return 0;

	/* Roll back every semaphore acquired before the one that failed. */
	list_for_each_entry(entry, &ttm->p_mm_list, head) {
		if (entry == failed)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and restart.
	 */
	return -EAGAIN;
}
/*
 * Release the mmap_sems previously taken by drm_ttm_lock_mm().
 * Every tracked mm must currently be locked; BUGs otherwise.
 */
void drm_ttm_unlock_mm(drm_ttm_t * ttm)
{
	p_mm_entry_t *e;

	list_for_each_entry(e, &ttm->p_mm_list, head) {
		BUG_ON(!e->locked);
		up_write(&e->mm->mmap_sem);
		e->locked = 0;
	}
}
/*
 * Remap every vma of this ttm via drm_ttm_map_bound(), stopping at the
 * first failure.  Always releases the mmap_sems held for this ttm before
 * returning the first error (or 0).
 */
int drm_ttm_remap_bound(drm_ttm_t *ttm)
{
	int err = 0;
	vma_entry_t *ve;

	list_for_each_entry(ve, &ttm->vma_list, head) {
		err = drm_ttm_map_bound(ve->vma);
		if (err)
			break;
	}

	drm_ttm_unlock_mm(ttm);
	return err;
}
/*
 * After unmapping the ptes of an uncached ttm, clear VM_PFNMAP from all
 * of its vmas (otherwise the next do_nopage() would bug) and release the
 * mmap_sems held for this ttm.  A cached ttm needs none of this and
 * returns immediately, leaving any lock state untouched.
 */
void drm_ttm_finish_unmap(drm_ttm_t *ttm)
{
	vma_entry_t *ve;

	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
		return;

	list_for_each_entry(ve, &ttm->vma_list, head)
		ve->vma->vm_flags &= ~VM_PFNMAP;

	drm_ttm_unlock_mm(ttm);
}
#endif

View file

@ -231,6 +231,13 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
#include <linux/mm.h>
#include <asm/page.h>
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif
/*
* Flush relevant caches and clear a VMA structure so that page references
* will cause a page fault. Don't flush tlbs.
@ -303,5 +310,66 @@ extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data);
#endif
#ifdef DRM_ODD_MM_COMPAT
struct drm_ttm;
/*
* Add a vma to the ttm vma list, and the
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
extern int drm_ttm_add_vma(struct drm_ttm * ttm,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
* not releasing the ttm mutex. May return -EAGAIN to avoid
* deadlocks. In that case the caller shall release the ttm mutex,
* schedule() and try again.
*/
extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
/*
* Unlock all relevant mmap_sems for a ttm.
*/
extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
/*
* If the ttm was bound to the aperture, this function shall be called
* with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
* vmas mapping this ttm. This is needed just after unmapping the ptes of
* the vma, otherwise the do_nopage() function will bug :(. The function
* releases the mmap_sems for this ttm.
*/
extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
/*
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
* fault these pfns in, because the first one will set the vma VM_PFNMAP
* flag, which will make the next fault bug in do_nopage(). The function
* releases the mmap_sems for this ttm.
*/
extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
/*
* Remap a vma for a bound ttm. Call with the ttm mutex held and
* the relevant mmap_sem locked.
*/
extern int drm_ttm_map_bound(struct vm_area_struct *vma);
#endif
#endif

View file

@ -66,8 +66,17 @@ static int unmap_vma_pages(drm_ttm_t * ttm)
drm_device_t *dev = ttm->dev;
loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
#ifdef DRM_ODD_MM_COMPAT
int ret;
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
#endif
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_finish_unmap(ttm);
#endif
return 0;
}
@ -128,8 +137,11 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
DRM_DEBUG("Destroying a ttm\n");
#ifdef DRM_TTM_ODD_COMPAT
BUG_ON(!list_empty(&ttm->vma_list));
BUG_ON(!list_empty(&ttm->p_mm_list));
#endif
be = ttm->be;
if (be) {
be->destroy(be);
ttm->be = NULL;
@ -231,6 +243,11 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size,
if (!ttm)
return NULL;
#ifdef DRM_ODD_MM_COMPAT
INIT_LIST_HEAD(&ttm->p_mm_list);
INIT_LIST_HEAD(&ttm->vma_list);
#endif
ttm->dev = dev;
atomic_set(&ttm->vma_count, 0);
@ -263,11 +280,15 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size,
int drm_evict_ttm(drm_ttm_t * ttm)
{
drm_ttm_backend_t *be = ttm->be;
int ret;
switch (ttm->state) {
case ttm_bound:
if (be->needs_cache_adjust(be)) {
unmap_vma_pages(ttm);
ret = unmap_vma_pages(ttm);
if (ret) {
return ret;
}
}
be->unbind(be);
break;
@ -291,12 +312,18 @@ void drm_fixup_ttm_caching(drm_ttm_t * ttm)
}
void drm_unbind_ttm(drm_ttm_t * ttm)
int drm_unbind_ttm(drm_ttm_t * ttm)
{
int ret = 0;
if (ttm->state == ttm_bound)
drm_evict_ttm(ttm);
ret = drm_evict_ttm(ttm);
if (ret)
return ret;
drm_fixup_ttm_caching(ttm);
return 0;
}
int drm_bind_ttm(drm_ttm_t * ttm,
@ -313,20 +340,45 @@ int drm_bind_ttm(drm_ttm_t * ttm,
be = ttm->be;
drm_ttm_populate(ttm);
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && be->needs_cache_adjust(be)) {
unmap_vma_pages(ttm);
ret = unmap_vma_pages(ttm);
if (ret)
return ret;
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
}
#ifdef DRM_ODD_MM_COMPAT
else if (ttm->state == ttm_evicted && be->needs_cache_adjust(be)) {
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
}
#endif
if ((ret = be->bind(be, aper_offset))) {
drm_unbind_ttm(ttm);
ttm->state = ttm_evicted;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_cache_adjust(be))
drm_ttm_unlock_mm(ttm);
#endif
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
ttm->aper_offset = aper_offset;
ttm->state = ttm_bound;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_cache_adjust(be)) {
ret = drm_ttm_remap_bound(ttm);
if (ret)
return ret;
}
#endif
return 0;
}

View file

@ -74,6 +74,11 @@ typedef struct drm_ttm {
ttm_unbound,
ttm_unpopulated,
} state;
#ifdef DRM_ODD_MM_COMPAT
struct list_head vma_list;
struct list_head p_mm_list;
#endif
} drm_ttm_t;
typedef struct drm_ttm_object {
@ -95,7 +100,7 @@ extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
extern int drm_bind_ttm(drm_ttm_t * ttm,
unsigned long aper_offset);
extern void drm_unbind_ttm(drm_ttm_t * ttm);
extern int drm_unbind_ttm(drm_ttm_t * ttm);
/*
* Evict a ttm region. Keeps Aperture caching policy.

View file

@ -204,15 +204,15 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
if (!page) {
if (bm->cur_pages >= bm->max_pages) {
DRM_ERROR("Maximum locked page count exceeded\n");
page = NOPAGE_OOM;
data->type = VM_FAULT_OOM;
goto out;
}
++bm->cur_pages;
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
if (!page) {
data->type = VM_FAULT_OOM;
goto out;
}
++bm->cur_pages;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
@ -236,28 +236,6 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
err = vm_insert_pfn(vma, address, pfn, pgprot);
if (!err && (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
ttm->num_pages > 1) {
/*
* FIXME: Check can't map aperture flag.
*/
/*
* Since we're not racing with anybody else,
* we might as well populate the whole object space.
* Note that we're touching vma->vm_flags with this
* operation, but we are not changing them, so we should be
* OK.
*/
BUG_ON(ttm->state == ttm_unpopulated);
err = io_remap_pfn_range(vma, address + PAGE_SIZE, pfn+1,
(ttm->num_pages - 1) * PAGE_SIZE,
pgprot);
}
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
@ -611,6 +589,9 @@ static int drm_vm_ttm_open(struct vm_area_struct *vma) {
mutex_lock(&dev->struct_mutex);
ttm = (drm_ttm_t *) map->offset;
atomic_inc(&ttm->vma_count);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_add_vma(ttm, vma);
#endif
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -666,6 +647,9 @@ static void drm_vm_ttm_close(struct vm_area_struct *vma)
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_delete_vma(ttm, vma);
#endif
if (atomic_dec_and_test(&ttm->vma_count)) {
if (ttm->destroy) {
ret = drm_destroy_ttm(ttm);
@ -877,6 +861,11 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = (void *) map;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#ifdef DRM_ODD_MM_COMPAT
mutex_lock(&dev->struct_mutex);
drm_ttm_map_bound(vma);
mutex_unlock(&dev->struct_mutex);
#endif
if (drm_vm_ttm_open(vma))
return -EAGAIN;
return 0;