Rework buffer object vm code to use nopfn() for kernels >= 2.6.19.

Thomas Hellstrom 2007-02-14 10:49:37 +01:00
parent 9efdae317c
commit 7bcb62b45d
3 changed files with 145 additions and 67 deletions
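For readers coming to this from newer kernels: the nopfn interface this commit adopts differs from both nopage and the later fault() callback. The sketch below is an editorial illustration, not part of the commit; example_nopfn and the pfn-from-vm_pgoff translation are invented, while the handler signature, NOPFN_SIGBUS and the VM_PFNMAP requirement reflect the 2.6.19 API the diff targets.

#include <linux/mm.h>

/*
 * Hypothetical nopfn handler, assuming a 2.6.19-era kernel. Unlike
 * nopage, which hands a struct page * back to the core, nopfn
 * returns a raw page frame number; the core mm then builds the pte
 * itself from vma->vm_page_prot. The vma must be marked VM_PFNMAP.
 */
static unsigned long example_nopfn(struct vm_area_struct *vma,
                                   unsigned long address)
{
        unsigned long page_offset;

        if (address >= vma->vm_end)
                return NOPFN_SIGBUS;    /* nothing backs this address */

        /* Invented translation: back the mapping with the physical
         * pages starting at vm_pgoff. A real driver would look up
         * its buffer object here instead. */
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        return vma->vm_pgoff + page_offset;
}

static struct vm_operations_struct example_vm_ops = {
        .nopfn = example_nopfn,
};

The commit takes a slightly different route: drm_bo_vm_nopfn() below fills the pte itself through the compat vm_insert_pfn() while holding the buffer mutex, then returns a dummy value, counting on the core fault path to skip insertion once the pte is populated.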

File 1 of 3

@@ -79,54 +79,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
- * vm code for kernels below 2,6,15 in which version a major vm write
+ * vm code for kernels below 2.6.15 in which version a major vm write
 * occurred. This implements a simple, straightforward
 * version similar to what's going to be
- * in kernel 2.6.20+?
+ * in kernel 2.6.19+
+ * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
+ * nopfn.
*/
-static int drm_pte_is_clear(struct vm_area_struct *vma,
-                            unsigned long addr)
-{
-        struct mm_struct *mm = vma->vm_mm;
-        int ret = 1;
-        pte_t *pte;
-        pmd_t *pmd;
-        pud_t *pud;
-        pgd_t *pgd;
-        spin_lock(&mm->page_table_lock);
-        pgd = pgd_offset(mm, addr);
-        if (pgd_none(*pgd))
-                goto unlock;
-        pud = pud_offset(pgd, addr);
-        if (pud_none(*pud))
-                goto unlock;
-        pmd = pmd_offset(pud, addr);
-        if (pmd_none(*pmd))
-                goto unlock;
-        pte = pte_offset_map(pmd, addr);
-        if (!pte)
-                goto unlock;
-        ret = pte_none(*pte);
-        pte_unmap(pte);
-unlock:
-        spin_unlock(&mm->page_table_lock);
-        return ret;
-}
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-                  unsigned long pfn, pgprot_t pgprot)
-{
-        int ret;
-        if (!drm_pte_is_clear(vma, addr))
-                return -EBUSY;
-        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
-        return ret;
-}
static struct {
spinlock_t lock;
struct page *dummy_page;
@@ -186,10 +146,85 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
#endif
+#if !defined(DRM_FULL_MM_COMPAT) && \
+  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
+   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+                            unsigned long addr)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        int ret = 1;
+        pte_t *pte;
+        pmd_t *pmd;
+        pud_t *pud;
+        pgd_t *pgd;
+        spin_lock(&mm->page_table_lock);
+        pgd = pgd_offset(mm, addr);
+        if (pgd_none(*pgd))
+                goto unlock;
+        pud = pud_offset(pgd, addr);
+        if (pud_none(*pud))
+                goto unlock;
+        pmd = pmd_offset(pud, addr);
+        if (pmd_none(*pmd))
+                goto unlock;
+        pte = pte_offset_map(pmd, addr);
+        if (!pte)
+                goto unlock;
+        ret = pte_none(*pte);
+        pte_unmap(pte);
+unlock:
+        spin_unlock(&mm->page_table_lock);
+        return ret;
+}
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                  unsigned long pfn)
+{
+        int ret;
+        if (!drm_pte_is_clear(vma, addr))
+                return -EBUSY;
+        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
+        return ret;
+}
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
+/**
+ * While waiting for the fault() handler to appear in
+ * mainline kernels, we accomplish approximately
+ * the same by wrapping it with nopfn.
+ */
+unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                              unsigned long address)
+{
+        struct fault_data data;
+        data.address = address;
+        (void) drm_bo_vm_fault(vma, &data);
+        if (data.type == VM_FAULT_OOM)
+                return NOPFN_OOM;
+        else if (data.type == VM_FAULT_SIGBUS)
+                return NOPFN_SIGBUS;
+        /*
+         * pfn already set.
+         */
+        return 0;
+}
+#endif
#ifdef DRM_ODD_MM_COMPAT
/*
- * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
+ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
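An editorial aside on the two compat pieces above: vm_insert_pfn() refuses to touch a populated pte (returning -EBUSY) because io_remap_pfn_range() may only fill empty entries, and concurrent faults on the same address are possible. A nopfn entry point built on it would therefore treat -EBUSY as success. Sketch only; example_resolve_pfn() and example_nopfn() are invented, while vm_insert_pfn(), NOPFN_SIGBUS and the return-0 convention come from the diff:

static unsigned long example_resolve_pfn(struct vm_area_struct *vma,
                                         unsigned long address)
{
        /* stand-in for a real buffer-object lookup */
        return vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
}

static unsigned long example_nopfn(struct vm_area_struct *vma,
                                   unsigned long address)
{
        int err = vm_insert_pfn(vma, address,
                                example_resolve_pfn(vma, address));

        if (err && err != -EBUSY)       /* -EBUSY: a racing fault won */
                return NOPFN_SIGBUS;

        return 0;       /* pte populated; "pfn already set" as above */
}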

File 2 of 3

@@ -158,11 +158,14 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
#include <linux/mm.h>
#include <asm/page.h>
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && \
-     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
+     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
+#define DRM_FULL_MM_COMPAT
+#endif
/*
@@ -200,18 +203,23 @@ extern int drm_map_page_into_agp(struct page *page);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
-struct fault_data;
-extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
-                                    struct fault_data *data);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
+#if !defined(DRM_FULL_MM_COMPAT) && \
+  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
+   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
struct fault_data;
extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data);
#endif
#ifndef DRM_FULL_MM_COMPAT
/*
* Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
* For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
@@ -228,13 +236,17 @@ struct fault_data {
extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-                         unsigned long pfn, pgprot_t pgprot);
+                         unsigned long pfn);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                                     unsigned long address,
                                     int *type);
-#endif
+#else
+extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+                                     unsigned long address);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
#endif /* ndef DRM_FULL_MM_COMPAT */
#ifdef DRM_ODD_MM_COMPAT
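Taken together, the macros above partition kernel versions into four regimes. The chain below is the editor's summary of the header logic, not code from the commit:

#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
/* DRM_FULL_MM_COMPAT: the anticipated fault() callback is used directly. */
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
/* nopfn exists: drm_bo_vm_nopfn() wraps drm_bo_vm_fault(). */
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
/* DRM_ODD_MM_COMPAT: nopage plus the do_no_page BUG workaround. */
#else
/* Pre-2.6.15: nopage plus the compat vm_insert_pfn(). */
#endif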

File 3 of 3

@@ -720,11 +720,20 @@ EXPORT_SYMBOL(drm_mmap);
* \param vma Virtual memory area.
* \param data Fault data on failure or refault.
* \return Always NULL as we insert pfns directly.
+ *
+ * It's important that pfns are inserted while holding the bo->mutex lock;
+ * otherwise we might race with unmap_mapping_range(), which is always
+ * called with the bo->mutex lock held.
+ *
+ * It's not pretty to modify the vma->vm_page_prot variable while not
+ * holding the mm semaphore in write mode. However, we have it in read mode,
+ * so we won't be racing with any other writers, and we only actually modify
+ * it when no ptes are present so it shouldn't be a big deal.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \
     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
+#ifdef DRM_FULL_MM_COMPAT
static
#endif
struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
@@ -738,7 +747,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
drm_device_t *dev;
unsigned long pfn;
int err;
-        pgprot_t pgprot;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
@@ -759,14 +767,12 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
* move it to a mappable.
*/
#ifdef DRM_BO_FULL_COMPAT
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
uint32_t mask_save = bo->mem.mask;
uint32_t new_mask = bo->mem.mask |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
bo->mem.mask = mask_save;
if (err) {
data->type = (err == -EAGAIN) ?
@@ -774,6 +780,24 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
goto out_unlock;
}
}
+#else
+        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+                unsigned long _end = jiffies + 3*DRM_HZ;
+                uint32_t new_mask = bo->mem.mask |
+                        DRM_BO_FLAG_MAPPABLE |
+                        DRM_BO_FLAG_FORCE_MAPPABLE;
+                do {
+                        err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+                } while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+                if (err) {
+                        DRM_ERROR("Timeout moving buffer to mappable location.\n");
+                        data->type = VM_FAULT_SIGBUS;
+                        goto out_unlock;
+                }
+        }
+#endif
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
@@ -793,7 +817,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
if (bus_size) {
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
-                pgprot = drm_io_prot(_DRM_AGP, vma);
+                vma->vm_page_prot = drm_io_prot(_DRM_AGP, vma);
} else {
ttm = bo->ttm;
@@ -804,10 +828,10 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
goto out_unlock;
}
pfn = page_to_pfn(page);
-                pgprot = vma->vm_page_prot;
+                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
-        err = vm_insert_pfn(vma, address, pfn, pgprot);
+        err = vm_insert_pfn(vma, address, pfn);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
@@ -870,10 +894,14 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
}
static struct vm_operations_struct drm_bo_vm_ops = {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
-        .nopage = drm_bo_vm_nopage,
-#else
+#ifdef DRM_FULL_MM_COMPAT
        .fault = drm_bo_vm_fault,
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+        .nopfn = drm_bo_vm_nopfn,
+#else
+        .nopage = drm_bo_vm_nopage,
+#endif
#endif
.open = drm_bo_vm_open,
.close = drm_bo_vm_close,
@@ -896,6 +924,9 @@ int drm_bo_mmap_locked(struct vm_area_struct *vma,
vma->vm_private_data = map->handle;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+        vma->vm_flags |= VM_PFNMAP;
+#endif
drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_map_bound(vma);
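
One last note on the VM_PFNMAP addition in drm_bo_mmap_locked() above: as of 2.6.19 the kernel's no-pfn fault path refuses to service a vma that uses nopfn without VM_PFNMAP set (do_no_pfn() BUG()s on it), since pfn mappings are assumed to have no struct pages behind them. A minimal sketch of the wiring, with example_mmap() and example_vm_ops invented for illustration:

#include <linux/fs.h>
#include <linux/mm.h>

extern struct vm_operations_struct example_vm_ops;  /* sets .nopfn */

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
        vma->vm_ops = &example_vm_ops;

        /* VM_RESERVED keeps the vma from being swapped out, VM_IO
         * keeps it out of core dumps, and VM_PFNMAP tells the mm
         * there are no struct pages behind the mapping, which the
         * nopfn fault path requires. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP;
        return 0;
}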