Bring in stripped TTM functionality.

Thomas Hellstrom 2006-08-22 09:47:33 +02:00
parent 8d5b7c77f9
commit 700bf80ca9
7 changed files with 1262 additions and 21 deletions

View file

@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o
drm_fence.o drm_ttm.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o

View file

@ -586,6 +586,18 @@ typedef struct drm_mm {
drm_mm_node_t root_node;
} drm_mm_t;
#include "drm_ttm.h"
/*
* buffer object driver
*/
typedef struct drm_bo_driver {
int cached_pages;
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device *dev, int cached);
} drm_bo_driver_t;
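A sketch (not part of this commit) of how a driver might fill in this hook; mydrv_create_ttm_backend and drm_agp_create_ttm_backend are hypothetical names, standing in for the AGP backend constructor that the drm_ttm.h comment says lives in drm_agpsupport.c:

static drm_ttm_backend_t *mydrv_create_ttm_backend(struct drm_device *dev,
                                                   int cached)
{
        /* Most drivers would simply hand back the standard AGP backend. */
        return drm_agp_create_ttm_backend(dev, cached); /* hypothetical */
}

static drm_bo_driver_t mydrv_bo_driver = {
        .cached_pages = 0,
        .create_ttm_backend_entry = mydrv_create_ttm_backend,
};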
/**
* DRM driver structure. This structure represents the common code for
@ -639,6 +651,7 @@ struct drm_driver {
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
int major;
int minor;
@ -979,6 +992,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"

View file

@ -420,6 +420,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
case _DRM_TTM:
BUG_ON(1);
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);

linux-core/drm_ttm.c Normal file
View file

@ -0,0 +1,813 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Steamboat Springs, CO.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#include "drmP.h"
#include <asm/tlbflush.h>
typedef struct p_mm_entry {
struct list_head head;
struct mm_struct *mm;
atomic_t refcount;
} p_mm_entry_t;
typedef struct drm_val_action {
int needs_rx_flush;
int evicted_tt;
int evicted_vram;
int validated;
} drm_val_action_t;
/*
* We may be manipulating other processes' page tables, so for each TTM, keep track of
* which mm_structs are currently mapping the ttm so that we can take the appropriate
* locks when we modify their page tables. A typical application is when we evict another
* process' buffers.
*/
int drm_ttm_add_mm_to_list(drm_ttm_t * ttm, struct mm_struct *mm)
{
p_mm_entry_t *entry, *n_entry;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
} else if ((unsigned long)mm < (unsigned long)entry->mm)
break;	/* keep the list sorted; insert before this entry */
}
n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_MM);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&n_entry->head);
n_entry->mm = mm;
atomic_set(&n_entry->refcount, 0);
atomic_inc(&ttm->shared_count);
ttm->mm_list_seq++;
list_add_tail(&n_entry->head, &entry->head);
return 0;
}
void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm)
{
p_mm_entry_t *entry, *n;
list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
if (mm == entry->mm) {
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
atomic_dec(&ttm->shared_count);
ttm->mm_list_seq++;
}
return;
}
}
BUG_ON(TRUE);
}
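/*
 * Take the requested locks (the mmap_sem write side and/or the
 * page_table_lock) of every process currently mapping this ttm.
 * drm_ttm_unlock_mm() below releases them again.
 */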
static void drm_ttm_lock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
{
p_mm_entry_t *entry;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
if (mm_sem) {
down_write(&entry->mm->mmap_sem);
}
if (page_table) {
spin_lock(&entry->mm->page_table_lock);
}
}
}
static void drm_ttm_unlock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
{
p_mm_entry_t *entry;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
if (page_table) {
spin_unlock(&entry->mm->page_table_lock);
}
if (mm_sem) {
up_write(&entry->mm->mmap_sem);
}
}
}
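/*
 * Remap a range of aperture pages into every vma that currently maps
 * this ttm, so bound pages stay reachable through existing mappings.
 */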
static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long num_pages, unsigned long aper_offset)
{
struct list_head *list;
int ret = 0;
list_for_each(list, &ttm->vma_list->head) {
drm_ttm_vma_list_t *entry =
list_entry(list, drm_ttm_vma_list_t, head);
ret = io_remap_pfn_range(entry->vma,
entry->vma->vm_start +
(page_offset << PAGE_SHIFT),
(ttm->aperture_base >> PAGE_SHIFT) +
aper_offset, num_pages << PAGE_SHIFT,
drm_io_prot(_DRM_AGP, entry->vma));
if (ret)
break;
}
global_flush_tlb();
return ret;
}
/*
* Unmap all vma pages from vmas mapping this ttm.
*/
static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long num_pages)
{
struct list_head *list;
struct page **first_page = ttm->pages + page_offset;
struct page **last_page = ttm->pages + (page_offset + num_pages);
struct page **cur_page;
list_for_each(list, &ttm->vma_list->head) {
drm_ttm_vma_list_t *entry =
list_entry(list, drm_ttm_vma_list_t, head);
drm_clear_vma(entry->vma,
entry->vma->vm_start +
(page_offset << PAGE_SHIFT),
entry->vma->vm_start +
((page_offset + num_pages) << PAGE_SHIFT));
}
for (cur_page = first_page; cur_page != last_page; ++cur_page) {
if (page_mapcount(*cur_page) != 0) {
DRM_ERROR("Mapped page detected. Map count is %d\n",
page_mapcount(*cur_page));
return -1;
}
}
return 0;
}
/*
* Free all resources associated with a ttm.
*/
int drm_destroy_ttm(drm_ttm_t * ttm)
{
int i;
struct list_head *list, *next;
struct page **cur_page;
if (!ttm)
return 0;
if (atomic_read(&ttm->vma_count) > 0) {
DRM_DEBUG("VMAs are still alive. Skipping destruction.\n");
return -EBUSY;
} else {
DRM_DEBUG("Checking for busy regions.\n");
}
if (ttm->be_list) {
list_for_each_safe(list, next, &ttm->be_list->head) {
drm_ttm_backend_list_t *entry =
list_entry(list, drm_ttm_backend_list_t, head);
#ifdef REMOVED
drm_ht_remove_item(&ttm->dev->ttmreghash,
&entry->hash);
#endif
drm_destroy_ttm_region(entry);
}
drm_free(ttm->be_list, sizeof(*ttm->be_list), DRM_MEM_MAPS);
ttm->be_list = NULL;
}
if (atomic_read(&ttm->unfinished_regions) > 0) {
DRM_DEBUG("Regions are still busy. Skipping destruction.\n");
ttm->destroy = TRUE;
return -EAGAIN;
} else {
DRM_DEBUG("About to really destroy ttm.\n");
}
if (ttm->pages) {
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (ttm->page_flags &&
(ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
*cur_page && !PageHighMem(*cur_page)) {
change_page_attr(*cur_page, 1, PAGE_KERNEL);
}
if (*cur_page) {
ClearPageReserved(*cur_page);
__free_page(*cur_page);
}
}
global_flush_tlb();
vfree(ttm->pages);
ttm->pages = NULL;
}
if (ttm->page_flags) {
vfree(ttm->page_flags);
ttm->page_flags = NULL;
}
if (ttm->vma_list) {
list_for_each_safe(list, next, &ttm->vma_list->head) {
drm_ttm_vma_list_t *entry =
list_entry(list, drm_ttm_vma_list_t, head);
list_del(list);
entry->vma->vm_private_data = NULL;
drm_free(entry, sizeof(*entry), DRM_MEM_MAPS);
}
drm_free(ttm->vma_list, sizeof(*ttm->vma_list), DRM_MEM_MAPS);
ttm->vma_list = NULL;
}
drm_free(ttm, sizeof(*ttm), DRM_MEM_MAPS);
return 0;
}
/*
* Initialize a ttm.
* FIXME: Avoid using vmalloc for the page- and page_flags tables?
*/
drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size)
{
drm_ttm_t *ttm;
if (!dev->driver->bo_driver)
return NULL;
ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_MAPS);
if (!ttm)
return NULL;
ttm->lhandle = 0;
atomic_set(&ttm->vma_count, 0);
atomic_set(&ttm->unfinished_regions, 0);
ttm->destroy = FALSE;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->page_flags = vmalloc(ttm->num_pages * sizeof(*ttm->page_flags));
if (!ttm->page_flags) {
drm_destroy_ttm(ttm);
DRM_ERROR("Failed allocating page_flags table\n");
return NULL;
}
memset(ttm->page_flags, 0, ttm->num_pages * sizeof(*ttm->page_flags));
ttm->pages = vmalloc(ttm->num_pages * sizeof(*ttm->pages));
if (!ttm->pages) {
drm_destroy_ttm(ttm);
DRM_ERROR("Failed allocating page table\n");
return NULL;
}
memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
ttm->be_list = drm_calloc(1, sizeof(*ttm->be_list), DRM_MEM_MAPS);
if (!ttm->be_list) {
DRM_ERROR("Alloc be regions failed\n");
drm_destroy_ttm(ttm);
return NULL;
}
INIT_LIST_HEAD(&ttm->be_list->head);
INIT_LIST_HEAD(&ttm->p_mm_list);
atomic_set(&ttm->shared_count, 0);
ttm->mm_list_seq = 0;
ttm->vma_list = drm_calloc(1, sizeof(*ttm->vma_list), DRM_MEM_MAPS);
if (!ttm->vma_list) {
DRM_ERROR("Alloc vma list failed\n");
drm_destroy_ttm(ttm);
return NULL;
}
INIT_LIST_HEAD(&ttm->vma_list->head);
ttm->lhandle = (unsigned long)ttm;
ttm->dev = dev;
return ttm;
}
/*
 * Lock the mmap_sems for processes that are mapping this ttm.
 * This looks a bit clumsy, since we need to maintain the correct
 * locking order
 * mm->mmap_sem
 * dev->struct_mutex
 * and while we release dev->struct_mutex to lock the mmap_sems,
 * the mm list may have been updated. We need to revalidate
 * it after relocking dev->struct_mutex.
 */
static int drm_ttm_lock_mmap_sem(drm_ttm_t * ttm)
{
struct mm_struct **mm_list = NULL, **mm_list_p;
uint32_t list_seq;
uint32_t cur_count, shared_count;
p_mm_entry_t *entry;
unsigned i;
cur_count = 0;
list_seq = ttm->mm_list_seq;
shared_count = atomic_read(&ttm->shared_count);
do {
if (shared_count > cur_count) {
if (mm_list)
drm_free(mm_list, sizeof(*mm_list) * cur_count,
DRM_MEM_MM);
cur_count = shared_count + 10;
mm_list =
drm_alloc(sizeof(*mm_list) * cur_count, DRM_MEM_MM);
if (!mm_list)
return -ENOMEM;
}
mm_list_p = mm_list;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
*mm_list_p++ = entry->mm;
}
mutex_unlock(&ttm->dev->struct_mutex);
mm_list_p = mm_list;
for (i = 0; i < shared_count; ++i, ++mm_list_p) {
down_write(&((*mm_list_p)->mmap_sem));
}
mutex_lock(&ttm->dev->struct_mutex);
if (list_seq != ttm->mm_list_seq) {
mm_list_p = mm_list;
for (i = 0; i < shared_count; ++i, ++mm_list_p) {
up_write(&((*mm_list_p)->mmap_sem));
}
}
shared_count = atomic_read(&ttm->shared_count);
} while (list_seq != ttm->mm_list_seq);
if (mm_list)
drm_free(mm_list, sizeof(*mm_list) * cur_count, DRM_MEM_MM);
ttm->mmap_sem_locked = TRUE;
return 0;
}
/*
* Change caching policy for range of pages in a ttm.
*/
static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long num_pages, int noncached,
int do_tlbflush)
{
int i, cur;
struct page **cur_page;
pgprot_t attr = (noncached) ? PAGE_KERNEL_NOCACHE : PAGE_KERNEL;
drm_ttm_lock_mm(ttm, FALSE, TRUE);
unmap_vma_pages(ttm, page_offset, num_pages);
for (i = 0; i < num_pages; ++i) {
cur = page_offset + i;
cur_page = ttm->pages + cur;
if (*cur_page) {
if (PageHighMem(*cur_page)) {
if (noncached
&& page_address(*cur_page) != NULL) {
DRM_ERROR
("Illegal mapped HighMem Page\n");
drm_ttm_unlock_mm(ttm, FALSE, TRUE);
return -EINVAL;
}
} else if ((ttm->page_flags[cur] &
DRM_TTM_PAGE_UNCACHED) != noncached) {
DRM_MASK_VAL(ttm->page_flags[cur],
DRM_TTM_PAGE_UNCACHED, noncached);
change_page_attr(*cur_page, 1, attr);
}
}
}
if (do_tlbflush)
global_flush_tlb();
drm_ttm_unlock_mm(ttm, FALSE, TRUE);
return 0;
}
/*
* Unbind a ttm region from the aperture.
*/
int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
{
drm_ttm_backend_t *be = entry->be;
drm_ttm_t *ttm = entry->owner;
int ret;
if (be) {
switch (entry->state) {
case ttm_bound:
if (ttm && be->needs_cache_adjust(be)) {
ret = drm_ttm_lock_mmap_sem(ttm);
if (ret)
return ret;
drm_ttm_lock_mm(ttm, FALSE, TRUE);
unmap_vma_pages(ttm, entry->page_offset,
entry->num_pages);
global_flush_tlb();
drm_ttm_unlock_mm(ttm, FALSE, TRUE);
}
be->unbind(entry->be);
if (ttm && be->needs_cache_adjust(be)) {
drm_set_caching(ttm, entry->page_offset,
entry->num_pages, 0, 1);
drm_ttm_unlock_mm(ttm, TRUE, FALSE);
}
break;
default:
break;
}
}
entry->state = ttm_evicted;
return 0;
}
void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry)
{
drm_evict_ttm_region(entry);
entry->state = ttm_unbound;
}
/*
* Destroy and clean up all resources associated with a ttm region.
* FIXME: release pages to OS when doing this operation.
*/
void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
{
drm_ttm_backend_t *be = entry->be;
drm_ttm_t *ttm = entry->owner;
uint32_t *cur_page_flags;
int i;
list_del_init(&entry->head);
drm_unbind_ttm_region(entry);
if (be) {
be->clear(entry->be);
if (be->needs_cache_adjust(be)) {
int ret = drm_ttm_lock_mmap_sem(ttm);
drm_set_caching(ttm, entry->page_offset,
entry->num_pages, 0, 1);
if (!ret)
drm_ttm_unlock_mm(ttm, TRUE, FALSE);
}
be->destroy(be);
}
cur_page_flags = ttm->page_flags + entry->page_offset;
for (i = 0; i < entry->num_pages; ++i) {
DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED, 0);
cur_page_flags++;
}
drm_free(entry, sizeof(*entry), DRM_MEM_MAPS);
}
/*
* Create a ttm region from a range of ttm pages.
*/
int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long n_pages, int cached,
drm_ttm_backend_list_t ** region)
{
struct page **cur_page;
uint32_t *cur_page_flags;
drm_ttm_backend_list_t *entry;
drm_ttm_backend_t *be;
int ret, i;
if ((page_offset + n_pages) > ttm->num_pages || n_pages == 0) {
DRM_ERROR("Region Doesn't fit ttm\n");
return -EINVAL;
}
cur_page_flags = ttm->page_flags + page_offset;
for (i = 0; i < n_pages; ++i, ++cur_page_flags) {
if (*cur_page_flags & DRM_TTM_PAGE_USED) {
DRM_ERROR("TTM region overlap\n");
return -EINVAL;
} else {
DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED,
DRM_TTM_PAGE_USED);
}
}
entry = drm_calloc(1, sizeof(*entry), DRM_MEM_MAPS);
if (!entry)
return -ENOMEM;
be = ttm->dev->driver->bo_driver->create_ttm_backend_entry(ttm->dev, cached);
if (!be) {
drm_free(entry, sizeof(*entry), DRM_MEM_MAPS);
DRM_ERROR("Couldn't create backend.\n");
return -EINVAL;
}
entry->state = ttm_unbound;
entry->page_offset = page_offset;
entry->num_pages = n_pages;
entry->be = be;
entry->owner = ttm;
INIT_LIST_HEAD(&entry->head);
list_add_tail(&entry->head, &ttm->be_list->head);
for (i = 0; i < entry->num_pages; ++i) {
cur_page = ttm->pages + (page_offset + i);
if (!*cur_page) {
*cur_page = alloc_page(GFP_KERNEL);
if (!*cur_page) {
DRM_ERROR("Page allocation failed\n");
drm_destroy_ttm_region(entry);
return -ENOMEM;
}
SetPageReserved(*cur_page);
}
}
if ((ret = be->populate(be, n_pages, ttm->pages + page_offset))) {
drm_destroy_ttm_region(entry);
DRM_ERROR("Couldn't populate backend.\n");
return ret;
}
ttm->aperture_base = be->aperture_base;
*region = entry;
return 0;
}
/*
* Bind a ttm region. Set correct caching policy.
*/
int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
unsigned long aper_offset)
{
int i;
uint32_t *cur_page_flag;
int ret = 0;
drm_ttm_backend_t *be;
drm_ttm_t *ttm;
if (!region || region->state == ttm_bound)
return -EINVAL;
be = region->be;
ttm = region->owner;
if (ttm && be->needs_cache_adjust(be)) {
ret = drm_ttm_lock_mmap_sem(ttm);
if (ret)
return ret;
drm_set_caching(ttm, region->page_offset, region->num_pages,
DRM_TTM_PAGE_UNCACHED, TRUE);
} else {
DRM_DEBUG("Binding cached\n");
}
if ((ret = be->bind(be, aper_offset))) {
if (ttm && be->needs_cache_adjust(be))
drm_ttm_unlock_mm(ttm, TRUE, FALSE);
drm_unbind_ttm_region(region);
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
if (ttm) {
/* Anonymous regions have no owning ttm and thus no page_flags. */
cur_page_flag = ttm->page_flags + region->page_offset;
for (i = 0; i < region->num_pages; ++i) {
DRM_MASK_VAL(*cur_page_flag, DRM_TTM_MASK_PFN,
(i + aper_offset) << PAGE_SHIFT);
cur_page_flag++;
}
}
if (ttm && be->needs_cache_adjust(be)) {
ioremap_vmas(ttm, region->page_offset, region->num_pages,
aper_offset);
drm_ttm_unlock_mm(ttm, TRUE, FALSE);
}
region->state = ttm_bound;
return 0;
}
int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
unsigned long aper_offset)
{
return drm_bind_ttm_region(entry, aper_offset);
}
/*
* Destroy an anonymous ttm region.
*/
void drm_user_destroy_region(drm_ttm_backend_list_t * entry)
{
drm_ttm_backend_t *be;
struct page **cur_page;
int i;
if (!entry || entry->owner)
return;
be = entry->be;
if (!be) {
drm_free(entry, sizeof(*entry), DRM_MEM_MAPS);
return;
}
be->unbind(be);
if (entry->anon_pages) {
cur_page = entry->anon_pages;
for (i = 0; i < entry->anon_locked; ++i) {
if (!PageReserved(*cur_page))
SetPageDirty(*cur_page);
page_cache_release(*cur_page);
cur_page++;
}
vfree(entry->anon_pages);
}
be->destroy(be);
drm_free(entry, sizeof(*entry), DRM_MEM_MAPS);
return;
}
/*
 * Create a ttm region from an arbitrary region of user pages.
 * Since this region has no backing ttm, its owner is set to
 * NULL, and it is registered with the file of the caller. It
 * gets destroyed when the file is closed. We call this an
 * anonymous ttm region.
 */
int drm_user_create_region(drm_device_t * dev, unsigned long start, int len,
drm_ttm_backend_list_t ** entry)
{
drm_ttm_backend_list_t *tmp;
drm_ttm_backend_t *be;
int ret;
if (len <= 0)
return -EINVAL;
if (!dev->driver->bo_driver->create_ttm_backend_entry)
return -EFAULT;
tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_MAPS);
if (!tmp)
return -ENOMEM;
be = dev->driver->bo_driver->create_ttm_backend_entry(dev, 1);
tmp->be = be;
if (!be) {
drm_user_destroy_region(tmp);
return -ENOMEM;
}
if (be->needs_cache_adjust(be)) {
drm_user_destroy_region(tmp);
return -EFAULT;
}
tmp->anon_pages = vmalloc(sizeof(*(tmp->anon_pages)) * len);
if (!tmp->anon_pages) {
drm_user_destroy_region(tmp);
return -ENOMEM;
}
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm, start, len, 1, 0,
tmp->anon_pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != len) {
drm_user_destroy_region(tmp);
DRM_ERROR("Could not lock %d pages. Return code was %d\n",
len, ret);
return -EPERM;
}
tmp->anon_locked = len;
ret = be->populate(be, len, tmp->anon_pages);
if (ret) {
drm_user_destroy_region(tmp);
return ret;
}
tmp->state = ttm_unbound;
#ifdef REMOVED
tmp->mm = &dev->driver->bo_driver->ttm_mm;
#endif
*entry = tmp;
return 0;
}
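A hedged caller-side sketch of the function above; start, len, and the surrounding ioctl plumbing are invented, and error handling is trimmed:

        drm_ttm_backend_list_t *region;
        int err;

        err = drm_user_create_region(dev, start, len, &region);
        if (err)
                return err;
        /* ... use the pinned pages via region->be ... */
        drm_user_destroy_region(region);        /* release the page pins */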
/*
* Create a ttm and add it to the drm book-keeping.
*/
int drm_add_ttm(drm_device_t * dev, unsigned size, drm_map_list_t ** maplist)
{
drm_map_list_t *list;
drm_map_t *map;
drm_ttm_t *ttm;
map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
if (!map)
return -ENOMEM;
ttm = drm_init_ttm(dev, size);
if (!ttm) {
DRM_ERROR("Could not create ttm\n");
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -ENOMEM;
}
map->offset = ttm->lhandle;
map->type = _DRM_TTM;
map->flags = _DRM_REMOVABLE;
map->size = size;
list = drm_calloc(1, sizeof(*list), DRM_MEM_MAPS);
if (!list) {
drm_destroy_ttm(ttm);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -ENOMEM;
}
map->handle = (void *)list;
#ifdef REMOVED
if (drm_ht_just_insert_please(&dev->maphash, &list->hash,
(unsigned long) map->handle,
32 - PAGE_SHIFT)) {
drm_destroy_ttm(ttm);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
return -ENOMEM;
}
#endif
list->user_token =
(list->hash.key << PAGE_SHIFT) + DRM_MAP_HASH_OFFSET;
list->map = map;
*maplist = list;
return 0;
}

linux-core/drm_ttm.h Normal file
View file

@ -0,0 +1,152 @@
#ifndef _DRM_TTM_H
#define _DRM_TTM_H
#define DRM_HAS_TTM
/*
 * The backend GART interface. (In our case AGP.) Any similar type of device (PCIE?)
 * needs only to implement these functions to be usable with the "TTM" interface.
 * The AGP backend implementation lives in drm_agpsupport.c and basically maps
 * these calls to available functions in agpgart. Each drm device driver gets an
 * additional function pointer that creates these types, so that the device can
 * choose the correct aperture. (Multiple AGP apertures, etc.)
 * Most device drivers will let this point to the standard AGP implementation.
 */
typedef struct drm_ttm_backend {
unsigned long aperture_base;
void *private;
int (*needs_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend, unsigned long offset);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_t;
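For illustration only, a minimal do-nothing backend satisfying this interface could look as follows; all null_* names are hypothetical, and a real backend would program GART entries in bind()/unbind():

static int null_needs_cache_adjust(drm_ttm_backend_t *backend)
{
        return 0;               /* pages may stay cached */
}

static int null_populate(drm_ttm_backend_t *backend,
                         unsigned long num_pages, struct page **pages)
{
        backend->private = pages;       /* just remember the page array */
        return 0;
}

static void null_clear(drm_ttm_backend_t *backend)
{
        backend->private = NULL;
}

static int null_bind(drm_ttm_backend_t *backend, unsigned long offset)
{
        return 0;               /* real code: insert pages into the GART */
}

static int null_unbind(drm_ttm_backend_t *backend)
{
        return 0;               /* real code: evict pages from the GART */
}

static void null_destroy(drm_ttm_backend_t *backend)
{
        drm_free(backend, sizeof(*backend), DRM_MEM_MAPS);
}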
#define DRM_FLUSH_READ (0x01)
#define DRM_FLUSH_WRITE (0x02)
#define DRM_FLUSH_EXE (0x04)
typedef struct drm_ttm_backend_list {
drm_hash_item_t hash;
uint32_t flags;
atomic_t refcount;
struct list_head head;
drm_ttm_backend_t *be;
unsigned page_offset;
unsigned num_pages;
struct drm_ttm *owner;
drm_file_t *anon_owner;
struct page **anon_pages;
int anon_locked;
enum {
ttm_bound,
ttm_evicted,
ttm_unbound
} state;
} drm_ttm_backend_list_t;
typedef struct drm_ttm_vma_list {
struct list_head head;
pgprot_t orig_protection;
struct vm_area_struct *vma;
drm_map_t *map;
} drm_ttm_vma_list_t;
typedef struct drm_ttm {
struct list_head p_mm_list;
atomic_t shared_count;
uint32_t mm_list_seq;
unsigned long aperture_base;
struct page **pages;
uint32_t *page_flags;
unsigned long lhandle;
unsigned long num_pages;
drm_ttm_vma_list_t *vma_list;
struct drm_device *dev;
drm_ttm_backend_list_t *be_list;
atomic_t vma_count;
atomic_t unfinished_regions;
drm_file_t *owner;
int destroy;
int mmap_sem_locked;
} drm_ttm_t;
/*
 * Initialize a ttm. The size is currently fixed. drmAddMap calls this function,
 * creates a DRM map of type _DRM_TTM, and returns a reference to that map to the
 * caller.
 */
drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size);
/*
* Bind a part of the ttm, starting at page_offset and spanning n_pages, into the GTT
* at aperture offset aper_offset. The region handle will be used to reference this
* bound region in the future. Note that the region may be the whole ttm.
* Regions should not overlap.
* This function sets all affected pages as noncacheable and flushes caches and TLBs.
*/
int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
unsigned long n_pages, int cached,
drm_ttm_backend_list_t ** region);
int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
unsigned long aper_offset);
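Taken together, a hedged usage sketch of the calls declared above; the ttm size, page count, and aper_offset are invented:

        drm_ttm_t *ttm = drm_init_ttm(dev, 4 << 20);    /* 4 MiB ttm */
        drm_ttm_backend_list_t *region;

        if (ttm && !drm_create_ttm_region(ttm, 0, 16, 0 /* uncached */,
                                          &region)) {
                if (drm_bind_ttm_region(region, aper_offset))
                        drm_destroy_ttm_region(region);
        }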
/*
* Unbind a ttm region. Restores caching policy. Flushes caches and TLB.
*/
void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry);
void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry);
/*
* Evict a ttm region. Keeps Aperture caching policy.
*/
int drm_evict_ttm_region(drm_ttm_backend_list_t * entry);
/*
* Rebind an already evicted region into a possibly new location in the aperture.
*/
int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
unsigned long aper_offset);
/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
 * which calls this function only if there are no vmas referencing the ttm anymore.
 * Otherwise it is called when the last vma exits.
 */
extern int drm_destroy_ttm(drm_ttm_t * ttm);
extern void drm_user_destroy_region(drm_ttm_backend_list_t * entry);
extern int drm_ttm_add_mm_to_list(drm_ttm_t * ttm, struct mm_struct *mm);
extern void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm);
extern void drm_ttm_fence_before_destroy(drm_ttm_t * ttm);
extern void drm_fence_unfenced_region(drm_ttm_backend_list_t * entry);
extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_fence_ioctl(DRM_IOCTL_ARGS);
#define DRM_MASK_VAL(dest, mask, val) \
(dest) = ((dest) & ~(mask)) | ((val) & (mask));
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
* Page flags.
*/
#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#endif
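A worked example of the packing these masks implement; illustrative only, mirroring what drm_bind_ttm_region() stores for each bound page:

        uint32_t flags = 0;

        /* Store aperture pfn 0x42 in the high bits; flag bits survive. */
        DRM_MASK_VAL(flags, DRM_TTM_MASK_PFN, 0x42UL << PAGE_SHIFT);
        /* Mark the page uncached without disturbing the stored pfn. */
        DRM_MASK_VAL(flags, DRM_TTM_PAGE_UNCACHED, DRM_TTM_PAGE_UNCACHED);
        /* (flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT now yields 0x42. */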

View file

@ -34,12 +34,42 @@
*/
#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_close(struct vm_area_struct *vma);
static int drm_vm_ttm_open(struct vm_area_struct *vma);
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
pgprot_t tmp = drm_prot_map(vma->vm_flags);
#if defined(__i386__) || defined(__x86_64__)
if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
pgprot_val(tmp) |= _PAGE_PCD;
pgprot_val(tmp) &= ~_PAGE_PWT;
}
#elif defined(__powerpc__)
pgprot_val(tmp) |= _PAGE_NO_CACHE;
if (map_type == _DRM_REGISTERS)
pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma->vm_start))
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
/**
* \c nopage method for AGP virtual memory.
@ -129,6 +159,131 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
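/*
 * Remap the bound pages of a ttm into a vma. Walks the per-page flags
 * and coalesces runs of consecutively bound, uncached pages into as few
 * io_remap_pfn_range() calls as possible.
 */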
static int drm_ttm_remap_bound_pfn(struct vm_area_struct *vma,
unsigned long address,
unsigned long size)
{
unsigned long
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
unsigned long
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
vma->vm_private_data;
drm_map_t *map = entry->map;
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
unsigned long i, cur_pfn;
unsigned long start = 0;
unsigned long end = 0;
unsigned long last_pfn = 0;
unsigned long start_pfn = 0;
int bound_sequence = FALSE;
int ret = 0;
uint32_t cur_flags;
for (i=page_offset; i<page_offset + num_pages; ++i) {
cur_flags = ttm->page_flags[i];
if (!bound_sequence && (cur_flags & DRM_TTM_PAGE_UNCACHED)) {
start = i;
end = i;
last_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
start_pfn = last_pfn;
bound_sequence = TRUE;
} else if (bound_sequence) {
cur_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
if ( !(cur_flags & DRM_TTM_PAGE_UNCACHED) ||
(cur_pfn != last_pfn + 1)) {
ret = io_remap_pfn_range(vma,
vma->vm_start + (start << PAGE_SHIFT),
(ttm->aperture_base >> PAGE_SHIFT)
+ start_pfn,
(end - start + 1) << PAGE_SHIFT,
drm_io_prot(_DRM_AGP, vma));
if (ret)
break;
bound_sequence = (cur_flags & DRM_TTM_PAGE_UNCACHED);
if (!bound_sequence)
continue;
start = i;
end = i;
last_pfn = cur_pfn;
start_pfn = last_pfn;
} else {
end++;
last_pfn = cur_pfn;
}
}
}
if (!ret && bound_sequence) {
ret = io_remap_pfn_range(vma,
vma->vm_start + (start << PAGE_SHIFT),
(ttm->aperture_base >> PAGE_SHIFT)
+ start_pfn,
(end - start + 1) << PAGE_SHIFT,
drm_io_prot(_DRM_AGP, vma));
}
if (ret) {
DRM_ERROR("Map returned %c\n", ret);
}
return ret;
}
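/*
 * nopage handler for ttm maps. Allocates and reserves backing pages on
 * first touch. Uncached (bound) pages never fault here; they are remapped
 * into the vma directly at mmap and bind time.
 */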
static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address)
{
drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
vma->vm_private_data;
drm_map_t *map;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
pgprot_t default_prot;
uint32_t page_flags;
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!entry)
return NOPAGE_OOM; /* Nothing allocated */
map = (drm_map_t *) entry->map;
ttm = (drm_ttm_t *) map->offset;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
page_flags = ttm->page_flags[page_offset];
if (!page) {
page = ttm->pages[page_offset] =
alloc_page(GFP_KERNEL);
if (!page)
return NOPAGE_OOM;
SetPageReserved(page);
}
get_page(page);
default_prot = drm_prot_map(vma->vm_flags);
BUG_ON(page_flags & DRM_TTM_PAGE_UNCACHED);
vma->vm_page_prot = default_prot;
return page;
}
/**
* \c nopage method for shared virtual memory.
*
@ -243,6 +398,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
case _DRM_TTM:
BUG_ON(1);
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
@ -358,6 +516,15 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
if (type)
*type = VM_FAULT_MINOR;
return drm_do_vm_ttm_nopage(vma, address);
}
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
@ -384,6 +551,13 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
{
return drm_do_vm_ttm_nopage(vma, address);
}
#endif
/** AGP virtual memory operations */
@ -414,6 +588,12 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
static struct vm_operations_struct drm_vm_ttm_ops = {
.nopage = drm_vm_ttm_nopage,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
/**
* \c open method for shared virtual memory.
*
@ -443,6 +623,46 @@ static void drm_vm_open(struct vm_area_struct *vma)
}
}
static int drm_vm_ttm_open(struct vm_area_struct *vma)
{
drm_ttm_vma_list_t *entry, *tmp_vma =
(drm_ttm_vma_list_t *) vma->vm_private_data;
drm_map_t *map;
drm_ttm_t *ttm;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
int ret = 0;
drm_vm_open(vma);
mutex_lock(&dev->struct_mutex);
entry = drm_calloc(1, sizeof(*entry), DRM_MEM_VMAS);
if (entry) {
*entry = *tmp_vma;
map = (drm_map_t *) entry->map;
ttm = (drm_ttm_t *) map->offset;
/* ret = drm_ttm_add_mm_to_list(ttm, vma->vm_mm); */
if (!ret) {
atomic_inc(&ttm->vma_count);
INIT_LIST_HEAD(&entry->head);
entry->vma = vma;
entry->orig_protection = vma->vm_page_prot;
list_add_tail(&entry->head, &ttm->vma_list->head);
vma->vm_private_data = (void *) entry;
DRM_DEBUG("Added VMA to ttm at 0x%016lx\n",
(unsigned long) ttm);
}
} else {
ret = -ENOMEM;
}
mutex_unlock(&dev->struct_mutex);
return ret;
}
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
{
drm_vm_ttm_open(vma);
}
/**
* \c close method for all virtual memory types.
*
@ -476,6 +696,47 @@ static void drm_vm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
drm_ttm_vma_list_t *ttm_vma =
(drm_ttm_vma_list_t *) vma->vm_private_data;
drm_map_t *map;
drm_ttm_t *ttm;
int found_maps;
struct list_head *list;
drm_device_t *dev;
drm_vm_close(vma);
if (ttm_vma) {
map = (drm_map_t *) ttm_vma->map;
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
list_del(&ttm_vma->head);
/* drm_ttm_delete_mm(ttm, vma->vm_mm); */
drm_free(ttm_vma, sizeof(*ttm_vma), DRM_MEM_VMAS);
atomic_dec(&ttm->vma_count);
found_maps = 0;
list = NULL;
#if 0 /* Reimplement with vma_count */
list_for_each(list, &ttm->owner->ttms) {
r_list = list_entry(list, drm_map_list_t, head);
if (r_list->map == map)
found_maps++;
}
if (!found_maps) {
if (drm_destroy_ttm(ttm) != -EBUSY) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
}
#endif
mutex_unlock(&dev->struct_mutex);
}
return;
}
/**
* mmap DMA memory.
*
@ -620,27 +881,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
/* fall through to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
}
#elif defined(__powerpc__)
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
if (map->type == _DRM_REGISTERS)
pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
vma->vm_flags |= VM_IO; /* not in core dump */
#if defined(__ia64__)
if (efi_range_is_wc(vma->vm_start, vma->vm_end -
vma->vm_start))
vma->vm_page_prot =
pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot =
pgprot_noncached(vma->vm_page_prot);
#endif
offset = dev->driver->get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >>PAGE_SHIFT,
@ -687,6 +930,22 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED;
#endif
break;
case _DRM_TTM: {
drm_ttm_vma_list_t tmp_vma;
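/*
 * tmp_vma lives on the stack; drm_vm_ttm_open() copies it into a
 * persistent entry on the ttm's vma list.
 */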
tmp_vma.orig_protection = vma->vm_page_prot;
tmp_vma.map = map;
vma->vm_ops = &drm_vm_ttm_ops;
vma->vm_private_data = (void *) &tmp_vma;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
if (drm_ttm_remap_bound_pfn(vma,
vma->vm_start,
vma->vm_end - vma->vm_start))
return -EAGAIN;
if (drm_vm_ttm_open(vma))
return -EAGAIN;
return 0;
}
default:
return -EINVAL; /* This should never happen. */
}

View file

@ -260,6 +260,7 @@ typedef enum drm_map_type {
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_TTM = 6 /**< Translation table memory (managed by drm_ttm.c) */
} drm_map_type_t;
/**