drm: remove drm_follow_page, and drm_ioremap and ioremapfree

This comes from the "Re: [patch] paravirt: isolate module ops" thread on lkml.
It needs some testing; please report any regressions it causes.

Signed-off-by: Dave Airlie <airlied@linux.ie>
This commit is contained in:
Christoph Hellwig 2007-01-08 10:55:49 +11:00 committed by Dave Airlie
parent fe5770b89e
commit b147c39263
7 changed files with 32 additions and 240 deletions

View file

@@ -1457,26 +1457,8 @@ extern int drm_fence_buffer_objects(drm_file_t * priv,
drm_fence_object_t *fence,
drm_fence_object_t **used_fence);
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map,
struct drm_device *dev)
{
map->handle = drm_ioremap(map->offset, map->size, dev);
}
static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
struct drm_device *dev)
{
map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
}
static __inline__ void drm_core_ioremapfree(struct drm_map *map,
struct drm_device *dev)
{
if (map->handle && map->size)
drm_ioremapfree(map->handle, map->size, dev);
}
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)

View file

@@ -179,7 +179,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
}
}
if (map->type == _DRM_REGISTERS)
map->handle = drm_ioremap(map->offset, map->size, dev);
map->handle = ioremap(map->offset, map->size);
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
@@ -279,6 +279,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
if (!list) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
@@ -295,6 +297,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
ret = drm_map_handle(dev, &list->hash, user_token, 0);
if (ret) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
mutex_unlock(&dev->struct_mutex);
@@ -402,7 +406,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
switch (map->type) {
case _DRM_REGISTERS:
drm_ioremapfree(map->handle, map->size, dev);
iounmap(map->handle);
/* FALLTHROUGH */
case _DRM_FRAME_BUFFER:
if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {

View file

@@ -244,3 +244,26 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
}
#endif /* agp */
#endif /* debug_memory */
void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
{
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL_GPL(drm_core_ioremap);
void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
return;
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
}
EXPORT_SYMBOL_GPL(drm_core_ioremapfree);

View file

@@ -122,19 +122,6 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
return addr;
}
static inline unsigned long drm_follow_page(void *vaddr)
{
pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10)
pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr);
#else
pud_t *pud = pud_offset(pgd, (unsigned long) vaddr);
pmd_t *pmd = pmd_offset(pud, (unsigned long) vaddr);
#endif
pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
return pte_pfn(*ptep) << PAGE_SHIFT;
}
#else /* __OS_HAS_AGP */
static inline drm_map_t *drm_lookup_map(unsigned long offset,
@@ -149,67 +136,4 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
return NULL;
}
static inline unsigned long drm_follow_page(void *vaddr)
{
return 0;
}
#endif
#ifndef DEBUG_MEMORY
static inline void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
drm_map_t *map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP)
return agp_remap(offset, size, dev);
}
return ioremap(offset, size);
}
static inline void *drm_ioremap_nocache(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
drm_map_t *map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP)
return agp_remap(offset, size, dev);
}
return ioremap_nocache(offset, size);
}
static inline void drm_ioremapfree(void *pt, unsigned long size,
drm_device_t * dev)
{
/*
* This is a bit ugly. It would be much cleaner if the DRM API would use separate
* routines for handling mappings in the AGP space. Hopefully this can be done in
* a future revision of the interface...
*/
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
&& ((unsigned long)pt >= VMALLOC_START
&& (unsigned long)pt < VMALLOC_END)) {
unsigned long offset;
drm_map_t *map;
offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP) {
vunmap(pt);
return;
}
}
iounmap(pt);
}
#else
extern void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev);
extern void *drm_ioremap_nocache(unsigned long offset,
unsigned long size, drm_device_t * dev);
extern void drm_ioremapfree(void *pt, unsigned long size,
drm_device_t * dev);
#endif

View file

@@ -289,79 +289,6 @@ void drm_free_pages(unsigned long address, int order, int area)
}
}
void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
EXPORT_SYMBOL(drm_ioremap);
void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
EXPORT_SYMBOL(drm_ioremap_nocache);
void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev)
{
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
drm_ioremapfree(pt, size, dev);
spin_lock(&drm_mem_lock);
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
EXPORT_SYMBOL(drm_ioremapfree);
#if __OS_HAS_AGP
DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)

View file

@@ -275,74 +275,6 @@ void drm_free_pages (unsigned long address, int order, int area) {
}
}
void *drm_ioremap (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
drm_ioremapfree(pt, size, dev);
spin_lock(&drm_mem_lock);
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
#if __OS_HAS_AGP
DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {

View file

@@ -349,7 +349,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
drm_ioremapfree(map->handle, map->size, dev);
iounmap(map->handle);
break;
case _DRM_SHM:
vfree(map->handle);