Revert "Rename drm_mm.c and its fuctions to drm_memrange."

This reverts commit 3ad8db2071.

We ended up not needing that namespace, and I'd rather not have the churn
for producing diffs.
This commit is contained in:
Eric Anholt 2008-07-30 11:22:48 -07:00
parent 8fc72aef70
commit 33c8e03787
14 changed files with 115 additions and 122 deletions

View file

@@ -12,7 +12,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
drm_regman.o drm_vm_nopage_compat.o drm_gem.o
tdfx-objs := tdfx_drv.o

View file

@@ -554,17 +554,17 @@ struct drm_sigdata {
* Generic memory manager structs
*/
struct drm_memrange_node {
struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
struct drm_memrange *mm;
struct drm_mm *mm;
void *private;
};
struct drm_memrange {
struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
};
@@ -578,7 +578,7 @@ struct drm_map_list {
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
uint64_t user_token;
struct drm_memrange_node *file_offset_node;
struct drm_mm_node *file_offset_node;
};
typedef struct drm_map drm_local_map_t;
@@ -864,7 +864,7 @@ struct drm_device {
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
struct drm_memrange offset_manager; /**< User token manager */
struct drm_mm offset_manager; /**< User token manager */
struct drm_open_hash object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
@@ -1336,27 +1336,22 @@ extern int drm_sysfs_device_add(struct drm_minor *minor);
extern void drm_sysfs_device_remove(struct drm_minor *minor);
/*
* Basic memory manager support (drm_memrange.c)
* Basic memory manager support (drm_mm.c)
*/
extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
unsigned long size,
unsigned alignment);
extern void drm_memrange_put_block(struct drm_memrange_node *cur);
extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
unsigned long size,
unsigned alignment, int best_match);
extern int drm_memrange_init(struct drm_memrange *mm,
unsigned long start, unsigned long size);
extern void drm_memrange_takedown(struct drm_memrange *mm);
extern int drm_memrange_clean(struct drm_memrange *mm);
extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
unsigned long size);
extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
unsigned long size);
extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
unsigned alignment);
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
{
return block->mm;
}

View file

@@ -418,14 +418,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
if (!bo->fence) {
list_del_init(&bo->lru);
if (bo->mem.mm_node) {
drm_memrange_put_block(bo->mem.mm_node);
drm_mm_put_block(bo->mem.mm_node);
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
bo->mem.mm_node = NULL;
}
list_del_init(&bo->pinned_lru);
if (bo->pinned_node) {
drm_memrange_put_block(bo->pinned_node);
drm_mm_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
list_del_init(&bo->ddestroy);
@@ -791,7 +791,7 @@ out:
mutex_lock(&dev->struct_mutex);
if (evict_mem.mm_node) {
if (evict_mem.mm_node != bo->pinned_node)
drm_memrange_put_block(evict_mem.mm_node);
drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -810,7 +810,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
struct drm_bo_mem_reg *mem,
uint32_t mem_type, int no_wait)
{
struct drm_memrange_node *node;
struct drm_mm_node *node;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_buffer_object *entry;
struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -820,7 +820,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
mutex_lock(&dev->struct_mutex);
do {
node = drm_memrange_search_free(&man->manager, num_pages,
node = drm_mm_search_free(&man->manager, num_pages,
mem->page_alignment, 1);
if (node)
break;
@@ -846,7 +846,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
return -ENOMEM;
}
node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
node = drm_mm_get_block(node, num_pages, mem->page_alignment);
if (unlikely(!node)) {
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
@@ -924,7 +924,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
int type_found = 0;
int type_ok = 0;
int has_eagain = 0;
struct drm_memrange_node *node = NULL;
struct drm_mm_node *node = NULL;
int ret;
mem->mm_node = NULL;
@@ -952,10 +952,10 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (man->has_type && man->use_type) {
type_found = 1;
node = drm_memrange_search_free(&man->manager, mem->num_pages,
node = drm_mm_search_free(&man->manager, mem->num_pages,
mem->page_alignment, 1);
if (node)
node = drm_memrange_get_block(node, mem->num_pages,
node = drm_mm_get_block(node, mem->num_pages,
mem->page_alignment);
}
mutex_unlock(&dev->struct_mutex);
@@ -1340,7 +1340,7 @@ out_unlock:
if (ret || !move_unfenced) {
if (mem.mm_node) {
if (mem.mm_node != bo->pinned_node)
drm_memrange_put_block(mem.mm_node);
drm_mm_put_block(mem.mm_node);
mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -1432,7 +1432,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
if (bo->pinned_node != bo->mem.mm_node) {
if (bo->pinned_node != NULL)
drm_memrange_put_block(bo->pinned_node);
drm_mm_put_block(bo->pinned_node);
bo->pinned_node = bo->mem.mm_node;
}
@@ -1443,7 +1443,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (bo->pinned_node != bo->mem.mm_node)
drm_memrange_put_block(bo->pinned_node);
drm_mm_put_block(bo->pinned_node);
list_del_init(&bo->pinned_lru);
bo->pinned_node = NULL;
@@ -2082,7 +2082,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
if (bo->pinned_node != NULL) {
drm_memrange_put_block(bo->pinned_node);
drm_mm_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -2223,8 +2223,8 @@ int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
if (drm_memrange_clean(&man->manager)) {
drm_memrange_takedown(&man->manager);
if (drm_mm_clean(&man->manager)) {
drm_mm_takedown(&man->manager);
} else {
ret = -EBUSY;
}
@@ -2295,7 +2295,7 @@ int drm_bo_init_mm(struct drm_device *dev, unsigned type,
DRM_ERROR("Zero size memory manager type %d\n", type);
return ret;
}
ret = drm_memrange_init(&man->manager, p_offset, p_size);
ret = drm_mm_init(&man->manager, p_offset, p_size);
if (ret)
return ret;
}
@@ -2713,7 +2713,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
list->user_token = 0;
}
if (list->file_offset_node) {
drm_memrange_put_block(list->file_offset_node);
drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
@@ -2756,7 +2756,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
atomic_inc(&bo->usage);
map->handle = (void *)bo;
list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
bo->mem.num_pages, 0, 0);
if (unlikely(!list->file_offset_node)) {
@@ -2764,7 +2764,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
return -ENOMEM;
}
list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
bo->mem.num_pages, 0);
if (unlikely(!list->file_offset_node)) {

View file

@@ -41,7 +41,7 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo)
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
drm_memrange_put_block(old_mem->mm_node);
drm_mm_put_block(old_mem->mm_node);
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;

View file

@@ -419,7 +419,7 @@ static void drm_cleanup(struct drm_device * dev)
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
drm_memrange_takedown(&dev->offset_manager);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
drm_put_minor(dev);

View file

@@ -44,26 +44,26 @@
#include "drmP.h"
#include <linux/slab.h>
unsigned long drm_memrange_tail_space(struct drm_memrange *mm)
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
struct drm_memrange_node *entry;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long size)
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
struct drm_memrange_node *entry;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return -ENOMEM;
@@ -75,13 +75,13 @@ int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long s
}
static int drm_memrange_create_tail_node(struct drm_memrange *mm,
static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size)
{
struct drm_memrange_node *child;
struct drm_mm_node *child;
child = (struct drm_memrange_node *)
child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -98,26 +98,26 @@ static int drm_memrange_create_tail_node(struct drm_memrange *mm,
}
int drm_memrange_add_space_to_tail(struct drm_memrange *mm, unsigned long size)
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
struct drm_memrange_node *entry;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
return drm_memrange_create_tail_node(mm, entry->start + entry->size, size);
return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
}
entry->size += size;
return 0;
}
static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange_node *parent,
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size)
{
struct drm_memrange_node *child;
struct drm_mm_node *child;
child = (struct drm_memrange_node *)
child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -137,19 +137,19 @@ static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange
return child;
}
struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size, unsigned alignment)
{
struct drm_memrange_node *align_splitoff = NULL;
struct drm_memrange_node *child;
struct drm_mm_node *align_splitoff = NULL;
struct drm_mm_node *child;
unsigned tmp = 0;
if (alignment)
tmp = parent->start % alignment;
if (tmp) {
align_splitoff = drm_memrange_split_at_start(parent, alignment - tmp);
align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
if (!align_splitoff)
return NULL;
}
@@ -159,41 +159,41 @@ struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * pare
parent->free = 0;
return parent;
} else {
child = drm_memrange_split_at_start(parent, size);
child = drm_mm_split_at_start(parent, size);
}
if (align_splitoff)
drm_memrange_put_block(align_splitoff);
drm_mm_put_block(align_splitoff);
return child;
}
EXPORT_SYMBOL(drm_memrange_get_block);
EXPORT_SYMBOL(drm_mm_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
*/
void drm_memrange_put_block(struct drm_memrange_node * cur)
void drm_mm_put_block(struct drm_mm_node * cur)
{
struct drm_memrange *mm = cur->mm;
struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
struct drm_memrange_node *prev_node = NULL;
struct drm_memrange_node *next_node;
struct drm_mm_node *prev_node = NULL;
struct drm_mm_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
prev_node = list_entry(cur_head->prev, struct drm_memrange_node, ml_entry);
prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
next_node = list_entry(cur_head->next, struct drm_memrange_node, ml_entry);
next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
@@ -216,16 +216,16 @@ void drm_memrange_put_block(struct drm_memrange_node * cur)
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
EXPORT_SYMBOL(drm_memrange_put_block);
EXPORT_SYMBOL(drm_mm_put_block);
struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * mm,
struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
struct drm_memrange_node *entry;
struct drm_memrange_node *best;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
@@ -233,7 +233,7 @@ struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * m
best_size = ~0UL;
list_for_each(list, free_stack) {
entry = list_entry(list, struct drm_memrange_node, fl_entry);
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
if (entry->size < size)
@@ -258,31 +258,31 @@ struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * m
return best;
}
EXPORT_SYMBOL(drm_memrange_search_free);
EXPORT_SYMBOL(drm_mm_search_free);
int drm_memrange_clean(struct drm_memrange * mm)
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
int drm_memrange_init(struct drm_memrange * mm, unsigned long start, unsigned long size)
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
return drm_memrange_create_tail_node(mm, start, size);
return drm_mm_create_tail_node(mm, start, size);
}
EXPORT_SYMBOL(drm_memrange_init);
EXPORT_SYMBOL(drm_mm_init);
void drm_memrange_takedown(struct drm_memrange * mm)
void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
struct drm_memrange_node *entry;
struct drm_mm_node *entry;
entry = list_entry(bnode, struct drm_memrange_node, fl_entry);
entry = list_entry(bnode, struct drm_mm_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
@@ -295,4 +295,4 @@ void drm_memrange_takedown(struct drm_memrange * mm)
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
EXPORT_SYMBOL(drm_memrange_takedown);
EXPORT_SYMBOL(drm_mm_takedown);

View file

@@ -418,7 +418,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm);
*/
struct drm_bo_mem_reg {
struct drm_memrange_node *mm_node;
struct drm_mm_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
@@ -499,7 +499,7 @@ struct drm_buffer_object {
unsigned long num_pages;
/* For pinned buffers */
struct drm_memrange_node *pinned_node;
struct drm_mm_node *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
@@ -534,7 +534,7 @@ struct drm_mem_type_manager {
int has_type;
int use_type;
int kern_init_type;
struct drm_memrange manager;
struct drm_mm manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;

View file

@@ -88,34 +88,34 @@ EXPORT_SYMBOL(drm_sman_init);
static void *drm_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
struct drm_memrange *mm = (struct drm_memrange *) private;
struct drm_memrange_node *tmp;
struct drm_mm *mm = (struct drm_mm *) private;
struct drm_mm_node *tmp;
tmp = drm_memrange_search_free(mm, size, alignment, 1);
tmp = drm_mm_search_free(mm, size, alignment, 1);
if (!tmp) {
return NULL;
}
tmp = drm_memrange_get_block(tmp, size, alignment);
tmp = drm_mm_get_block(tmp, size, alignment);
return tmp;
}
static void drm_sman_mm_free(void *private, void *ref)
{
struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
struct drm_mm_node *node = (struct drm_mm_node *) ref;
drm_memrange_put_block(node);
drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
{
struct drm_memrange *mm = (struct drm_memrange *) private;
drm_memrange_takedown(mm);
struct drm_mm *mm = (struct drm_mm *) private;
drm_mm_takedown(mm);
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
struct drm_mm_node *node = (struct drm_mm_node *) ref;
return node->start;
}
@@ -124,7 +124,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
struct drm_sman_mm *sman_mm;
struct drm_memrange *mm;
struct drm_mm *mm;
int ret;
BUG_ON(manager >= sman->num_managers);
@@ -135,7 +135,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
return -ENOMEM;
}
sman_mm->private = mm;
ret = drm_memrange_init(mm, start, size);
ret = drm_mm_init(mm, start, size);
if (ret) {
drm_free(mm, sizeof(*mm), DRM_MEM_MM);

View file

@@ -45,7 +45,7 @@
/*
* A class that is an abstration of a simple memory allocator.
* The sman implementation provides a default such allocator
* using the drm_memrange.c implementation. But the user can replace it.
* using the drm_mm.c implementation. But the user can replace it.
* See the SiS implementation, which may use the SiS FB kernel module
* for memory management.
*/
@@ -116,7 +116,7 @@ extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
* Initialize a drm_memrange.c allocator. Should be called only once for each
* Initialize a drm_mm.c allocator. Should be called only once for each
* manager unless a customized allogator is used.
*/

View file

@@ -115,15 +115,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
return -ENOMEM;
}
if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_ht_remove(&dev->map_hash);
drm_memrange_takedown(&dev->offset_manager);
drm_mm_takedown(&dev->offset_manager);
return -ENOMEM;
}

View file

@@ -67,7 +67,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
args->gtt_end - args->gtt_start);
dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
@@ -947,7 +947,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
atomic_dec(&dev->gtt_count);
atomic_sub(obj->size, &dev->gtt_memory);
drm_memrange_put_block(obj_priv->gtt_space);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
}
@@ -1101,7 +1101,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_memrange_node *free_space;
struct drm_mm_node *free_space;
int page_count, ret;
if (alignment == 0)
@@ -1112,13 +1112,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
search_free:
free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
obj->size,
alignment, 0);
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
obj->size, alignment, 0);
if (free_space != NULL) {
obj_priv->gtt_space =
drm_memrange_get_block(free_space, obj->size,
alignment);
obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
alignment);
if (obj_priv->gtt_space != NULL) {
obj_priv->gtt_space->private = obj;
obj_priv->gtt_offset = obj_priv->gtt_space->start;
@@ -1152,7 +1150,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#endif
ret = i915_gem_object_get_page_list(obj);
if (ret) {
drm_memrange_put_block(obj_priv->gtt_space);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
return ret;
}
@@ -1167,7 +1165,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
obj_priv->gtt_offset);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_free_page_list(obj);
drm_memrange_put_block(obj_priv->gtt_space);
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
return -ENOMEM;
}

View file

@@ -229,7 +229,7 @@ out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
drm_memrange_put_block(tmp_mem.mm_node);
drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}

View file

@@ -280,7 +280,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_ttm_backend *be;
struct drm_scatter_gather sgreq;
struct drm_memrange_node mm_node;
struct drm_mm_node mm_node;
struct drm_bo_mem_reg mem;
int ret;

View file

@@ -277,7 +277,7 @@ typedef struct drm_i915_private {
u8 saveCR[37];
struct {
struct drm_memrange gtt_space;
struct drm_mm gtt_space;
/**
* List of objects currently involved in rendering from the
@@ -378,7 +378,7 @@ struct drm_i915_gem_object {
struct drm_gem_object *obj;
/** Current space allocated to this object in the GTT, if any. */
struct drm_memrange_node *gtt_space;
struct drm_mm_node *gtt_space;
/** This object's place on the active/flushing/inactive lists */
struct list_head list;