Bugfixes.

Memory allocation optimizations.
Buffer manager takedown.
This commit is contained in:
Thomas Hellstrom 2006-09-01 15:41:55 +02:00
parent 4edb95d6e0
commit 11f51a9a87
6 changed files with 216 additions and 36 deletions

View file

@ -84,6 +84,7 @@
#include <linux/poll.h>
#include <asm/pgalloc.h>
#include "drm.h"
#include <linux/slab.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@ -779,6 +780,8 @@ typedef struct drm_buffer_manager{
int initialized;
int has_vram;
int has_tt;
int use_vram;
int use_tt;
drm_mm_t tt_manager;
struct list_head tt_lru;
drm_mm_t vram_manager;
@ -922,6 +925,12 @@ typedef struct drm_device {
drm_fence_manager_t fm;
drm_buffer_manager_t bm;
/*
* Memory caches
*/
kmem_cache_t *mm_cache;
kmem_cache_t *fence_object_cache;
} drm_device_t;
@ -1293,7 +1302,8 @@ extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(drm_mm_t *mm);
extern void drm_mm_set_cache(kmem_cache_t *cache);
extern int drm_mm_clean(drm_mm_t *mm);
/*
* User space object bookkeeping (drm_object.c)
@ -1377,6 +1387,7 @@ extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_clean_mm(drm_device_t *dev);
/* Inline replacements for DRM_IOREMAP macros */

View file

@ -231,7 +231,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
goto out;
}
} else {
fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
fence = kmem_cache_alloc(dev->fence_object_cache, GFP_KERNEL);
if (!fence) {
ret = -ENOMEM;
@ -241,7 +241,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
ret = drm_fence_object_init(dev, fence_flags, 1, fence);
if (ret) {
drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
kmem_cache_free(dev->fence_object_cache, fence);
goto out;
}
}
@ -468,18 +468,24 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
return 0;
}
static int drm_bo_new_flags(drm_bo_driver_t * driver,
static int drm_bo_new_flags(drm_device_t *dev,
uint32_t flags, uint32_t new_mask, uint32_t hint,
int init, uint32_t * n_flags, uint32_t * n_mask)
{
uint32_t new_flags = 0;
uint32_t new_props;
drm_bo_driver_t *driver = dev->driver->bo_driver;
drm_buffer_manager_t *bm = &dev->bm;
/*
* First adjust the mask. Vram is not supported yet.
* First adjust the mask.
*/
new_mask &= ~DRM_BO_FLAG_MEM_VRAM;
if (!bm->use_vram)
new_mask &= ~DRM_BO_FLAG_MEM_VRAM;
if (!bm->use_tt)
new_mask &= ~DRM_BO_FLAG_MEM_TT;
if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
if (((new_mask & DRM_BO_FLAG_MEM_TT) && !driver->cached_tt) &&
@ -986,7 +992,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
if (ret)
goto out;
ret = drm_bo_new_flags(dev->driver->bo_driver, bo->flags,
ret = drm_bo_new_flags(dev, bo->flags,
(flags & mask) | (bo->flags & ~mask), hint,
0, &new_flags, &bo->mask);
@ -1112,7 +1118,7 @@ int drm_buffer_object_create(drm_file_t * priv,
bo->buffer_start = buffer_start;
bo->priv_flags = 0;
bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
ret = drm_bo_new_flags(dev->driver->bo_driver, bo->flags, mask, hint,
ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
1, &new_flags, &bo->mask);
DRM_ERROR("New flags: 0x%08x\n", new_flags);
if (ret)
@ -1194,7 +1200,12 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
unsigned long next;
drm_user_object_t *uo;
drm_buffer_object_t *entry;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
do {
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
@ -1307,9 +1318,87 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
return 0;
}
int drm_bo_clean_mm(drm_file_t *priv)
/*
* dev->struct_sem locked.
*/
static void drm_bo_force_clean(drm_device_t * dev)
{
return 0;
drm_buffer_manager_t *bm = &dev->bm;
drm_buffer_object_t *entry, *next;
int nice_mode = 1;
int ret = 0;
list_for_each_entry_safe(entry, next, &bm->ddestroy, ddestroy) {
if (entry->fence) {
if (nice_mode) {
unsigned long _end = jiffies + 3*DRM_HZ;
do {
ret = drm_bo_wait(entry, 0, 0);
} while ((ret == -EINTR) &&
!time_after_eq(jiffies, _end));
} else {
drm_fence_usage_deref_locked(dev, entry->fence);
entry->fence = NULL;
}
if (entry->fence) {
DRM_ERROR("Detected GPU hang. "
"Removing waiting buffers.\n");
nice_mode = 0;
drm_fence_usage_deref_locked(dev, entry->fence);
entry->fence = NULL;
}
}
DRM_DEBUG("Destroying delayed buffer object\n");
list_del(&entry->ddestroy);
drm_bo_destroy_locked(dev, entry);
}
}
/*
 * Tear down the buffer-object memory managers.
 *
 * Drains the delayed-destroy list, then takes down the VRAM and TT
 * managers if their spaces hold no outstanding allocations.
 *
 * Returns 0 on complete takedown, -EBUSY if either space is still in
 * use (the busy manager is left in place and the manager remains
 * marked initialized).
 */
int drm_bo_clean_mm(drm_device_t *dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;

	drm_bo_force_clean(dev);
	/* Refuse any new VRAM / TT placements from here on. */
	bm->use_vram = 0;
	bm->use_tt = 0;

	if (bm->has_vram) {
		if (drm_mm_clean(&bm->vram_manager)) {
			drm_mm_takedown(&bm->vram_manager);
			bm->has_vram = 0;
		} else
			ret = -EBUSY;
	}

	if (bm->has_tt) {
		if (drm_mm_clean(&bm->tt_manager)) {
			drm_mm_takedown(&bm->tt_manager);
			bm->has_tt = 0;
		} else
			ret = -EBUSY;
	}

	/*
	 * BUGFIX: this was previously done only inside the has_tt branch,
	 * so a device without a TT manager could never be marked
	 * uninitialized even after a fully clean takedown.
	 */
	if (!ret)
		bm->initialized = 0;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
@ -1331,33 +1420,38 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
switch (arg.req.op) {
case mm_init:
#if 0
if (bm->initialized) {
DRM_ERROR("Memory manager already initialized\n");
return -EINVAL;
}
#endif
mutex_lock(&dev->struct_mutex);
bm->has_vram = 0;
bm->has_tt = 0;
if (arg.req.vr_p_size) {
ret = drm_mm_init(&bm->vram_manager,
arg.req.vr_p_offset,
arg.req.vr_p_size);
bm->has_vram = 1;
/*
* VRAM not supported yet.
*/
bm->use_vram = 0;
if (ret)
break;
}
if (arg.req.tt_p_size) {
DRM_ERROR("Initializing TT 0x%08x 0x%08x\n",
DRM_ERROR("Initializing TT 0x%08x 0x%08x\n",
arg.req.tt_p_offset,
arg.req.tt_p_size);
ret = drm_mm_init(&bm->tt_manager,
arg.req.tt_p_offset,
arg.req.tt_p_size);
bm->has_tt = 1;
bm->use_tt = 1;
if (ret) {
if (bm->has_vram)
drm_mm_takedown(&bm->vram_manager);
@ -1379,17 +1473,10 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
bm->initialized = 1;
break;
case mm_takedown:
if (!bm->initialized) {
DRM_ERROR("Memory manager was not initialized\n");
return -EINVAL;
if (drm_bo_clean_mm(dev)) {
DRM_ERROR("Memory manager not clean. "
"Delaying takedown\n");
}
mutex_lock(&dev->struct_mutex);
drm_bo_clean_mm(priv);
if (bm->has_vram)
drm_mm_takedown(&bm->vram_manager);
if (bm->has_tt)
drm_mm_takedown(&bm->tt_manager);
bm->initialized = 0;
break;
default:
DRM_ERROR("Function not implemented yet\n");

View file

@ -128,6 +128,29 @@ static drm_ioctl_desc_t drm_ioctls[] = {
#define DRIVER_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/*
 * Destroy a slab cache, complaining if objects are still live in it.
 * A NULL cache is silently ignored.
 */
static void drm_free_mem_cache(kmem_cache_t *cache, const char *name)
{
	if (cache && kmem_cache_destroy(cache))
		DRM_ERROR("Warning! DRM is leaking %s memory.\n", name);
}
/*
 * Release the per-device slab caches created by the stub's
 * drm_create_memory_caches(), in reverse creation order.
 */
static void drm_free_memory_caches(drm_device_t *dev)
{
drm_free_mem_cache(dev->fence_object_cache, "fence object");
dev->fence_object_cache = NULL;
/* Detach the drm_mm global cache pointer before its cache is destroyed. */
drm_mm_set_cache(NULL);
drm_free_mem_cache(dev->mm_cache, "memory manager block");
dev->mm_cache = NULL;
}
/**
* Take down the DRM device.
*
@ -249,6 +272,10 @@ int drm_lastclose(drm_device_t * dev)
}
mutex_unlock(&dev->struct_mutex);
if (drm_bo_clean_mm(dev)) {
DRM_ERROR("DRM memory manager still busy. "
"System is unstable. Please reboot.\n");
}
DRM_DEBUG("lastclose completed\n");
return 0;
}
@ -351,7 +378,7 @@ static void drm_cleanup(drm_device_t * dev)
}
drm_lastclose(dev);
drm_free_memory_caches(dev);
drm_fence_manager_takedown(dev);
if (dev->maplist) {

View file

@ -172,7 +172,7 @@ void drm_fence_usage_deref_locked(drm_device_t * dev,
{
if (atomic_dec_and_test(&fence->usage)) {
drm_fence_unring(dev, &fence->ring);
drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
kmem_cache_free(dev->fence_object_cache, fence);
}
}
@ -183,7 +183,7 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev,
mutex_lock(&dev->struct_mutex);
if (atomic_read(&fence->usage) == 0) {
drm_fence_unring(dev, &fence->ring);
drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
kmem_cache_free(dev->fence_object_cache, fence);
}
mutex_unlock(&dev->struct_mutex);
}
@ -426,7 +426,7 @@ static int drm_fence_object_create(drm_file_t * priv, uint32_t type,
int ret;
drm_fence_object_t *fence;
fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
fence = kmem_cache_alloc(dev->fence_object_cache, GFP_KERNEL);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, type, emit, fence);

View file

@ -42,6 +42,9 @@
*/
#include "drmP.h"
#include <linux/slab.h>
static kmem_cache_t *mm_cache = NULL;
drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
unsigned long size, unsigned alignment)
@ -57,7 +60,9 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
parent->free = 0;
return parent;
} else {
child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
child = (drm_mm_node_t *) kmem_cache_alloc(mm_cache,
GFP_KERNEL);
if (!child)
return NULL;
@ -105,8 +110,8 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
drm_free(next_node, sizeof(*next_node),
DRM_MEM_MM);
kmem_cache_free(mm_cache, next_node);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@ -119,7 +124,7 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
list_add(&cur->fl_entry, &list_root->fl_entry);
} else {
list_del(&cur->ml_entry);
drm_free(cur, sizeof(*cur), DRM_MEM_MM);
kmem_cache_free(mm_cache, cur);
}
}
@ -154,13 +159,34 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
return best;
}
/*
 * Install the slab cache used for drm_mm_node_t allocations.
 * Must be non-NULL before drm_mm_init() is called (drm_mm_init()
 * fails with -EINVAL otherwise); pass NULL to detach the cache
 * before it is destroyed.
 */
void drm_mm_set_cache(kmem_cache_t *cache)
{
mm_cache = cache;
}
/*
 * Report whether the memory manager is clean: its memory list holds at
 * most a single node (the initial free block), i.e. no further blocks
 * are outstanding. Nonzero means clean.
 */
int drm_mm_clean(drm_mm_t *mm)
{
	return mm->root_node.ml_entry.next->next == &mm->root_node.ml_entry;
}
int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
{
drm_mm_node_t *child;
if (!mm_cache) {
DRM_ERROR("Memory manager memory cache "
"is not initialized.\n");
return -EINVAL;
}
INIT_LIST_HEAD(&mm->root_node.ml_entry);
INIT_LIST_HEAD(&mm->root_node.fl_entry);
child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
child = (drm_mm_node_t *) kmem_cache_alloc(mm_cache, GFP_KERNEL);
if (!child)
return -ENOMEM;
@ -194,8 +220,7 @@ void drm_mm_takedown(drm_mm_t * mm)
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
kmem_cache_free(mm_cache, entry);
}
EXPORT_SYMBOL(drm_mm_takedown);

View file

@ -54,6 +54,30 @@ drm_head_t **drm_heads;
struct drm_sysfs_class *drm_class;
struct proc_dir_entry *drm_proc_root;
/*
 * Create the per-device slab caches used by the memory manager and the
 * fence machinery, and register the mm cache with drm_mm.
 *
 * On failure every cache created so far is destroyed and the global mm
 * cache pointer is reset, so nothing leaks or dangles.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int drm_create_memory_caches(drm_device_t *dev)
{
	dev->mm_cache = kmem_cache_create("drm_mm_node_t",
					  sizeof(drm_mm_node_t),
					  0,
					  SLAB_HWCACHE_ALIGN,
					  NULL, NULL);
	if (!dev->mm_cache)
		return -ENOMEM;

	drm_mm_set_cache(dev->mm_cache);

	dev->fence_object_cache = kmem_cache_create("drm_fence_object_t",
						    sizeof(drm_fence_object_t),
						    0,
						    SLAB_HWCACHE_ALIGN,
						    NULL, NULL);
	if (!dev->fence_object_cache) {
		/*
		 * BUGFIX: previously the mm cache was leaked here and the
		 * global pointer installed via drm_mm_set_cache() was left
		 * pointing at it.
		 */
		drm_mm_set_cache(NULL);
		kmem_cache_destroy(dev->mm_cache);
		dev->mm_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
@ -127,6 +151,12 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
retcode = drm_create_memory_caches(dev);
if (retcode) {
DRM_ERROR("Failed creating memory caches\n");
goto error_out_unreg;
}
drm_fence_manager_init(dev);
return 0;