mirror of https://gitlab.freedesktop.org/mesa/drm.git (synced 2025-12-27 23:20:15 +01:00)

use krh's idr mods to remove lists from idr code

parent 3181573073
commit 280083d4a2

7 changed files with 176 additions and 51 deletions
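The whole change follows a single pattern, clearest in drm_ctxbitmap_cleanup() and drm_lastclose() in the hunks below (judging from the code, the seven files are apparently linux-core/drmP.h, drm_compat.c, drm_compat.h, drm_context.c, drm_drawable.c, drm_drv.c and drm_stub.c): the side lists kept only so teardown could visit every context SAREA and drawable (context_sarealist, drwlist) are dropped, and cleanup now walks the idr itself with the idr_for_each()/idr_remove_all() helpers added to drm_compat.c. A rough sketch of that before/after shape, using a hypothetical struct my_entry and plain kfree() in place of the DRM types and drm_free():

#include <linux/idr.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_entry {
        struct list_head head;  /* only needed by the old, list-based scheme */
        int id;
};

/* Old scheme: a list shadows the idr just so cleanup can find every entry. */
static void teardown_with_list(struct idr *idr, struct list_head *all)
{
        struct my_entry *e, *tmp;

        list_for_each_entry_safe(e, tmp, all, head) {
                idr_remove(idr, e->id);
                list_del(&e->head);
                kfree(e);
        }
}

/* New scheme: walk the idr directly, then drop every id in one call. */
static int free_one(int id, void *p, void *data)
{
        kfree(p);
        return 0;       /* returning non-zero would abort the walk */
}

static void teardown_with_idr(struct idr *idr)
{
        idr_for_each(idr, free_one, NULL);
        idr_remove_all(idr);
}

The idr_remove_all() comment in the drm_compat.c hunk also recommends a final idr_destroy() once the idr itself is going away, to release its cached idr_layer allocations.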
@@ -571,8 +571,6 @@ typedef struct drm_ctx_list {
 } drm_ctx_list_t;
 
 struct drm_ctx_sarea_list {
-        struct list_head head;
-        int ctx_id;
         drm_map_t *map;
 };
 
@@ -601,8 +599,6 @@ typedef struct ati_pcigart_info {
 } drm_ati_pcigart_info;
 
 struct drm_drawable_list {
-        struct list_head head;
-        int id;
         drm_drawable_info_t info;
 };
 
@@ -753,7 +749,6 @@ typedef struct drm_device {
         struct mutex ctxlist_mutex;     /**< For ctxlist */
 
         struct idr ctx_idr;
-        struct list_head context_sarealist;
 
         struct list_head vmalist;       /**< List of vmas (for debugging) */
         drm_lock_data_t lock;           /**< Information on hardware lock */
@@ -830,7 +825,6 @@ typedef struct drm_device {
         /*@{ */
         spinlock_t drw_lock;
         struct idr drw_idr;
-        struct list_head drwlist;
         /*@} */
 } drm_device_t;
 
@@ -1008,6 +1002,7 @@ extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
                                     unsigned int cmd, unsigned long arg);
 extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
                                                   drm_drawable_t id);
+extern void drm_drawable_free_all(drm_device_t *dev);
 
 /* Authentication IOCTL support (drm_auth.h) */
 extern int drm_getmagic(struct inode *inode, struct file *filp,
@@ -555,3 +555,126 @@ void drm_bo_finish_unmap(drm_buffer_object_t *bo)
 
 #endif
+
+#ifdef DRM_IDR_COMPAT_FN
+/* only called when idp->lock is held */
+static void __free_layer(struct idr *idp, struct idr_layer *p)
+{
+        p->ary[0] = idp->id_free;
+        idp->id_free = p;
+        idp->id_free_cnt++;
+}
+
+static void free_layer(struct idr *idp, struct idr_layer *p)
+{
+        unsigned long flags;
+
+        /*
+         * Depends on the return element being zeroed.
+         */
+        spin_lock_irqsave(&idp->lock, flags);
+        __free_layer(idp, p);
+        spin_unlock_irqrestore(&idp->lock, flags);
+}
+
+/**
+ * idr_for_each - iterate through all stored pointers
+ * @idp: idr handle
+ * @fn: function to be called for each pointer
+ * @data: data passed back to callback function
+ *
+ * Iterate over the pointers registered with the given idr.  The
+ * callback function will be called for each pointer currently
+ * registered, passing the id, the pointer and the data pointer passed
+ * to this function.  It is not safe to modify the idr tree while in
+ * the callback, so functions such as idr_get_new and idr_remove are
+ * not allowed.
+ *
+ * We check the return of @fn each time.  If it returns anything other
+ * than 0, we break out and return that value.
+ *
+ * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ */
+int idr_for_each(struct idr *idp,
+                 int (*fn)(int id, void *p, void *data), void *data)
+{
+        int n, id, max, error = 0;
+        struct idr_layer *p;
+        struct idr_layer *pa[MAX_LEVEL];
+        struct idr_layer **paa = &pa[0];
+
+        n = idp->layers * IDR_BITS;
+        p = idp->top;
+        max = 1 << n;
+
+        id = 0;
+        while (id < max) {
+                while (n > 0 && p) {
+                        n -= IDR_BITS;
+                        *paa++ = p;
+                        p = p->ary[(id >> n) & IDR_MASK];
+                }
+
+                if (p) {
+                        error = fn(id, (void *)p, data);
+                        if (error)
+                                break;
+                }
+
+                id += 1 << n;
+                while (n < fls(id)) {
+                        n += IDR_BITS;
+                        p = *--paa;
+                }
+        }
+
+        return error;
+}
+EXPORT_SYMBOL(idr_for_each);
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * idr_destroy() only frees up unused, cached idp_layers, but this
+ * function will remove all id mappings and leave all idp_layers
+ * unused.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will
+ * use idr_for_each() to free all objects, if necessary, then
+ * idr_remove_all() to remove all ids, and idr_destroy() to free
+ * up the cached idr_layers.
+ */
+void idr_remove_all(struct idr *idp)
+{
+        int n, id, max, error = 0;
+        struct idr_layer *p;
+        struct idr_layer *pa[MAX_LEVEL];
+        struct idr_layer **paa = &pa[0];
+
+        n = idp->layers * IDR_BITS;
+        p = idp->top;
+        max = 1 << n;
+
+        id = 0;
+        while (id < max && !error) {
+                while (n > IDR_BITS && p) {
+                        n -= IDR_BITS;
+                        *paa++ = p;
+                        p = p->ary[(id >> n) & IDR_MASK];
+                }
+
+                id += 1 << n;
+                while (n < fls(id)) {
+                        if (p) {
+                                memset(p, 0, sizeof *p);
+                                free_layer(idp, p);
+                        }
+                        n += IDR_BITS;
+                        p = *--paa;
+                }
+        }
+        idp->top = NULL;
+        idp->layers = 0;
+}
+EXPORT_SYMBOL(idr_remove_all);
+#endif
@@ -305,4 +305,13 @@ extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
 extern int drm_bo_map_bound(struct vm_area_struct *vma);
 
 #endif
+
+/* fixme when functions are upstreamed */
+#define DRM_IDR_COMPAT_FN
+#ifdef DRM_IDR_COMPAT_FN
+int idr_for_each(struct idr *idp,
+                 int (*fn)(int id, void *p, void *data), void *data);
+void idr_remove_all(struct idr *idp);
+#endif
+
 #endif
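The /* fixme when functions are upstreamed */ note above points out that DRM_IDR_COMPAT_FN is defined unconditionally for now, so the local copies in drm_compat.c always build. A plausible follow-up, sketched here only as an assumption, would be to key the define off LINUX_VERSION_CODE like the other shims in drm_compat.h, so the copies drop out once idr_for_each()/idr_remove_all() ship in linux/idr.h:

#include <linux/version.h>

/* Hypothetical replacement for the unconditional define above: only
 * provide the DRM copies on kernels that predate the upstream helpers.
 * The 2.6.23 cut-off is a placeholder, not a verified release number. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
#define DRM_IDR_COMPAT_FN
#endif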
@@ -53,19 +53,18 @@
  * \param ctx_handle context handle.
  *
  * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::context_sareas, while holding the drm_device::struct_mutex
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
  * lock.
  */
 void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
 {
-        struct drm_ctx_sarea_list *ctx_sarea;
+        struct drm_ctx_sarea_list *ctx;
 
         mutex_lock(&dev->struct_mutex);
-        ctx_sarea = idr_find(&dev->ctx_idr, ctx_handle);
-        if (ctx_sarea) {
+        ctx = idr_find(&dev->ctx_idr, ctx_handle);
+        if (ctx) {
                 idr_remove(&dev->ctx_idr, ctx_handle);
-                list_del(&ctx_sarea->head);
-                drm_free(ctx_sarea, sizeof(struct drm_ctx_sarea_list), DRM_MEM_BUFS);
+                drm_free(ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
         } else
                 DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
         mutex_unlock(&dev->struct_mutex);
@@ -78,8 +77,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
  * \param dev DRM device.
  * \return (non-negative) context handle on success or a negative number on failure.
  *
- * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
- * drm_device::context_sareas to accommodate the new entry while holding the
+ * Allocate a new idr from drm_device::ctx_idr while holding the
  * drm_device::struct_mutex lock.
  */
 static int drm_ctxbitmap_next(drm_device_t * dev)
@@ -88,14 +86,14 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
         int ret;
         struct drm_ctx_sarea_list *new_ctx;
 
-        new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_BUFS);
+        new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
         if (!new_ctx)
                 return -1;
 
 again:
         if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
                 DRM_ERROR("Out of memory expanding drawable idr\n");
-                drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_BUFS);
+                drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
                 return -ENOMEM;
         }
         mutex_lock(&dev->struct_mutex);
@@ -106,9 +104,6 @@ again:
         }
 
         mutex_unlock(&dev->struct_mutex);
-
-        new_ctx->ctx_id = new_id;
-        list_add(&new_ctx->head, &dev->context_sarealist);
         return new_id;
 }
 
@@ -117,14 +112,20 @@ again:
  *
  * \param dev DRM device.
  *
- * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Initialise the drm_device::ctx_idr
  */
 int drm_ctxbitmap_init(drm_device_t * dev)
 {
         idr_init(&dev->ctx_idr);
-        INIT_LIST_HEAD(&dev->context_sarealist);
         return 0;
 }
 
+
+static int drm_ctx_sarea_free(int id, void *p, void *data)
+{
+        struct drm_ctx_sarea_list *ctx_entry = p;
+        drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
+        return 0;
+}
+
 
@@ -133,18 +134,14 @@ int drm_ctxbitmap_init(drm_device_t * dev)
  *
  * \param dev DRM device.
  *
- * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Free all idr members using drm_ctx_sarea_free helper function
+ * while holding the drm_device::struct_mutex lock.
  */
 void drm_ctxbitmap_cleanup(drm_device_t * dev)
 {
-        struct drm_ctx_sarea_list *ctx_entry, *ctx_temp;
 
         mutex_lock(&dev->struct_mutex);
-        list_for_each_entry_safe(ctx_entry, ctx_temp, &dev->context_sarealist, head) {
-                idr_remove(&dev->ctx_idr, ctx_entry->ctx_id);
-                drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_MAPS);
-        }
+        idr_for_each(&dev->ctx_idr, drm_ctx_sarea_free, NULL);
+        idr_remove_all(&dev->ctx_idr);
         mutex_unlock(&dev->struct_mutex);
 }
@@ -163,7 +160,7 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev)
  * \param arg user argument pointing to a drm_ctx_priv_map structure.
  * \return zero on success or a negative number on failure.
  *
- * Gets the map from drm_device::context_sareas with the handle specified and
+ * Gets the map from drm_device::ctx_idr with the handle specified and
  * returns its handle.
  */
 int drm_getsareactx(struct inode *inode, struct file *filp,
@@ -187,8 +184,8 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
                 mutex_unlock(&dev->struct_mutex);
                 return -EINVAL;
         }
 
         map = ctx_sarea->map;
 
         mutex_unlock(&dev->struct_mutex);
 
         request.handle = NULL;
@@ -217,7 +214,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
  * \return zero on success or a negative number on failure.
  *
  * Searches the mapping specified in \p arg and update the entry in
- * drm_device::context_sareas with it.
+ * drm_device::ctx_idr with it.
  */
 int drm_setsareactx(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
@@ -251,10 +248,9 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
         mutex_lock(&dev->struct_mutex);
 
         ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
-        if (!ctx_sarea) {
+        if (!ctx_sarea)
                 goto bad;
-        }
 
         ctx_sarea->map = map;
         mutex_unlock(&dev->struct_mutex);
         return 0;
@@ -67,11 +67,8 @@ again:
                 goto again;
         }
 
-        list_add(&draw_info->head, &dev->drwlist);
-
         spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 
-        draw_info->id = new_id;
         draw.handle = new_id;
 
         DRM_DEBUG("%d\n", draw.handle);
@@ -102,7 +99,6 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
 
         spin_lock_irqsave(&dev->drw_lock, irqflags);
 
-        list_del(&draw_info->head);
         idr_remove(&dev->drw_idr, draw.handle);
         drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
 
@@ -199,3 +195,18 @@ drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id)
         return &draw_info->info;
 }
 EXPORT_SYMBOL(drm_get_drawable_info);
+
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+        struct drm_drawable_list *drw_entry = p;
+        drm_free(drw_entry->info.rects, drw_entry->info.num_rects *
+                 sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+        drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
+        return 0;
+}
+
+void drm_drawable_free_all(drm_device_t *dev)
+{
+        idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+        idr_remove_all(&dev->drw_idr);
+}
@@ -142,7 +142,6 @@ int drm_lastclose(drm_device_t * dev)
         drm_magic_entry_t *pt, *next;
         drm_map_list_t *r_list, *list_t;
         drm_vma_entry_t *vma, *vma_temp;
-        struct drm_drawable_list *drw_entry, *drw_temp;
         int i;
 
         DRM_DEBUG("\n");
@@ -167,16 +166,9 @@
                 drm_irq_uninstall(dev);
 
         /* Free drawable information memory */
-        list_for_each_entry_safe(drw_entry, drw_temp, &dev->drwlist, head) {
-                drm_free(drw_entry->info.rects, drw_entry->info.num_rects *
-                         sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
-
-                idr_remove(&dev->drw_idr, drw_entry->id);
-                list_del(&drw_entry->head);
-                drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
-        }
-
         mutex_lock(&dev->struct_mutex);
+
+        drm_drawable_free_all(dev);
         del_timer(&dev->timer);
 
         if (dev->unique) {
@@ -60,7 +60,6 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
 {
         int retcode;
 
-        INIT_LIST_HEAD(&dev->drwlist);
         INIT_LIST_HEAD(&dev->filelist);
         INIT_LIST_HEAD(&dev->ctxlist);
         INIT_LIST_HEAD(&dev->vmalist);