Move mmfs ioctls into the DRM. Untested.

Eric Anholt 2008-04-29 13:30:44 -07:00
parent 3ad8db2071
commit dabd056bf3
6 changed files with 482 additions and 2 deletions


@@ -14,7 +14,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
-	drm_regman.o
+	drm_regman.o drm_mm.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o


@@ -107,7 +107,7 @@ struct drm_file;
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
#define DRIVER_MM 0x400
/*@}*/
@@ -427,6 +427,11 @@ struct drm_file {
struct list_head refd_objects;
/** Mapping of mm object handles to object pointers. */
struct idr object_idr;
/** Lock for synchronization of access to object_idr. */
spinlock_t table_lock;
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
void *driver_priv;
@@ -604,6 +609,26 @@ struct drm_ati_pcigart_info {
int table_size;
};
/**
* This structure defines the drm_mm memory object, which will be used by the
* DRM for its buffer objects.
*/
struct drm_mm_object {
/** File representing the shmem storage */
struct file *filp;
spinlock_t lock;
/**
* Size of the object, in bytes. Immutable over the object's
* lifetime.
*/
size_t size;
/** Reference count of this object, protected by object_lock */
int refcount;
};
#include "drm_objects.h"
/**
@@ -1259,6 +1284,22 @@ static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
return block->mm;
}
/* Memory manager (drm_mm.c) */
void drm_mm_object_reference(struct drm_mm_object *obj);
void drm_mm_object_unreference(struct drm_mm_object *obj);
int drm_mm_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_mm_unreference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_mm_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_mm_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_mm_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_mm_open(struct drm_file *file_private);
void drm_mm_release(struct drm_file *file_private);
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);


@@ -150,6 +150,12 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_ALLOC, drm_mm_alloc_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_UNREFERENCE, drm_mm_unreference_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_PREAD, drm_mm_pread_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_PWRITE, drm_mm_pwrite_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_MMAP, drm_mm_mmap_ioctl, 0),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )


@@ -274,6 +274,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
if (dev->driver->driver_features & DRIVER_MM)
drm_mm_open(priv);
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -447,6 +450,9 @@ int drm_release(struct inode *inode, struct file *filp)
dev->driver->reclaim_buffers(dev, file_priv);
}
if (dev->driver->driver_features & DRIVER_MM)
drm_mm_release(file_priv);
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
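
The drm_mm_open()/drm_mm_release() calls above only run for drivers that advertise the new DRIVER_MM feature bit. A minimal sketch of how a driver would opt in; the driver name and the other feature flags shown are illustrative, not part of this commit:

static struct drm_driver foo_driver = {
	/* DRIVER_MM makes drm_open_helper() and drm_release() set up and
	 * tear down the per-file object_idr via drm_mm_open()/drm_mm_release(). */
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MM,
	/* ... the driver's remaining hooks are unchanged ... */
};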

linux-core/drm_mm.c (new file, 359 lines)

@@ -0,0 +1,359 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"
/** @file drm_mm.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
*
* Because various devices have different requirements in terms of
* synchronization and migration strategies, implementing that is left up to
* the driver, and all that the general API provides should be generic --
allocating objects, reading/writing data with the CPU, freeing objects.
* Even there, platform-dependent optimizations for reading/writing data with
* the CPU mean we'll likely hook those out to driver-specific calls. However,
* the DRI2 implementation wants to have at least allocate/mmap be generic.
*
* The goal was to have swap-backed object allocation managed through
* struct file. However, file descriptors as handles to a struct file have
* two major failings:
* - Process limits prevent more than 1024 or so being used at a time by
* default.
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
* This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
up at a later date, and as our interface with shmfs for memory allocation.
*/
static struct drm_mm_object *
drm_mm_object_alloc(size_t size)
{
struct drm_mm_object *obj;
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
obj->filp = shmem_file_setup("drm mm object", size, 0);
if (IS_ERR(obj->filp)) {
kfree(obj);
return NULL;
}
obj->refcount = 1;
return obj;
}
/**
* Removes the mapping from handle to filp for this object.
*/
static int
drm_mm_handle_delete(struct drm_file *filp, int handle)
{
struct drm_mm_object *obj;
/* This is gross. The idr system doesn't let us try a delete and
* return an error code. It just spews if you fail at deleting.
* So, we have to grab a lock around finding the object and then
* doing the delete on it and dropping the refcount, or the user
* could race us to double-decrement the refcount and cause a
* use-after-free later. Given the frequency of our handle lookups,
* we may want to use ida for number allocation and a hash table
* for the pointers, anyway.
*/
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return -EINVAL;
}
/* Release reference and decrement refcount. */
idr_remove(&filp->object_idr, handle);
drm_mm_object_unreference(obj);
spin_unlock(&filp->table_lock);
return 0;
}
/** Returns a reference to the object named by the handle. */
static struct drm_mm_object *
drm_mm_object_lookup(struct drm_file *filp, int handle)
{
struct drm_mm_object *obj;
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return NULL;
}
drm_mm_object_reference(obj);
spin_unlock(&filp->table_lock);
return obj;
}
/**
* Allocates a new mm object and returns a handle to it.
*/
int
drm_mm_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mm_alloc_args *args = data;
struct drm_mm_object *obj;
int handle, ret;
/* Round requested size up to page size */
args->size = (args->size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
/* Allocate the new object */
obj = drm_mm_object_alloc(args->size);
if (obj == NULL)
return -ENOMEM;
/* Get the user-visible handle using idr.
*
* I'm not really sure why the idr api needs us to do this in two
* repeating steps. It handles internal locking of its data
* structure, yet insists that we keep its memory allocation step
* separate from its slot-finding step for locking purposes.
*/
do {
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) {
kfree(obj);
return -EFAULT;
}
ret = idr_get_new(&file_priv->object_idr, obj, &handle);
} while (ret == -EAGAIN);
if (ret != 0) {
drm_mm_object_unreference(obj);
return -EFAULT;
}
args->handle = handle;
return 0;
}
/**
* Releases the handle to an mm object.
*/
int
drm_mm_unreference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mm_unreference_args *args = data;
int ret;
ret = drm_mm_handle_delete(file_priv, args->handle);
return ret;
}
/**
* Reads data from the object referenced by handle.
*
* On error, the contents of *data are undefined.
*/
int
drm_mm_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mm_pread_args *args = data;
struct drm_mm_object *obj;
ssize_t read;
loff_t offset;
obj = drm_mm_object_lookup(file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
offset = args->offset;
read = obj->filp->f_op->read(obj->filp, (char __user *)args->data,
args->size, &offset);
if (read != args->size) {
drm_mm_object_unreference(obj);
if (read < 0)
return read;
else
return -EINVAL;
}
drm_mm_object_unreference(obj);
return 0;
}
/**
* Maps the contents of an object, returning the address it is mapped
* into.
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
*/
int
drm_mm_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mm_mmap_args *args = data;
struct drm_mm_object *obj;
loff_t offset;
obj = drm_mm_object_lookup(file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
offset = args->offset;
down_write(&current->mm->mmap_sem);
args->addr = (void *)do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
up_write(&current->mm->mmap_sem);
drm_mm_object_unreference(obj);
return 0;
}
/**
* Writes data to the object referenced by handle.
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
int
drm_mm_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mm_pwrite_args *args = data;
struct drm_mm_object *obj;
ssize_t written;
loff_t offset;
obj = drm_mm_object_lookup(file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
offset = args->offset;
written = obj->filp->f_op->write(obj->filp, (char __user *)args->data,
args->size, &offset);
if (written != args->size) {
drm_mm_object_unreference(obj);
if (written < 0)
return written;
else
return -EINVAL;
}
drm_mm_object_unreference(obj);
return 0;
}
/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
drm_mm_open(struct drm_file *file_private)
{
idr_init(&file_private->object_idr);
}
/** Called at device close to release the file's references on objects. */
static int
drm_mm_object_release(int id, void *ptr, void *data)
{
struct drm_mm_object *obj = ptr;
drm_mm_object_unreference(obj);
return 0;
}
/**
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
drm_mm_release(struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr, &drm_mm_object_release, NULL);
idr_destroy(&file_private->object_idr);
}
void
drm_mm_object_reference(struct drm_mm_object *obj)
{
spin_lock(&obj->lock);
obj->refcount++;
spin_unlock(&obj->lock);
}
void
drm_mm_object_unreference(struct drm_mm_object *obj)
{
spin_lock(&obj->lock);
obj->refcount--;
spin_unlock(&obj->lock);
if (obj->refcount == 0) {
fput(obj->filp);
kfree(obj);
}
}


@@ -960,6 +960,69 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
struct drm_mm_alloc_args {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
uint32_t size;
/** Returned handle for the object. */
uint32_t handle;
};
struct drm_mm_unreference_args {
/** Handle of the object to be unreferenced. */
uint32_t handle;
};
struct drm_mm_link_args {
/** Handle for the object being given a name. */
uint32_t handle;
/** Requested file name to export the object under. */
char *name;
/** Requested file mode to export the object under. */
mode_t mode;
};
struct drm_mm_pread_args {
/** Handle for the object being read. */
uint32_t handle;
/** Offset into the object to read from */
off_t offset;
/** Length of data to read */
size_t size;
/** Pointer to write the data into. */
void *data;
};
struct drm_mm_pwrite_args {
/** Handle for the object being written to. */
uint32_t handle;
/** Offset into the object to write to */
off_t offset;
/** Length of data to write */
size_t size;
/** Pointer to read the data from. */
void *data;
};
struct drm_mm_mmap_args {
/** Handle for the object being mapped. */
uint32_t handle;
/** Offset in the object to map. */
off_t offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
size_t size;
/** Returned pointer the data was mapped at */
void *addr;
};
/**
* \name Ioctls Definitions
*/
@@ -980,6 +1043,11 @@ struct drm_mm_info_arg {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_MM_ALLOC DRM_IOWR(0x09, struct drm_mm_alloc_args)
#define DRM_IOCTL_MM_UNREFERENCE DRM_IOW(0x0a, struct drm_mm_unreference_args)
#define DRM_IOCTL_MM_PREAD DRM_IOW(0x0b, struct drm_mm_pread_args)
#define DRM_IOCTL_MM_PWRITE DRM_IOW(0x0c, struct drm_mm_pwrite_args)
#define DRM_IOCTL_MM_MMAP DRM_IOWR(0x0d, struct drm_mm_mmap_args)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
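
Putting the pieces together, the argument structs and DRM_IOCTL_MM_* numbers above give userspace an fd-like pread/pwrite/mmap interface keyed by integer handles. A minimal sketch of the intended client flow, assuming the updated drm.h is available to userspace; the device node path, the 4096-byte size, and the function name are illustrative, and per the commit message the interface is untested:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm.h"	/* picks up the structs and ioctl numbers added above */

int drm_mm_example(void)
{
	struct drm_mm_alloc_args alloc = { .size = 4096 };
	struct drm_mm_pwrite_args write_args;
	struct drm_mm_mmap_args map_args;
	struct drm_mm_unreference_args unref_args;
	char buf[4096];
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* illustrative device node */
	if (fd < 0)
		return -1;

	/* Allocate an object; the size is rounded up to a page and the
	 * handle is returned in alloc.handle. */
	if (ioctl(fd, DRM_IOCTL_MM_ALLOC, &alloc) != 0)
		goto err;

	/* Copy data into the object at offset 0. */
	memset(buf, 0xa5, sizeof(buf));
	write_args.handle = alloc.handle;
	write_args.offset = 0;
	write_args.size = sizeof(buf);
	write_args.data = buf;
	if (ioctl(fd, DRM_IOCTL_MM_PWRITE, &write_args) != 0)
		goto err;

	/* Map the whole object; the CPU pointer comes back in map_args.addr. */
	map_args.handle = alloc.handle;
	map_args.offset = 0;
	map_args.size = alloc.size;
	if (ioctl(fd, DRM_IOCTL_MM_MMAP, &map_args) != 0)
		goto err;

	/* ... use map_args.addr ... */

	/* Drop the handle; the object goes away once its refcount reaches 0. */
	unref_args.handle = alloc.handle;
	ioctl(fd, DRM_IOCTL_MM_UNREFERENCE, &unref_args);
	close(fd);
	return 0;

err:
	close(fd);	/* drm_release() drops any handles still held by this fd */
	return -1;
}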