TTM: remove API and userspace objects.

This removes the entire TTM userspace API and all userspace objects.

It also removes the drm_bo_lock.c code.
Dave Airlie 2008-07-31 12:54:48 +10:00
parent fb5542aaa8
commit 9b8d71b5eb
18 changed files with 202 additions and 3696 deletions

View file

@@ -12,16 +12,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
drm_hashtab.o drm_memrange.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
drm_regman.o drm_vm_nopage_compat.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o i915_execbuf.o i915_gem.o \
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_gem.o \
intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \

View file

@@ -405,14 +405,6 @@ struct drm_buf_entry {
struct drm_freelist freelist;
};
enum drm_ref_type {
_DRM_REF_USE = 0,
_DRM_REF_TYPE1,
_DRM_NO_REF_TYPES
};
/** File private data */
struct drm_file {
int authenticated;
@@ -424,21 +416,11 @@ struct drm_file {
struct drm_minor *minor;
unsigned long lock_count;
/*
* The user object hash table is global and resides in the
* drm_device structure. We protect the lists and hash tables with the
* device struct_mutex. A bit coarse-grained but probably the best
* option.
*/
struct list_head refd_objects;
/** Mapping of mm object handles to object pointers. */
struct idr object_idr;
/** Lock for synchronization of access to object_idr. */
spinlock_t table_lock;
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
void *driver_priv;

View file

@@ -565,18 +565,6 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);
static void drm_bo_base_deref_locked(struct drm_file *file_priv,
struct drm_user_object *uo)
{
struct drm_buffer_object *bo =
drm_user_object_entry(uo, struct drm_buffer_object, base);
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
drm_bo_takedown_vm_locked(bo);
drm_bo_usage_deref_locked(&bo);
}
void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
{
struct drm_buffer_object *tmp_bo = *bo;
@@ -1067,34 +1055,6 @@ static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
return 0;
}
/*
* Call dev->struct_mutex locked.
*/
struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
uint32_t handle, int check_owner)
{
struct drm_user_object *uo;
struct drm_buffer_object *bo;
uo = drm_lookup_user_object(file_priv, handle);
if (!uo || (uo->type != drm_buffer_type)) {
DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
return NULL;
}
if (check_owner && file_priv != uo->owner) {
if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
return NULL;
}
bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
atomic_inc(&bo->usage);
return bo;
}
EXPORT_SYMBOL(drm_lookup_buffer_object);
/*
* Call bo->mutex locked.
* Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
@@ -1157,149 +1117,6 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
return ret;
}
/*
* Fill in the ioctl reply argument with buffer info.
* Bo locked.
*/
void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
struct drm_bo_info_rep *rep)
{
if (!rep)
return;
rep->handle = bo->base.hash.key;
rep->flags = bo->mem.flags;
rep->size = bo->num_pages * PAGE_SIZE;
rep->offset = bo->offset;
/*
* drm_bo_type_device buffers have user-visible
* handles which can be used to share across
* processes. Hand that back to the application
*/
if (bo->type == drm_bo_type_device)
rep->arg_handle = bo->map_list.user_token;
else
rep->arg_handle = 0;
rep->proposed_flags = bo->mem.proposed_flags;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_type;
rep->rep_flags = 0;
rep->page_alignment = bo->mem.page_alignment;
if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
DRM_BO_REP_BUSY);
}
}
EXPORT_SYMBOL(drm_bo_fill_rep_arg);
/*
* Wait for buffer idle and register that we've mapped the buffer.
* Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
* so that if the client dies, the mapping is automatically
* unregistered.
*/
static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
uint32_t map_flags, unsigned hint,
struct drm_bo_info_rep *rep)
{
struct drm_buffer_object *bo;
struct drm_device *dev = file_priv->minor->dev;
int ret = 0;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo)
return -EINVAL;
mutex_lock(&bo->mutex);
do {
bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
if (unlikely(ret))
goto out;
if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
drm_bo_evict_cached(bo);
} while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
atomic_inc(&bo->mapped);
mutex_lock(&dev->struct_mutex);
ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
mutex_unlock(&dev->struct_mutex);
if (ret) {
if (atomic_dec_and_test(&bo->mapped))
wake_up_all(&bo->event_queue);
} else
drm_bo_fill_rep_arg(bo, rep);
out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_buffer_object *bo;
struct drm_ref_object *ro;
int ret = 0;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, handle, 1);
if (!bo) {
ret = -EINVAL;
goto out;
}
ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
if (!ro) {
ret = -EINVAL;
goto out;
}
drm_remove_ref_object(file_priv, ro);
drm_bo_usage_deref_locked(&bo);
out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Call struct-sem locked.
*/
static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
struct drm_user_object *uo,
enum drm_ref_type action)
{
struct drm_buffer_object *bo =
drm_user_object_entry(uo, struct drm_buffer_object, base);
/*
* We DON'T want to take the bo->lock here, because we want to
* hold it when we wait for unmapped buffer.
*/
BUG_ON(action != _DRM_REF_TYPE1);
if (atomic_dec_and_test(&bo->mapped))
wake_up_all(&bo->event_queue);
}
/*
* bo->mutex locked.
* Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
@@ -1594,8 +1411,7 @@ static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
struct drm_bo_info_rep *rep)
uint32_t fence_class)
{
int ret;
int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
@@ -1622,132 +1438,12 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
out:
if (rep)
drm_bo_fill_rep_arg(bo, rep);
mutex_unlock(&bo->mutex);
return ret;
}
EXPORT_SYMBOL(drm_bo_do_validate);
/**
* drm_bo_handle_validate
*
* @file_priv: the drm file private, used to get a handle to the user context
*
* @handle: the buffer object handle
*
* @flags: access rights, mapping parameters and cacheability. See
* the DRM_BO_FLAG_* values in drm.h
*
* @mask: Which flag values to change; this allows callers to modify
* things without knowing the current state of other flags.
*
* @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
* values in drm.h.
*
* @fence_class: a driver-specific way of doing fences. Presumably,
* this would be used if the driver had more than one submission and
* fencing mechanism. At this point, there isn't any use of this
* from the user mode code.
*
* @rep: To be stuffed with the reply from validation
*
* @bp_rep: To be stuffed with the buffer object pointer
*
* Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
* of a pointer to a buffer object. Optionally return a pointer to the buffer object.
* This is a convenience wrapper only.
*/
int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
uint64_t flags, uint64_t mask,
uint32_t hint,
uint32_t fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_buffer_object *bo;
int ret;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo)
return -EINVAL;
if (bo->base.owner != file_priv)
mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
if (!ret && bo_rep)
*bo_rep = bo;
else
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
EXPORT_SYMBOL(drm_bo_handle_validate);
static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
struct drm_bo_info_rep *rep)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_buffer_object *bo;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo)
return -EINVAL;
mutex_lock(&bo->mutex);
/*
* FIXME: Quick busy here?
*/
drm_bo_busy(bo, 1);
drm_bo_fill_rep_arg(bo, rep);
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
return 0;
}
static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
uint32_t hint,
struct drm_bo_info_rep *rep)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_buffer_object *bo;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
int ret;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo)
return -EINVAL;
mutex_lock(&bo->mutex);
ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
if (ret)
goto out;
drm_bo_fill_rep_arg(bo, rep);
out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
int drm_buffer_object_create(struct drm_device *dev,
unsigned long size,
enum drm_bo_type type,
@@ -1822,7 +1518,7 @@ int drm_buffer_object_create(struct drm_device *dev,
mutex_unlock(&bo->mutex);
ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
0, NULL);
0);
if (ret)
goto out_err_unlocked;
@@ -1837,230 +1533,6 @@ out_err_unlocked:
}
EXPORT_SYMBOL(drm_buffer_object_create);
int drm_bo_add_user_object(struct drm_file *file_priv,
struct drm_buffer_object *bo, int shareable)
{
struct drm_device *dev = file_priv->minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(file_priv, &bo->base, shareable);
if (ret)
goto out;
bo->base.remove = drm_bo_base_deref_locked;
bo->base.type = drm_buffer_type;
bo->base.ref_struct_locked = NULL;
bo->base.unref = drm_buffer_user_object_unmap;
out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_bo_add_user_object);
int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_bo_create_arg *arg = data;
struct drm_bo_create_req *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
struct drm_buffer_object *entry;
enum drm_bo_type bo_type;
int ret = 0;
DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
(int)(req->size / 1024), req->page_alignment * 4);
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
/*
* If the buffer creation request comes in with a starting address,
* that points at the desired user pages to map. Otherwise, create
* a drm_bo_type_device buffer, which uses pages allocated from the kernel
*/
bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
/*
* User buffers cannot be shared
*/
if (bo_type == drm_bo_type_user)
req->flags &= ~DRM_BO_FLAG_SHAREABLE;
ret = drm_buffer_object_create(file_priv->minor->dev,
req->size, bo_type, req->flags,
req->hint, req->page_alignment,
req->buffer_start, &entry);
if (ret)
goto out;
ret = drm_bo_add_user_object(file_priv, entry,
req->flags & DRM_BO_FLAG_SHAREABLE);
if (ret) {
drm_bo_usage_deref_unlocked(&entry);
goto out;
}
mutex_lock(&entry->mutex);
drm_bo_fill_rep_arg(entry, rep);
mutex_unlock(&entry->mutex);
out:
return ret;
}
int drm_bo_setstatus_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_bo_map_wait_idle_arg *arg = data;
struct drm_bo_info_req *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
struct drm_buffer_object *bo;
int ret;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (ret)
return ret;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo)
return -EINVAL;
if (bo->base.owner != file_priv)
req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
ret = drm_bo_do_validate(bo, req->flags, req->mask,
req->hint | DRM_BO_HINT_DONT_FENCE,
bo->fence_class, rep);
drm_bo_usage_deref_unlocked(&bo);
(void) drm_bo_read_unlock(&dev->bm.bm_lock);
return ret;
}
int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_bo_map_wait_idle_arg *arg = data;
struct drm_bo_info_req *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
int ret;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
req->hint, rep);
if (ret)
return ret;
return 0;
}
int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_bo_handle_arg *arg = data;
int ret;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
ret = drm_buffer_object_unmap(file_priv, arg->handle);
return ret;
}
int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_bo_reference_info_arg *arg = data;
struct drm_bo_handle_arg *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
struct drm_user_object *uo;
int ret;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
ret = drm_user_object_ref(file_priv, req->handle,
drm_buffer_type, &uo);
if (ret)
return ret;
ret = drm_bo_handle_info(file_priv, req->handle, rep);
if (ret)
return ret;
return 0;
}
int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_bo_handle_arg *arg = data;
int ret = 0;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
return ret;
}
int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_bo_reference_info_arg *arg = data;
struct drm_bo_handle_arg *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
int ret;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
ret = drm_bo_handle_info(file_priv, req->handle, rep);
if (ret)
return ret;
return 0;
}
int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_bo_map_wait_idle_arg *arg = data;
struct drm_bo_info_req *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
int ret;
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
return -EINVAL;
}
ret = drm_bo_handle_wait(file_priv, req->handle,
req->hint, rep);
if (ret)
return ret;
return 0;
}
static int drm_bo_leave_list(struct drm_buffer_object *bo,
uint32_t mem_type,
int free_pinned,
@@ -2435,191 +1907,6 @@ out_unlock:
}
EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_mm_init_arg *arg = data;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_bo_driver *driver = dev->driver->bo_driver;
int ret;
if (!driver) {
DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
if (ret)
return ret;
ret = -EINVAL;
if (arg->magic != DRM_BO_INIT_MAGIC) {
DRM_ERROR("You are using an old libdrm that is not compatible with\n"
"\tthe kernel DRM module. Please upgrade your libdrm.\n");
return -EINVAL;
}
if (arg->major != DRM_BO_INIT_MAJOR) {
DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
"\tversion don't match. Got %d, expected %d.\n",
arg->major, DRM_BO_INIT_MAJOR);
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (!bm->initialized) {
DRM_ERROR("DRM memory manager was not initialized.\n");
goto out;
}
if (arg->mem_type == 0) {
DRM_ERROR("System memory buffers already initialized.\n");
goto out;
}
ret = drm_bo_init_mm(dev, arg->mem_type,
arg->p_offset, arg->p_size, 0);
out:
mutex_unlock(&dev->struct_mutex);
(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
if (ret)
return ret;
return 0;
}
int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_mm_type_arg *arg = data;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_bo_driver *driver = dev->driver->bo_driver;
int ret;
if (!driver) {
DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
if (ret)
return ret;
mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
if (!bm->initialized) {
DRM_ERROR("DRM memory manager was not initialized\n");
goto out;
}
if (arg->mem_type == 0) {
DRM_ERROR("No takedown for System memory buffers.\n");
goto out;
}
ret = 0;
if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
if (ret == -EINVAL)
DRM_ERROR("Memory manager type %d not clean. "
"Delaying takedown\n", arg->mem_type);
ret = 0;
}
out:
mutex_unlock(&dev->struct_mutex);
(void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
if (ret)
return ret;
return 0;
}
int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_mm_type_arg *arg = data;
struct drm_bo_driver *driver = dev->driver->bo_driver;
int ret;
if (!driver) {
DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
return -EINVAL;
}
if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
if (ret)
return ret;
}
mutex_lock(&dev->struct_mutex);
ret = drm_bo_lock_mm(dev, arg->mem_type);
mutex_unlock(&dev->struct_mutex);
if (ret) {
(void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
return ret;
}
return 0;
}
int drm_mm_unlock_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct drm_mm_type_arg *arg = data;
struct drm_bo_driver *driver = dev->driver->bo_driver;
int ret;
if (!driver) {
DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
if (ret)
return ret;
}
return 0;
}
int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_mm_info_arg *arg = data;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_bo_driver *driver = dev->driver->bo_driver;
struct drm_mem_type_manager *man;
int ret = 0;
int mem_type = arg->mem_type;
if (!driver) {
DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
if (mem_type >= DRM_BO_MEM_TYPES) {
DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (!bm->initialized) {
DRM_ERROR("DRM memory manager was not initialized\n");
ret = -EINVAL;
goto out;
}
man = &bm->man[arg->mem_type];
arg->p_size = man->size;
out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* buffer object vm functions.
*/
@@ -2792,15 +2079,3 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
return 0;
}
int drm_bo_version_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
arg->major = DRM_BO_INIT_MAJOR;
arg->minor = DRM_BO_INIT_MINOR;
arg->patchlevel = DRM_BO_INIT_PATCH;
return 0;
}

View file

@@ -1,189 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
/*
* This file implements a simple replacement for the buffer manager use
* of the heavyweight hardware lock.
* The lock is a read-write lock. Taking it in read mode is fast, and
* intended for in-kernel use only.
* Taking it in write mode is slow.
*
* The write mode is used only when there is a need to block all
* user-space processes from allocating a
* new memory area.
* Typical use in write mode is X server VT switching, and it's allowed
* to leave kernel space with the write lock held. If a user-space process
* dies while having the write-lock, it will be released during the file
* descriptor release.
*
* The read lock is typically placed at the start of an IOCTL- or
* user-space callable function that may end up allocating a memory area.
* This includes setstatus, super-ioctls and no_pfn; the latter may move
* unmappable regions to mappable. It's a bug to leave kernel space with the
* read lock held.
*
* Both read- and write lock taking may be interruptible for low signal-delivery
* latency. The locking functions will return -EAGAIN if interrupted by a
* signal.
*
* Locking order: The lock should be taken BEFORE any kernel mutexes
* or spinlocks.
*/
#include "drmP.h"
void drm_bo_init_lock(struct drm_bo_lock *lock)
{
DRM_INIT_WAITQUEUE(&lock->queue);
atomic_set(&lock->write_lock_pending, 0);
atomic_set(&lock->readers, 0);
}
void drm_bo_read_unlock(struct drm_bo_lock *lock)
{
if (atomic_dec_and_test(&lock->readers))
wake_up_all(&lock->queue);
}
EXPORT_SYMBOL(drm_bo_read_unlock);
int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
{
while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
int ret;
if (!interruptible) {
wait_event(lock->queue,
atomic_read(&lock->write_lock_pending) == 0);
continue;
}
ret = wait_event_interruptible
(lock->queue, atomic_read(&lock->write_lock_pending) == 0);
if (ret)
return -EAGAIN;
}
while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
int ret;
if (!interruptible) {
wait_event(lock->queue,
atomic_read(&lock->readers) != -1);
continue;
}
ret = wait_event_interruptible
(lock->queue, atomic_read(&lock->readers) != -1);
if (ret)
return -EAGAIN;
}
return 0;
}
EXPORT_SYMBOL(drm_bo_read_lock);
static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
{
if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
return -EINVAL;
wake_up_all(&lock->queue);
return 0;
}
static void drm_bo_write_lock_remove(struct drm_file *file_priv,
struct drm_user_object *item)
{
struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base);
int ret;
ret = __drm_bo_write_unlock(lock);
BUG_ON(ret);
}
int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
struct drm_file *file_priv)
{
int ret = 0;
struct drm_device *dev;
atomic_inc(&lock->write_lock_pending);
while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
if (!interruptible) {
wait_event(lock->queue,
atomic_read(&lock->readers) == 0);
continue;
}
ret = wait_event_interruptible
(lock->queue, atomic_read(&lock->readers) == 0);
if (ret) {
atomic_dec(&lock->write_lock_pending);
wake_up_all(&lock->queue);
return -EAGAIN;
}
}
/*
* Add a dummy user-object, the destructor of which will
* make sure the lock is released if the client dies
* while holding it.
*/
if (atomic_dec_and_test(&lock->write_lock_pending))
wake_up_all(&lock->queue);
dev = file_priv->minor->dev;
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(file_priv, &lock->base, 0);
lock->base.remove = &drm_bo_write_lock_remove;
lock->base.type = drm_lock_type;
if (ret)
(void)__drm_bo_write_unlock(lock);
mutex_unlock(&dev->struct_mutex);
return ret;
}
int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_ref_object *ro;
mutex_lock(&dev->struct_mutex);
if (lock->base.owner != file_priv) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE);
BUG_ON(!ro);
drm_remove_ref_object(file_priv, ro);
lock->base.owner = NULL;
mutex_unlock(&dev->struct_mutex);
return 0;
}
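
For context, the protocol described in this file's header comment looked like the following at its call sites (compare the drm_bo_setstatus_ioctl() hunk earlier in this commit). A minimal sketch; the ioctl name and body here are hypothetical:

	/* Hypothetical ioctl showing the read-lock bracket; mirrors
	 * drm_bo_setstatus_ioctl() before this commit. */
	static int example_alloc_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
	{
		int ret;

		/* Interruptible read lock: returns -EAGAIN on a signal. */
		ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
		if (ret)
			return ret;

		/* ... validate or allocate buffer objects here ... */

		/* It is a bug to return to userspace with the read lock held. */
		(void) drm_bo_read_unlock(&dev->bm.bm_lock);
		return 0;
	}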

View file

@@ -715,48 +715,4 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
/**
* drm_get_buffer_object - find the buffer object for a given handle
* @dev: DRM device
* @bo: pointer to caller's buffer_object pointer
* @handle: handle to lookup
*
* LOCKING:
* Must take @dev's struct_mutex to protect buffer object lookup.
*
* Given @handle, lookup the buffer object in @dev and put it in the caller's
* @bo pointer.
*
* RETURNS:
* Zero on success, -EINVAL if the handle couldn't be found.
*/
int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
{
struct drm_user_object *uo;
struct drm_hash_item *hash;
int ret;
*bo = NULL;
mutex_lock(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, handle, &hash);
if (ret) {
DRM_ERROR("Couldn't find handle.\n");
ret = -EINVAL;
goto out_err;
}
uo = drm_hash_entry(hash, struct drm_user_object, hash);
if (uo->type != drm_buffer_type) {
ret = -EINVAL;
goto out_err;
}
*bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
ret = 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_buffer_object);

View file

@@ -146,36 +146,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),

View file

@@ -134,8 +134,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
if (new_type) {
fence->signaled_types |= new_type;
DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
fence->base.hash.key, fence->signaled_types);
DRM_DEBUG("Fence %p signaled 0x%08x\n",
fence, fence->signaled_types);
if (driver->needed_flush)
fc->pending_flush |= driver->needed_flush(fence);
@@ -147,8 +147,8 @@ void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
if (!(fence->type & ~fence->signaled_types)) {
DRM_DEBUG("Fence completely signaled 0x%08lx\n",
fence->base.hash.key);
DRM_DEBUG("Fence completely signaled %p\n",
fence);
list_del_init(&fence->ring);
}
}
@@ -196,10 +196,9 @@ void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
*fence = NULL;
if (atomic_dec_and_test(&tmp_fence->usage)) {
drm_fence_unring(dev, &tmp_fence->ring);
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
tmp_fence->base.hash.key);
DRM_DEBUG("Destroyed a fence object %p\n",
tmp_fence);
atomic_dec(&fm->count);
BUG_ON(!list_empty(&tmp_fence->base.list));
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
}
@@ -217,7 +216,6 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
if (atomic_read(&tmp_fence->usage) == 0) {
drm_fence_unring(dev, &tmp_fence->ring);
atomic_dec(&fm->count);
BUG_ON(!list_empty(&tmp_fence->base.list));
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
mutex_unlock(&dev->struct_mutex);
@@ -244,15 +242,6 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);
static void drm_fence_object_destroy(struct drm_file *priv,
struct drm_user_object *base)
{
struct drm_fence_object *fence =
drm_user_object_entry(base, struct drm_fence_object, base);
drm_fence_usage_deref_locked(&fence);
}
int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
{
unsigned long flags;
@@ -477,7 +466,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
* Avoid hitting BUG() for kernel-only fence objects.
*/
INIT_LIST_HEAD(&fence->base.list);
fence->fence_class = fence_class;
fence->type = type;
fence->signaled_types = 0;
@@ -493,26 +481,6 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
return ret;
}
int drm_fence_add_user_object(struct drm_file *priv,
struct drm_fence_object *fence, int shareable)
{
struct drm_device *dev = priv->minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(priv, &fence->base, shareable);
if (ret)
goto out;
atomic_inc(&fence->usage);
fence->base.type = drm_fence_type;
fence->base.remove = &drm_fence_object_destroy;
DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
uint32_t type, unsigned flags,
struct drm_fence_object **c_fence)
@@ -569,261 +537,7 @@ void drm_fence_manager_init(struct drm_device *dev)
write_unlock_irqrestore(&fm->lock, flags);
}
void drm_fence_fill_arg(struct drm_fence_object *fence,
struct drm_fence_arg *arg)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
unsigned long irq_flags;
read_lock_irqsave(&fm->lock, irq_flags);
arg->handle = fence->base.hash.key;
arg->fence_class = fence->fence_class;
arg->type = fence->type;
arg->signaled = fence->signaled_types;
arg->error = fence->error;
arg->sequence = fence->sequence;
read_unlock_irqrestore(&fm->lock, irq_flags);
}
EXPORT_SYMBOL(drm_fence_fill_arg);
void drm_fence_manager_takedown(struct drm_device *dev)
{
}
struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
uint32_t handle)
{
struct drm_device *dev = priv->minor->dev;
struct drm_user_object *uo;
struct drm_fence_object *fence;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, handle);
if (!uo || (uo->type != drm_fence_type)) {
mutex_unlock(&dev->struct_mutex);
return NULL;
}
fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
mutex_unlock(&dev->struct_mutex);
return fence;
}
int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
if (arg->flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = drm_fence_object_create(dev, arg->fence_class,
arg->type, arg->flags, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(file_priv, fence,
arg->flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret) {
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
/*
* usage > 0. No need to lock dev->struct_mutex;
*/
arg->handle = fence->base.hash.key;
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
struct drm_user_object *uo;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
if (ret)
return ret;
fence = drm_lookup_fence_object(file_priv, arg->handle);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
}
int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_flush(fence, arg->type);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_wait(fence,
arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
0, arg->type);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
LOCK_TEST_WITH_RETURN(dev, file_priv);
fence = drm_lookup_fence_object(file_priv, arg->handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
arg->type);
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
int ret;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_arg *arg = data;
struct drm_fence_object *fence;
ret = 0;
if (!fm->initialized) {
DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized\n");
return -EINVAL;
}
LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
NULL, &fence);
if (ret)
return ret;
if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
ret = drm_fence_add_user_object(file_priv, fence,
arg->flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret)
return ret;
}
arg->handle = fence->base.hash.key;
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
return ret;
}

View file

@@ -221,7 +221,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
int minor_id = iminor(inode);
struct drm_file *priv;
int ret;
int i, j;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
@@ -246,22 +245,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->refd_objects);
INIT_LIST_HEAD(&priv->fbs);
for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
ret = drm_ht_create(&priv->refd_object_hash[i],
DRM_FILE_HASH_ORDER);
if (ret)
break;
}
if (ret) {
for (j = 0; j < i; ++j)
drm_ht_remove(&priv->refd_object_hash[j]);
goto out_free;
}
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
@@ -346,33 +331,6 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
static void drm_object_release(struct file *filp)
{
struct drm_file *priv = filp->private_data;
struct list_head *head;
struct drm_ref_object *ref_object;
int i;
/*
* Free leftover ref objects created by me. Note that we cannot use
* list_for_each() here, as the struct_mutex may be temporarily
* released by the remove_() functions, and thus the lists may be
* altered.
* Also, a drm_remove_ref_object() will not remove it
* from the list unless its refcount is 1.
*/
head = &priv->refd_objects;
while (head->next != head) {
ref_object = list_entry(head->next, struct drm_ref_object, list);
drm_remove_ref_object(priv, ref_object);
head = &priv->refd_objects;
}
for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
drm_ht_remove(&priv->refd_object_hash[i]);
}
/**
* Release file.
*
@@ -512,7 +470,6 @@ int drm_release(struct inode *inode, struct file *filp)
file_priv->is_master = 0;
mutex_lock(&dev->struct_mutex);
drm_object_release(filp);
list_del(&file_priv->lhead);

View file

@@ -1,294 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable)
{
struct drm_device *dev = priv->minor->dev;
int ret;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
/* The refcount will be bumped to 1 when we add the ref object below. */
atomic_set(&item->refcount, 0);
item->shareable = shareable;
item->owner = priv;
ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
(unsigned long)item, 31, 0, 0);
if (ret)
return ret;
ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
if (ret)
ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
return ret;
}
EXPORT_SYMBOL(drm_add_user_object);
struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
{
struct drm_device *dev = priv->minor->dev;
struct drm_hash_item *hash;
int ret;
struct drm_user_object *item;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, key, &hash);
if (ret)
return NULL;
item = drm_hash_entry(hash, struct drm_user_object, hash);
if (priv != item->owner) {
struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
if (ret) {
DRM_ERROR("Object not registered for usage\n");
return NULL;
}
}
return item;
}
EXPORT_SYMBOL(drm_lookup_user_object);
static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
{
struct drm_device *dev = priv->minor->dev;
int ret;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
BUG_ON(ret);
item->remove(priv, item);
}
}
static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
enum drm_ref_type action)
{
int ret = 0;
switch (action) {
case _DRM_REF_USE:
atomic_inc(&ro->refcount);
break;
default:
if (!ro->ref_struct_locked) {
break;
} else {
ro->ref_struct_locked(priv, ro, action);
}
}
return ret;
}
int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
int ret = 0;
struct drm_ref_object *item;
struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
if (!referenced_object->shareable && priv != referenced_object->owner) {
DRM_ERROR("Not allowed to reference this object\n");
return -EINVAL;
}
/*
* If this is not a usage reference, check that usage has been registered
* first. Otherwise strange things may happen on destruction.
*/
if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
item =
drm_lookup_ref_object(priv, referenced_object,
_DRM_REF_USE);
if (!item) {
DRM_ERROR
("Object not registered for usage by this client\n");
return -EINVAL;
}
}
if (NULL !=
(item =
drm_lookup_ref_object(priv, referenced_object, ref_action))) {
atomic_inc(&item->refcount);
return drm_object_ref_action(priv, referenced_object,
ref_action);
}
item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
if (item == NULL) {
DRM_ERROR("Could not allocate reference object\n");
return -ENOMEM;
}
atomic_set(&item->refcount, 1);
item->hash.key = (unsigned long)referenced_object;
ret = drm_ht_insert_item(ht, &item->hash);
item->unref_action = ref_action;
if (ret)
goto out;
list_add(&item->list, &priv->refd_objects);
ret = drm_object_ref_action(priv, referenced_object, ref_action);
out:
return ret;
}
struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
struct drm_hash_item *hash;
int ret;
DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
(unsigned long)referenced_object, &hash);
if (ret)
return NULL;
return drm_hash_entry(hash, struct drm_ref_object, hash);
}
EXPORT_SYMBOL(drm_lookup_ref_object);
static void drm_remove_other_references(struct drm_file *priv,
struct drm_user_object *ro)
{
int i;
struct drm_open_hash *ht;
struct drm_hash_item *hash;
struct drm_ref_object *item;
for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
ht = &priv->refd_object_hash[i];
while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
item = drm_hash_entry(hash, struct drm_ref_object, hash);
drm_remove_ref_object(priv, item);
}
}
}
void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
{
int ret;
struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
enum drm_ref_type unref_action;
DRM_ASSERT_LOCKED(&priv->minor->dev->struct_mutex);
unref_action = item->unref_action;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(ht, &item->hash);
BUG_ON(ret);
list_del_init(&item->list);
if (unref_action == _DRM_REF_USE)
drm_remove_other_references(priv, user_object);
drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
}
switch (unref_action) {
case _DRM_REF_USE:
drm_deref_user_object(priv, user_object);
break;
default:
BUG_ON(!user_object->unref);
user_object->unref(priv, user_object, unref_action);
break;
}
}
EXPORT_SYMBOL(drm_remove_ref_object);
int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type, struct drm_user_object **object)
{
struct drm_device *dev = priv->minor->dev;
struct drm_user_object *uo;
struct drm_hash_item *hash;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
if (ret) {
DRM_ERROR("Could not find user object to reference.\n");
goto out_err;
}
uo = drm_hash_entry(hash, struct drm_user_object, hash);
if (uo->type != type) {
ret = -EINVAL;
goto out_err;
}
ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
if (ret)
goto out_err;
mutex_unlock(&dev->struct_mutex);
*object = uo;
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type)
{
struct drm_device *dev = priv->minor->dev;
struct drm_user_object *uo;
struct drm_ref_object *ro;
int ret;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, user_token);
if (!uo || (uo->type != type)) {
ret = -EINVAL;
goto out_err;
}
ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
if (!ro) {
ret = -EINVAL;
goto out_err;
}
drm_remove_ref_object(priv, ro);
mutex_unlock(&dev->struct_mutex);
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
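
To make the removed lifecycle concrete: a kernel object embedding a struct drm_user_object was published with drm_add_user_object(), its 32-bit handle came from the hash key, and other clients attached and dropped usage references by handle. A minimal sketch modelled on the drm_fence_add_user_object() and drm_user_object_ref() call sites removed elsewhere in this commit; fence, other_priv, handle and uo are assumed locals:

	/* Owner: publish the fence object and hand its handle to userspace. */
	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(file_priv, &fence->base, 1 /* shareable */);
	if (!ret) {
		fence->base.type = drm_fence_type;
		fence->base.remove = &drm_fence_object_destroy;
	}
	mutex_unlock(&dev->struct_mutex);
	handle = fence->base.hash.key;

	/* Another client: take a usage reference on the handle, then drop it. */
	ret = drm_user_object_ref(other_priv, handle, drm_fence_type, &uo);
	/* ... use the object ... */
	ret = drm_user_object_unref(other_priv, handle, drm_fence_type);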

View file

@@ -34,110 +34,201 @@
struct drm_device;
struct drm_bo_mem_reg;
/***************************************************
* User space objects. (drm_object.c)
*/
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
enum drm_object_type {
drm_fence_type,
drm_buffer_type,
drm_lock_type,
/*
* Add other user space object types here.
*/
drm_driver_type0 = 256,
drm_driver_type1,
drm_driver_type2,
drm_driver_type3,
drm_driver_type4
};
/*
* A user object is a structure that helps the drm give out user handles
* to kernel internal objects and to keep track of these objects so that
* they can be destroyed, for example when the user space process exits.
* Designed to be accessible using a user space 32-bit handle.
*/
struct drm_user_object {
struct drm_hash_item hash;
struct list_head list;
enum drm_object_type type;
atomic_t refcount;
int shareable;
struct drm_file *owner;
void (*ref_struct_locked) (struct drm_file *priv,
struct drm_user_object *obj,
enum drm_ref_type ref_action);
void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
enum drm_ref_type unref_action);
void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
};
/*
* A ref object is a structure which is used to
* keep track of references to user objects and to keep track of these
* references so that they can be destroyed for example when the user space
* process exits. Designed to be accessible using a pointer to the _user_ object.
*/
struct drm_ref_object {
struct drm_hash_item hash;
struct list_head list;
atomic_t refcount;
enum drm_ref_type unref_action;
};
#define DRM_FENCE_FLAG_EMIT 0x00000001
#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
/**
* Must be called with the struct_mutex held.
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
#define DRM_FENCE_FLAG_NO_USER 0x00000010
/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER 0xFF000000
#define DRM_FENCE_TYPE_EXE 0x00000001
struct drm_fence_arg {
unsigned int handle;
unsigned int fence_class;
unsigned int type;
unsigned int flags;
unsigned int signaled;
unsigned int error;
unsigned int sequence;
unsigned int pad64;
uint64_t expand_pad[2]; /*Future expansion */
};
/* Buffer permissions, referring to how the GPU uses the buffers.
* these translate to fence types used for the buffers.
* Typically a texture buffer is read, a destination buffer is write, and
* a command (batch-) buffer is exe. Can be or-ed together.
*/
extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable);
#define DRM_BO_FLAG_READ (1ULL << 0)
#define DRM_BO_FLAG_WRITE (1ULL << 1)
#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
* All of the bits related to access mode
*/
#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
* Status flags. Can be read to determine the actual state of a buffer.
* Can also be set in the buffer mask before validation.
*/
/*
* Mask: Never evict this buffer. Not even with force. This type of buffer is only
* available to root and must be manually removed before buffer manager shutdown
* or lock.
* Flags: Acknowledge
*/
#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache coherent memory if validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
#define DRM_BO_FLAG_CACHED (1ULL << 7)
/* Mask: Make sure that every time this buffer is validated,
* it ends up on the same location provided that the memory mask is the same.
* The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer but it may be thrown out as
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
* with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
* with unsnooped PTEs instead of snooped, by using chipset-specific cache
* flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
* as the eviction to local memory (TTM unbind) on map is just a side effect
* to prevent aggressive cache prefetch from the GPU disturbing the cache
* management that the DRM is doing.
*
* Flags: Acknowledge.
* Buffers allocated with this flag should not be used for suballocators
* This type may have issues on CPUs with over-aggressive caching
* http://marc.info/?l=linux-kernel&m=102376926732464&w=2
*/
#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
/*
* Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE (1ULL << 15)
/*
* Memory type flags that can be or'ed together in the mask, but only
* one appears in flags.
*/
/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */
/*
* This is a mask covering all of the memory type flags; easier to just
* use a single constant than a bunch of | values. It covers
* DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
*/
#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
/*
* This adds all of the CPU-mapping options in with the memory
* type to label all bits which change how the page gets mapped
*/
#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
DRM_BO_FLAG_CACHED_MAPPED | \
DRM_BO_FLAG_CACHED | \
DRM_BO_FLAG_MAPPABLE)
/* Driver-private flags */
#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
/*
* Don't block on validate and map. Instead, return EBUSY.
*/
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
/*
* Don't place this buffer on the unfenced list. This means
* that the buffer will not end up having a fence associated
* with it as a result of this operation
*/
#define DRM_BO_HINT_DONT_FENCE 0x00000004
/**
* Must be called with the struct_mutex held.
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
uint32_t key);
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
/*
* Must be called with the struct_mutex held. May temporarily release it.
* The client has computed relocations referring to this buffer using the
* offset in the presumed_offset field. If that offset ends up matching
* where this buffer lands, the kernel is free to skip executing those
* relocations
*/
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
extern int drm_add_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);
/*
* Must be called with the struct_mutex held.
*/
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7
struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
#define DRM_BO_MEM_TYPES 8 /* For now. */
#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type,
struct drm_user_object **object);
extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type);
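With the user-object paths gone, kernel code validates buffers directly through the new five-argument drm_bo_do_validate() declared below. A minimal hedged sketch of the pattern, modelled on the radeon_gem call sites later in this commit; example_pin_bo is a hypothetical helper:
/* Sketch: set DRM_BO_FLAG_NO_EVICT on a kernel bo without attaching a
 * fence.  'flags' gives the requested values for the bits selected by
 * 'mask'; fence_class 0 is the default class. */
static int example_pin_bo(struct drm_buffer_object *bo)
{
	return drm_bo_do_validate(bo, DRM_BO_FLAG_NO_EVICT,
				  DRM_BO_FLAG_NO_EVICT,
				  DRM_BO_HINT_DONT_FENCE, 0);
}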
/***************************************************
* Fence objects. (drm_fence.c)
*/
struct drm_fence_object {
struct drm_user_object base;
struct drm_device *dev;
atomic_t usage;
@@ -470,7 +561,6 @@ enum drm_bo_type {
struct drm_buffer_object {
struct drm_device *dev;
struct drm_user_object base;
/*
* If there is a possibility that the usage variable is zero,
@@ -546,7 +636,7 @@ struct drm_mem_type_manager {
};
struct drm_bo_lock {
struct drm_user_object base;
// struct drm_user_object base;
wait_queue_head_t queue;
atomic_t write_lock_pending;
atomic_t readers;
@@ -655,22 +745,10 @@ struct drm_bo_driver {
/*
* buffer objects (drm_bo.c)
*/
extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class);
extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
@@ -707,18 +785,9 @@ extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_c
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size,
int kern_init);
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
uint32_t handle,
int check_owner);
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
struct drm_bo_info_rep *rep);
extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
@@ -766,8 +835,6 @@ extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
unsigned long dst_offset,
unsigned long *pfn,
pgprot_t *prot);
extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
struct drm_bo_info_rep *rep);
/*

View file

@@ -72,7 +72,7 @@ void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
return;
}
#endif
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
EXPORT_SYMBOL(drm_ttm_cache_flush);

View file

@@ -1,311 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
{
return drm_agp_init_ttm(dev);
}
int i915_fence_type(struct drm_buffer_object *bo,
uint32_t *fclass,
uint32_t *type)
{
if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
return 0;
}
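The literal fence types map to the fence bits used throughout this commit; a hedged equivalent spelling, assuming DRM_I915_FENCE_TYPE_RW is (1 << 1), as its pairing with DRM_FENCE_TYPE_EXE in the deleted i915_execbuf.c below suggests:
/* Sketch: the same values written symbolically. */
if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
	*type = DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW;	/* 1 | 2 == 3 */
else
	*type = DRM_FENCE_TYPE_EXE;				/* == 1 */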
int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
*/
uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
if (flags & DRM_BO_FLAG_READ)
flush_cmd |= MI_READ_FLUSH;
if (flags & DRM_BO_FLAG_EXE)
flush_cmd |= MI_EXE_FLUSH;
return i915_emit_mi_flush(dev, flush_cmd);
}
int i915_init_mem_type(struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CACHED;
man->drm_bus_maptype = 0;
man->gpu_offset = 0;
break;
case DRM_BO_MEM_TT:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
man->gpu_offset = 0;
break;
case DRM_BO_MEM_VRAM:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
man->gpu_offset = 0;
break;
case DRM_BO_MEM_PRIV0: /* for OS preallocated space */
DRM_ERROR("PRIV0 not used yet.\n");
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
/*
* i915_evict_flags:
*
* @bo: the buffer object to be evicted
*
* Return the bo flags for a buffer which is not mapped to the hardware.
* These will be placed in proposed_flags so that when the move is
* finished, they'll end up in bo->mem.flags
*/
uint64_t i915_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return DRM_BO_FLAG_MEM_LOCAL;
default:
return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
}
}
#if 0 /* See comment below */
static void i915_emit_copy_blit(struct drm_device * dev,
uint32_t src_offset,
uint32_t dst_offset,
uint32_t pages, int direction)
{
uint32_t cur_pages;
uint32_t stride = PAGE_SIZE;
struct drm_i915_private *dev_priv = dev->dev_private;
RING_LOCALS;
if (!dev_priv)
return;
i915_kernel_lost_context(dev);
while (pages > 0) {
cur_pages = pages;
if (cur_pages > 2048)
cur_pages = 2048;
pages -= cur_pages;
BEGIN_LP_RING(6);
OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB);
OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
(1 << 25) | (direction ? (1 << 30) : 0));
OUT_RING((cur_pages << 16) | PAGE_SIZE);
OUT_RING(dst_offset);
OUT_RING(stride & 0xffff);
OUT_RING(src_offset);
ADVANCE_LP_RING();
}
return;
}
static int i915_move_blit(struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
struct drm_bo_mem_reg *old_mem = &bo->mem;
int dir = 0;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->mm_node->start <
old_mem->mm_node->start + old_mem->mm_node->size)) {
dir = 1;
}
i915_emit_copy_blit(bo->dev,
old_mem->mm_node->start << PAGE_SHIFT,
new_mem->mm_node->start << PAGE_SHIFT,
new_mem->num_pages, dir);
i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
DRM_FENCE_TYPE_EXE |
DRM_I915_FENCE_TYPE_RW,
DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
}
/*
* Flip destination ttm into cached-coherent AGP,
* then blit and subsequently move out again.
*/
static int i915_move_flip(struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_bo_mem_reg tmp_mem;
int ret;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
if (ret)
return ret;
ret = drm_bind_ttm(bo->ttm, &tmp_mem);
if (ret)
goto out_cleanup;
ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
if (ret)
goto out_cleanup;
ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
return ret;
}
#endif
/*
* Disable i915_move_flip for now, since we can't guarantee that the hardware
* lock is held here. To re-enable we need to make sure either
* a) The X server is using DRM to submit commands to the ring, or
* b) DRM can use the HP ring for these blits. This means i915 needs to
* implement a new ring submission mechanism and fence class.
*/
int i915_move(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_bo_mem_reg *old_mem = &bo->mem;
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else {
if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
return 0;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
#endif
static inline void drm_cache_flush_addr(void *virt)
{
#ifdef cpu_has_clflush
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(virt+i);
#endif
}
static inline void drm_cache_flush_page(struct page *p)
{
drm_cache_flush_addr(page_address(p));
}
void i915_flush_ttm(struct drm_ttm *ttm)
{
int i;
if (!ttm)
return;
DRM_MEMORYBARRIER();
#ifdef CONFIG_X86_32
#ifndef cpu_has_clflush
#define cpu_has_clflush 0
#endif
/* Hopefully nobody has built an x86-64 processor without clflush */
if (!cpu_has_clflush) {
wbinvd();
DRM_MEMORYBARRIER();
return;
}
#endif
for (i = ttm->num_pages - 1; i >= 0; i--)
drm_cache_flush_page(drm_ttm_get_page(ttm, i));
DRM_MEMORYBARRIER();
}

View file

@@ -1,921 +0,0 @@
/*
* Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
* Keith Packard
* ... ?
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#if DRM_DEBUG_CODE
#define DRM_DEBUG_RELOCATION (drm_debug != 0)
#else
#define DRM_DEBUG_RELOCATION 0
#endif
enum i915_buf_idle {
I915_RELOC_UNCHECKED,
I915_RELOC_IDLE,
I915_RELOC_BUSY
};
struct i915_relocatee_info {
struct drm_buffer_object *buf;
unsigned long offset;
uint32_t *data_page;
unsigned page_offset;
struct drm_bo_kmap_obj kmap;
int is_iomem;
int dst;
int idle;
int performed_ring_relocs;
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
unsigned long pfn;
pgprot_t pg_prot;
#endif
};
struct drm_i915_validate_buffer {
struct drm_buffer_object *buffer;
int presumed_offset_correct;
void __user *data;
int ret;
enum i915_buf_idle idle;
};
/*
* I'd like to use MI_STORE_DATA_IMM here, but I can't make
* it work. Seems like GART writes are broken with that
* instruction. Also I'm not sure that MI_FLUSH will
* act as a memory barrier for that instruction. It will
* for this single dword 2D blit.
*/
static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
uint32_t value)
{
struct drm_i915_private *dev_priv =
(struct drm_i915_private *)dev->dev_private;
RING_LOCALS;
i915_kernel_lost_context(dev);
BEGIN_LP_RING(6);
OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
OUT_RING((0x1 << 16) | (0x4));
OUT_RING(offset);
OUT_RING(value);
OUT_RING(0);
ADVANCE_LP_RING();
}
static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
*buffers, unsigned num_buffers)
{
while (num_buffers--)
drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
}
int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
struct drm_i915_validate_buffer *buffers,
struct i915_relocatee_info *relocatee, uint32_t * reloc)
{
unsigned index;
unsigned long new_cmd_offset;
u32 val;
int ret, i;
int buf_index = -1;
/*
* FIXME: O(relocs * buffers) complexity.
*/
for (i = 0; i <= num_buffers; i++)
if (buffers[i].buffer)
if (reloc[2] == buffers[i].buffer->base.hash.key)
buf_index = i;
if (buf_index == -1) {
DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
return -EINVAL;
}
/*
* Short-circuit relocations that were correctly
* guessed by the client
*/
if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
return 0;
new_cmd_offset = reloc[0];
if (!relocatee->data_page ||
!drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
drm_bo_kunmap(&relocatee->kmap);
relocatee->data_page = NULL;
relocatee->offset = new_cmd_offset;
if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
if (ret)
return ret;
relocatee->idle = I915_RELOC_IDLE;
}
if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
(mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
drm_bo_evict_cached(relocatee->buf);
ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
1, &relocatee->kmap);
if (ret) {
DRM_ERROR
("Could not map command buffer to apply relocs\n %08lx",
new_cmd_offset);
return ret;
}
relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
&relocatee->is_iomem);
relocatee->page_offset = (relocatee->offset & PAGE_MASK);
}
val = buffers[buf_index].buffer->offset;
index = (reloc[0] - relocatee->page_offset) >> 2;
/* add in validate */
val = val + reloc[1];
if (DRM_DEBUG_RELOCATION) {
if (buffers[buf_index].presumed_offset_correct &&
relocatee->data_page[index] != val) {
DRM_DEBUG
("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
reloc[0], reloc[1], buf_index,
relocatee->data_page[index], val);
}
}
if (relocatee->is_iomem)
iowrite32(val, relocatee->data_page + index);
else
relocatee->data_page[index] = val;
return 0;
}
int i915_process_relocs(struct drm_file *file_priv,
uint32_t buf_handle,
uint32_t __user ** reloc_user_ptr,
struct i915_relocatee_info *relocatee,
struct drm_i915_validate_buffer *buffers,
uint32_t num_buffers)
{
int ret, reloc_stride;
uint32_t cur_offset;
uint32_t reloc_count;
uint32_t reloc_type;
uint32_t reloc_buf_size;
uint32_t *reloc_buf = NULL;
int i;
/* do a copy from user from the user ptr */
ret = get_user(reloc_count, *reloc_user_ptr);
if (ret) {
DRM_ERROR("Could not map relocation buffer.\n");
goto out;
}
ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
if (ret) {
DRM_ERROR("Could not map relocation buffer.\n");
goto out;
}
if (reloc_type != 0) {
DRM_ERROR("Unsupported relocation type requested\n");
ret = -EINVAL;
goto out;
}
reloc_buf_size =
(I915_RELOC_HEADER +
(reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
if (!reloc_buf) {
DRM_ERROR("Out of memory for reloc buffer\n");
ret = -ENOMEM;
goto out;
}
if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
ret = -EFAULT;
goto out;
}
/* get next relocate buffer handle */
*reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
*reloc_user_ptr);
for (i = 0; i < reloc_count; i++) {
cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
ret = i915_apply_reloc(file_priv, num_buffers, buffers,
relocatee, reloc_buf + cur_offset);
if (ret)
goto out;
}
out:
if (reloc_buf)
kfree(reloc_buf);
if (relocatee->data_page) {
drm_bo_kunmap(&relocatee->kmap);
relocatee->data_page = NULL;
}
return ret;
}
static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
uint32_t __user * reloc_user_ptr,
struct drm_i915_validate_buffer *buffers,
uint32_t buf_count)
{
struct drm_device *dev = file_priv->minor->dev;
struct i915_relocatee_info relocatee;
int ret = 0;
int b;
/*
* Short circuit relocations when all previous
* buffers offsets were correctly guessed by
* the client
*/
if (!DRM_DEBUG_RELOCATION) {
for (b = 0; b < buf_count; b++)
if (!buffers[b].presumed_offset_correct)
break;
if (b == buf_count)
return 0;
}
memset(&relocatee, 0, sizeof(relocatee));
relocatee.idle = I915_RELOC_UNCHECKED;
mutex_lock(&dev->struct_mutex);
relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!relocatee.buf) {
DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
ret = -EINVAL;
goto out_err;
}
mutex_lock(&relocatee.buf->mutex);
while (reloc_user_ptr) {
ret =
i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
&relocatee, buffers, buf_count);
if (ret) {
DRM_ERROR("process relocs failed\n");
goto out_err1;
}
}
out_err1:
mutex_unlock(&relocatee.buf->mutex);
drm_bo_usage_deref_unlocked(&relocatee.buf);
out_err:
return ret;
}
static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
{
if (relocatee->data_page) {
#ifndef DRM_KMAP_ATOMIC_PROT_PFN
drm_bo_kunmap(&relocatee->kmap);
#else
kunmap_atomic(relocatee->data_page, KM_USER0);
#endif
relocatee->data_page = NULL;
}
relocatee->buf = NULL;
relocatee->dst = ~0;
}
static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
struct drm_i915_validate_buffer *buffers,
unsigned int dst, unsigned long dst_offset)
{
int ret;
if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
i915_clear_relocatee(relocatee);
relocatee->dst = dst;
relocatee->buf = buffers[dst].buffer;
relocatee->idle = buffers[dst].idle;
/*
* Check for buffer idle. If the buffer is busy, revert to
* ring relocations.
*/
if (relocatee->idle == I915_RELOC_UNCHECKED) {
preempt_enable();
mutex_lock(&relocatee->buf->mutex);
ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
if (ret == 0)
relocatee->idle = I915_RELOC_IDLE;
else {
relocatee->idle = I915_RELOC_BUSY;
relocatee->performed_ring_relocs = 1;
}
mutex_unlock(&relocatee->buf->mutex);
preempt_disable();
buffers[dst].idle = relocatee->idle;
}
}
if (relocatee->idle == I915_RELOC_BUSY)
return 0;
if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
DRM_ERROR("Relocation destination out of bounds.\n");
return -EINVAL;
}
if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
NULL == relocatee->data_page)) {
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
if (NULL != relocatee->data_page) {
kunmap_atomic(relocatee->data_page, KM_USER0);
relocatee->data_page = NULL;
}
ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
&relocatee->pfn, &relocatee->pg_prot);
if (ret) {
DRM_ERROR("Can't map relocation destination.\n");
return -EINVAL;
}
relocatee->data_page =
kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
relocatee->pg_prot);
#else
if (NULL != relocatee->data_page) {
drm_bo_kunmap(&relocatee->kmap);
relocatee->data_page = NULL;
}
ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
1, &relocatee->kmap);
if (ret) {
DRM_ERROR("Can't map relocation destination.\n");
return ret;
}
relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
&relocatee->is_iomem);
#endif
relocatee->page_offset = dst_offset & PAGE_MASK;
}
return 0;
}
static int i915_apply_post_reloc(uint32_t reloc[],
struct drm_i915_validate_buffer *buffers,
uint32_t num_buffers,
struct i915_relocatee_info *relocatee)
{
uint32_t reloc_buffer = reloc[2];
uint32_t dst_buffer = reloc[3];
uint32_t val;
uint32_t index;
int ret;
if (likely(buffers[reloc_buffer].presumed_offset_correct))
return 0;
if (unlikely(reloc_buffer >= num_buffers)) {
DRM_ERROR("Invalid reloc buffer index.\n");
return -EINVAL;
}
if (unlikely(dst_buffer >= num_buffers)) {
DRM_ERROR("Invalid dest buffer index.\n");
return -EINVAL;
}
ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
if (unlikely(ret))
return ret;
val = buffers[reloc_buffer].buffer->offset;
index = (reloc[0] - relocatee->page_offset) >> 2;
val = val + reloc[1];
if (relocatee->idle == I915_RELOC_BUSY) {
i915_emit_ring_reloc(relocatee->buf->dev,
relocatee->buf->offset + reloc[0], val);
return 0;
}
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
relocatee->data_page[index] = val;
#else
if (likely(relocatee->is_iomem))
iowrite32(val, relocatee->data_page + index);
else
relocatee->data_page[index] = val;
#endif
return 0;
}
static int i915_post_relocs(struct drm_file *file_priv,
uint32_t __user * new_reloc_ptr,
struct drm_i915_validate_buffer *buffers,
unsigned int num_buffers)
{
uint32_t *reloc;
uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
struct i915_relocatee_info relocatee;
uint32_t reloc_type;
uint32_t num_relocs;
uint32_t count;
int ret = 0;
int i;
int short_circuit = 1;
uint32_t __user *reloc_ptr;
uint64_t new_reloc_data;
uint32_t reloc_buf_size;
uint32_t *reloc_buf;
for (i = 0; i < num_buffers; ++i) {
if (unlikely(!buffers[i].presumed_offset_correct)) {
short_circuit = 0;
break;
}
}
if (likely(short_circuit))
return 0;
memset(&relocatee, 0, sizeof(relocatee));
while (new_reloc_ptr) {
reloc_ptr = new_reloc_ptr;
ret = get_user(num_relocs, reloc_ptr);
if (unlikely(ret))
goto out;
if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
header_size +
num_relocs * reloc_stride)))
return -EFAULT;
ret = __get_user(reloc_type, reloc_ptr + 1);
if (unlikely(ret))
goto out;
if (unlikely(reloc_type != 1)) {
DRM_ERROR("Unsupported relocation type requested.\n");
ret = -EINVAL;
goto out;
}
ret = __get_user(new_reloc_data, reloc_ptr + 2);
new_reloc_ptr = (uint32_t __user *) (unsigned long)
new_reloc_data;
reloc_ptr += I915_RELOC_HEADER;
if (num_relocs == 0)
goto out;
reloc_buf_size =
(num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
if (!reloc_buf) {
DRM_ERROR("Out of memory for reloc buffer\n");
ret = -ENOMEM;
goto out;
}
if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
ret = -EFAULT;
goto out;
}
reloc = reloc_buf;
preempt_disable();
for (count = 0; count < num_relocs; ++count) {
ret = i915_apply_post_reloc(reloc, buffers,
num_buffers, &relocatee);
if (unlikely(ret)) {
preempt_enable();
goto out;
}
reloc += I915_RELOC0_STRIDE;
}
preempt_enable();
if (reloc_buf) {
kfree(reloc_buf);
reloc_buf = NULL;
}
i915_clear_relocatee(&relocatee);
}
out:
/*
* Flush ring relocs so the command parser will pick them up.
*/
if (relocatee.performed_ring_relocs)
(void)i915_emit_mi_flush(file_priv->minor->dev, 0);
i915_clear_relocatee(&relocatee);
if (reloc_buf) {
kfree(reloc_buf);
reloc_buf = NULL;
}
return ret;
}
static int i915_check_presumed(struct drm_i915_op_arg *arg,
struct drm_buffer_object *bo,
uint32_t __user * data, int *presumed_ok)
{
struct drm_bo_op_req *req = &arg->d.req;
uint32_t hint_offset;
uint32_t hint = req->bo_req.hint;
*presumed_ok = 0;
if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
return 0;
if (bo->offset == req->bo_req.presumed_offset) {
*presumed_ok = 1;
return 0;
}
/*
* We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
* the user-space IOCTL argument list, since the buffer has moved,
* we're about to apply relocations and we might subsequently
* hit an -EAGAIN. In that case the argument list will be reused by
* user-space, but the presumed offset is no longer valid.
*
* Needless to say, this is a bit ugly.
*/
hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
return __put_user(hint, data + hint_offset);
}
/*
* Validate, add fence and relocate a block of bos from a userspace list
*/
int i915_validate_buffer_list(struct drm_file *file_priv,
unsigned int fence_class, uint64_t data,
struct drm_i915_validate_buffer *buffers,
uint32_t * num_buffers,
uint32_t __user ** post_relocs)
{
struct drm_i915_op_arg arg;
struct drm_bo_op_req *req = &arg.d.req;
int ret = 0;
unsigned buf_count = 0;
uint32_t buf_handle;
uint32_t __user *reloc_user_ptr;
struct drm_i915_validate_buffer *item = buffers;
*post_relocs = NULL;
do {
if (buf_count >= *num_buffers) {
DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
ret = -EINVAL;
goto out_err;
}
item = buffers + buf_count;
item->buffer = NULL;
item->presumed_offset_correct = 0;
item->idle = I915_RELOC_UNCHECKED;
if (copy_from_user
(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
ret = -EFAULT;
goto out_err;
}
ret = 0;
if (req->op != drm_bo_validate) {
DRM_ERROR
("Buffer object operation wasn't \"validate\".\n");
ret = -EINVAL;
goto out_err;
}
item->ret = 0;
item->data = (void __user *)(unsigned long)data;
buf_handle = req->bo_req.handle;
reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
/*
* Switch mode to post-validation relocations?
*/
if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
(reloc_user_ptr != NULL))) {
uint32_t reloc_type;
ret = get_user(reloc_type, reloc_user_ptr + 1);
if (ret)
goto out_err;
if (reloc_type == 1)
*post_relocs = reloc_user_ptr;
}
if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
ret =
i915_exec_reloc(file_priv, buf_handle,
reloc_user_ptr, buffers, buf_count);
if (ret)
goto out_err;
DRM_MEMORYBARRIER();
}
ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
req->bo_req.flags,
req->bo_req.mask, req->bo_req.hint,
req->bo_req.fence_class,
NULL, &item->buffer);
if (ret) {
DRM_ERROR("error on handle validate %d\n", ret);
goto out_err;
}
buf_count++;
ret = i915_check_presumed(&arg, item->buffer,
(uint32_t __user *)
(unsigned long)data,
&item->presumed_offset_correct);
if (ret)
goto out_err;
data = arg.next;
} while (data != 0);
out_err:
*num_buffers = buf_count;
item->ret = (ret != -EAGAIN) ? ret : 0;
return ret;
}
/*
* Remove all buffers from the unfenced list.
* If the execbuffer operation was aborted, for example due to a signal,
* this also makes sure that buffers retain their original state and
* fence pointers.
* Copy back buffer information to user-space unless we were interrupted
* by a signal, in which case the IOCTL must be rerun.
*/
static int i915_handle_copyback(struct drm_device *dev,
struct drm_i915_validate_buffer *buffers,
unsigned int num_buffers, int ret)
{
int err = ret;
int i;
struct drm_i915_op_arg arg;
struct drm_buffer_object *bo;
if (ret)
drm_putback_buffer_objects(dev);
if (ret != -EAGAIN) {
for (i = 0; i < num_buffers; ++i) {
arg.handled = 1;
arg.d.rep.ret = buffers->ret;
bo = buffers->buffer;
mutex_lock(&bo->mutex);
drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
mutex_unlock(&bo->mutex);
if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
err = -EFAULT;
buffers++;
}
}
return err;
}
/*
* Create a fence object, and if that fails, pretend that everything is
* OK and just idle the GPU.
*/
void i915_fence_or_sync(struct drm_file *file_priv,
uint32_t fence_flags,
struct drm_fence_arg *fence_arg,
struct drm_fence_object **fence_p)
{
struct drm_device *dev = file_priv->minor->dev;
int ret;
struct drm_fence_object *fence;
ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
if (ret) {
/*
* Fence creation failed.
* Fall back to synchronous operation and idle the engine.
*/
(void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
(void)i915_quiescent(dev);
if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
/*
* Communicate to user-space that
* fence creation has failed and that
* the engine is idle.
*/
fence_arg->handle = ~0;
fence_arg->error = ret;
}
drm_putback_buffer_objects(dev);
if (fence_p)
*fence_p = NULL;
return;
}
if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
ret = drm_fence_add_user_object(file_priv, fence,
fence_flags &
DRM_FENCE_FLAG_SHAREABLE);
if (!ret)
drm_fence_fill_arg(fence, fence_arg);
else {
/*
* Fence user object creation failed.
* We must idle the engine here as well, as user-
* space expects a fence object to wait on. Since we
* have a fence object we wait for it to signal
* to indicate engine "sufficiently" idle.
*/
(void)drm_fence_object_wait(fence, 0, 1, fence->type);
drm_fence_usage_deref_unlocked(&fence);
fence_arg->handle = ~0;
fence_arg->error = ret;
}
}
if (fence_p)
*fence_p = fence;
else if (fence)
drm_fence_usage_deref_unlocked(&fence);
}
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *)
dev->dev_private;
struct drm_i915_master_private *master_priv =
(struct drm_i915_master_private *)
dev->primary->master->driver_priv;
struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
master_priv->sarea_priv;
struct drm_i915_execbuffer *exec_buf = data;
struct drm_i915_batchbuffer *batch = &exec_buf->batch;
struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
int num_buffers;
int ret;
uint32_t __user *post_relocs;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
batch->num_cliprects *
sizeof(struct
drm_clip_rect)))
return -EFAULT;
if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
return -EINVAL;
ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (ret)
return ret;
/*
* The cmdbuf_mutex makes sure the validate-submit-fence
* operation is atomic.
*/
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (ret) {
drm_bo_read_unlock(&dev->bm.bm_lock);
return -EAGAIN;
}
num_buffers = exec_buf->num_buffers;
if (!dev_priv->val_bufs) {
dev_priv->val_bufs =
vmalloc(sizeof(struct drm_i915_validate_buffer) *
dev_priv->max_validate_buffers);
}
if (!dev_priv->val_bufs) {
drm_bo_read_unlock(&dev->bm.bm_lock);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return -ENOMEM;
}
/* validate buffer list + fixup relocations */
ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
dev_priv->val_bufs, &num_buffers,
&post_relocs);
if (ret)
goto out_err0;
if (post_relocs) {
ret = i915_post_relocs(file_priv, post_relocs,
dev_priv->val_bufs, num_buffers);
if (ret)
goto out_err0;
}
/* make sure all previous memory operations have passed */
DRM_MEMORYBARRIER();
if (!post_relocs) {
drm_agp_chipset_flush(dev);
batch->start =
dev_priv->val_bufs[num_buffers - 1].buffer->offset;
} else {
batch->start += dev_priv->val_bufs[0].buffer->offset;
}
DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
ret = i915_dispatch_batchbuffer(dev, batch);
if (ret)
goto out_err0;
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
out_err0:
ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
mutex_lock(&dev->struct_mutex);
i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->cmdbuf_mutex);
drm_bo_read_unlock(&dev->bm.bm_lock);
return ret;
}

View file

@@ -1,273 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/*
* Initiate a sync flush if it's not already pending.
*/
static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
struct drm_fence_class_manager *fc)
{
if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
!dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fc->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
dev_priv->flush_pending = 1;
fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}
}
static inline void i915_report_rwflush(struct drm_device *dev,
struct drm_i915_private *dev_priv)
{
if (unlikely(dev_priv->flush_pending)) {
uint32_t flush_flags;
uint32_t i_status;
uint32_t flush_sequence;
i_status = READ_HWSP(dev_priv, 0);
if ((i_status & (1 << 12)) !=
(dev_priv->saved_flush_status & (1 << 12))) {
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, 0, flush_sequence,
flush_flags, 0);
}
}
}
static void i915_fence_flush(struct drm_device *dev,
uint32_t fence_class)
{
struct drm_i915_private *dev_priv =
(struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
unsigned long irq_flags;
if (unlikely(!dev_priv))
return;
write_lock_irqsave(&fm->lock, irq_flags);
i915_initiate_rwflush(dev_priv, fc);
write_unlock_irqrestore(&fm->lock, irq_flags);
}
static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
uint32_t waiting_types)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
uint32_t sequence;
if (unlikely(!dev_priv))
return;
/*
* First, report any executed sync flush:
*/
i915_report_rwflush(dev, dev_priv);
/*
* Report a new breadcrumb, and adjust IRQs.
*/
if (waiting_types & DRM_FENCE_TYPE_EXE) {
sequence = READ_BREADCRUMB(dev_priv);
drm_fence_handler(dev, 0, sequence,
DRM_FENCE_TYPE_EXE, 0);
if (dev_priv->fence_irq_on &&
!(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
i915_user_irq_off(dev);
dev_priv->fence_irq_on = 0;
} else if (!dev_priv->fence_irq_on &&
(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
i915_user_irq_on(dev);
dev_priv->fence_irq_on = 1;
}
}
/*
* There may be new RW flushes pending. Start them.
*/
i915_initiate_rwflush(dev_priv, fc);
/*
* And possibly, but unlikely, they finish immediately.
*/
i915_report_rwflush(dev, dev_priv);
}
static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
uint32_t flags, uint32_t *sequence,
uint32_t *native_type)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
if (unlikely(!dev_priv))
return -EINVAL;
i915_emit_irq(dev);
*sequence = (uint32_t) dev_priv->counter;
*native_type = DRM_FENCE_TYPE_EXE;
if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
*native_type |= DRM_I915_FENCE_TYPE_RW;
return 0;
}
void i915_fence_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
write_lock(&fm->lock);
if (likely(dev_priv->fence_irq_on))
i915_fence_poll(dev, 0, fc->waiting_types);
write_unlock(&fm->lock);
}
/*
* We need a separate wait function since we need to poll for
* sync flushes.
*/
static int i915_fence_wait(struct drm_fence_object *fence,
int lazy, int interruptible, uint32_t mask)
{
struct drm_device *dev = fence->dev;
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
int ret;
unsigned long _end = jiffies + 3 * DRM_HZ;
drm_fence_object_flush(fence, mask);
if (likely(interruptible))
ret = wait_event_interruptible_timeout
(fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
3 * DRM_HZ);
else
ret = wait_event_timeout
(fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
3 * DRM_HZ);
if (unlikely(ret == -ERESTARTSYS))
return -EAGAIN;
if (unlikely(ret == 0))
return -EBUSY;
if (likely(mask == DRM_FENCE_TYPE_EXE ||
drm_fence_object_signaled(fence, mask)))
return 0;
/*
* Remove this code snippet when fixed. HWSTAM doesn't let
* flush info through...
*/
if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
unsigned long irq_flags;
DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
msleep(100);
dev_priv->flush_pending = 0;
write_lock_irqsave(&fm->lock, irq_flags);
drm_fence_handler(dev, fence->fence_class,
fence->sequence, fence->type, 0);
write_unlock_irqrestore(&fm->lock, irq_flags);
}
/*
* Poll for sync flush completion.
*/
return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
}
static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
{
uint32_t flush_flags = fence->waiting_types &
~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
if (likely(flush_flags == 0 ||
((flush_flags & ~fence->native_types) == 0) ||
(fence->signaled_types != DRM_FENCE_TYPE_EXE)))
return 0;
else {
struct drm_device *dev = fence->dev;
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_driver *driver = dev->driver->fence_driver;
if (unlikely(!dev_priv))
return 0;
if (dev_priv->flush_pending) {
uint32_t diff = (dev_priv->flush_sequence - fence->sequence) &
driver->sequence_mask;
if (diff < driver->wrap_diff)
return 0;
}
}
return flush_flags;
}
struct drm_fence_driver i915_fence_driver = {
.num_classes = 1,
.wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
.flush_diff = (1U << (BREADCRUMB_BITS - 2)),
.sequence_mask = BREADCRUMB_MASK,
.has_irq = NULL,
.emit = i915_fence_emit_sequence,
.flush = i915_fence_flush,
.poll = i915_fence_poll,
.needed_flush = i915_fence_needed_flush,
.wait = i915_fence_wait,
};

View file

@@ -245,8 +245,7 @@ int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
if (!(obj_priv->bo->type != drm_bo_type_kernel && !DRM_SUSER(DRM_CURPROC))) {
ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
DRM_BO_HINT_DONT_FENCE,
0, NULL);
DRM_BO_HINT_DONT_FENCE, 0);
} else
ret = 0;
@@ -276,8 +275,7 @@ int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
/* validate into a pin with no fence */
ret = drm_bo_do_validate(obj_priv->bo, DRM_BO_FLAG_NO_EVICT, DRM_BO_FLAG_NO_EVICT,
DRM_BO_HINT_DONT_FENCE,
0, NULL);
DRM_BO_HINT_DONT_FENCE, 0);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
@@ -321,7 +319,7 @@ int radeon_gem_indirect_ioctl(struct drm_device *dev, void *data,
//VB_AGE_TEST_WITH_RETURN(dev_priv);
ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
0 , 0, NULL);
0 , 0);
if (ret)
return ret;
@@ -693,7 +691,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj,
obj_priv = obj->driver_private;
ret = drm_bo_do_validate(obj_priv->bo, 0, DRM_BO_FLAG_NO_EVICT,
DRM_BO_HINT_DONT_FENCE, 0, NULL);
DRM_BO_HINT_DONT_FENCE, 0);
return ret;
}
@@ -743,7 +741,7 @@ int radeon_gem_ib_get(struct drm_device *dev, void **ib, uint32_t dwords, uint32
ret = drm_bo_do_validate(dev_priv->ib_objs[index]->bo, 0,
DRM_BO_FLAG_NO_EVICT,
0, 0, NULL);
0, 0);
if (ret) {
DRM_ERROR("Failed to validate IB %d\n", index);
return -EINVAL;
@@ -761,7 +759,6 @@ static void radeon_gem_ib_free(struct drm_device *dev, void *ib, uint32_t dwords
struct drm_fence_object *fence;
int ret;
int i;
RING_LOCALS;
for (i = 0; i < RADEON_NUM_IB; i++) {
@@ -821,7 +818,7 @@ static int radeon_gem_relocate(struct drm_device *dev, struct drm_file *file_pri
flags = DRM_BO_FLAG_MEM_TT;
}
ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM, 0, 0, NULL);
ret = drm_bo_do_validate(obj_priv->bo, flags, DRM_BO_MASK_MEM, 0, 0);
if (ret)
return ret;

View file

@@ -675,324 +675,6 @@ struct drm_set_version {
int drm_dd_minor;
};
#define DRM_FENCE_FLAG_EMIT 0x00000001
#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
/**
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
#define DRM_FENCE_FLAG_NO_USER 0x00000010
/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER 0xFF000000
#define DRM_FENCE_TYPE_EXE 0x00000001
struct drm_fence_arg {
unsigned int handle;
unsigned int fence_class;
unsigned int type;
unsigned int flags;
unsigned int signaled;
unsigned int error;
unsigned int sequence;
unsigned int pad64;
uint64_t expand_pad[2]; /*Future expansion */
};
/* Buffer permissions, referring to how the GPU uses the buffers.
* These translate to fence types used for the buffers.
* Typically a texture buffer is read, a destination buffer is write and
* a command (batch-) buffer is exe. Can be or-ed together.
*/
#define DRM_BO_FLAG_READ (1ULL << 0)
#define DRM_BO_FLAG_WRITE (1ULL << 1)
#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
* All of the bits related to access mode
*/
#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
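Per the comment above, these bits are or-ed per GPU use; a brief illustration (variable names are hypothetical):
/* Illustration: typical access flags per buffer role. */
uint64_t texture_flags = DRM_BO_FLAG_READ;			/* sampled */
uint64_t target_flags  = DRM_BO_FLAG_WRITE;			/* rendered to */
uint64_t batch_flags   = DRM_BO_FLAG_EXE;			/* executed */
uint64_t blit_flags    = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;	/* copy src and dst */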
/*
* Status flags. Can be read to determine the actual state of a buffer.
* Can also be set in the buffer mask before validation.
*/
/*
* Mask: Never evict this buffer. Not even with force. This type of buffer is only
* available to root and must be manually removed before buffer manager shutdown
* or lock.
* Flags: Acknowledge
*/
#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache coherent memory if validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
#define DRM_BO_FLAG_CACHED (1ULL << 7)
/* Mask: Make sure that every time this buffer is validated,
* it ends up on the same location provided that the memory mask is the same.
* The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer but it may be thrown out as
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
* with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
* with unsnooped PTEs instead of snooped, by using chipset-specific cache
* flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
* as the eviction to local memory (TTM unbind) on map is just a side effect
* to prevent aggressive cache prefetch from the GPU disturbing the cache
* management that the DRM is doing.
*
* Flags: Acknowledge.
* Buffers allocated with this flag should not be used for suballocators.
* This type may have issues on CPUs with over-aggressive caching
* http://marc.info/?l=linux-kernel&m=102376926732464&w=2
*/
#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
/*
* Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE (1ULL << 15)
/*
* Memory type flags that can be or'ed together in the mask, but only
* one appears in flags.
*/
/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */
/*
* This is a mask covering all of the memory type flags; easier to just
* use a single constant than a bunch of | values. It covers
* DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
*/
#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
/*
* This adds all of the CPU-mapping options in with the memory
* type to label all bits which change how the page gets mapped
*/
#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
DRM_BO_FLAG_CACHED_MAPPED | \
DRM_BO_FLAG_CACHED | \
DRM_BO_FLAG_MAPPABLE)
/* Driver-private flags */
#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
/*
* Don't block on validate and map. Instead, return EBUSY.
*/
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
/*
* Don't place this buffer on the unfenced list. This means
* that the buffer will not end up having a fence associated
* with it as a result of this operation
*/
#define DRM_BO_HINT_DONT_FENCE 0x00000004
/**
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
/*
* The client has computed relocations referring to this buffer using the
* offset in the presumed_offset field. If that offset ends up matching
* where this buffer lands, the kernel is free to skip executing those
* relocations.
*/
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
#define DRM_BO_INIT_MAGIC 0xfe769812
#define DRM_BO_INIT_MAJOR 1
#define DRM_BO_INIT_MINOR 0
#define DRM_BO_INIT_PATCH 0
struct drm_bo_info_req {
uint64_t mask;
uint64_t flags;
unsigned int handle;
unsigned int hint;
unsigned int fence_class;
unsigned int desired_tile_stride;
unsigned int tile_info;
unsigned int pad64;
uint64_t presumed_offset;
};
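A hedged sketch of filling this request for a validate using the presumed-offset fast path (cf. DRM_BO_HINT_PRESUMED_OFFSET above and i915_check_presumed in the deleted i915_execbuf.c); the handle and offset values are hypothetical:
/* Sketch: request TT placement with read access, and tell the kernel
 * which offset the client assumed when it wrote its relocations. */
struct drm_bo_info_req req = {
	.handle          = 0x42,	/* hypothetical buffer handle */
	.mask            = DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
	.flags           = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
	.hint            = DRM_BO_HINT_PRESUMED_OFFSET,
	.fence_class     = 0,
	.presumed_offset = 0x00100000,
};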
struct drm_bo_create_req {
uint64_t flags;
uint64_t size;
uint64_t buffer_start;
unsigned int hint;
unsigned int page_alignment;
};
/*
* Reply flags
*/
#define DRM_BO_REP_BUSY 0x00000001
struct drm_bo_info_rep {
uint64_t flags;
uint64_t proposed_flags;
uint64_t size;
uint64_t offset;
uint64_t arg_handle;
uint64_t buffer_start;
unsigned int handle;
unsigned int fence_flags;
unsigned int rep_flags;
unsigned int page_alignment;
unsigned int desired_tile_stride;
unsigned int hw_tile_stride;
unsigned int tile_info;
unsigned int pad64;
uint64_t expand_pad[4]; /*Future expansion */
};
struct drm_bo_arg_rep {
struct drm_bo_info_rep bo_info;
int ret;
unsigned int pad64;
};
struct drm_bo_create_arg {
union {
struct drm_bo_create_req req;
struct drm_bo_info_rep rep;
} d;
};
struct drm_bo_handle_arg {
unsigned int handle;
};
struct drm_bo_reference_info_arg {
union {
struct drm_bo_handle_arg req;
struct drm_bo_info_rep rep;
} d;
};
struct drm_bo_map_wait_idle_arg {
union {
struct drm_bo_info_req req;
struct drm_bo_info_rep rep;
} d;
};
struct drm_bo_op_req {
enum {
drm_bo_validate,
drm_bo_fence,
drm_bo_ref_fence,
} op;
unsigned int arg_handle;
struct drm_bo_info_req bo_req;
};
struct drm_bo_op_arg {
uint64_t next;
union {
struct drm_bo_op_req req;
struct drm_bo_arg_rep rep;
} d;
int handled;
unsigned int pad64;
};
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7
#define DRM_BO_MEM_TYPES 8 /* For now. */
#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
struct drm_bo_version_arg {
uint32_t major;
uint32_t minor;
uint32_t patchlevel;
};
struct drm_mm_type_arg {
unsigned int mem_type;
unsigned int lock_flags;
};
struct drm_mm_init_arg {
unsigned int magic;
unsigned int major;
unsigned int minor;
unsigned int mem_type;
uint64_t p_offset;
uint64_t p_size;
};
struct drm_mm_info_arg {
unsigned int mem_type;
uint64_t p_size;
};
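For context on what is being deleted here: userspace brought the memory manager up by filling drm_mm_init_arg and calling DRM_IOCTL_MM_INIT. A hedged sketch of that removed flow, assuming an open DRM fd:
#include <sys/ioctl.h>
/* Sketch of the removed userspace bring-up: manage 'pages' pages of TT
 * starting at offset 0.  Fails on kernels with this commit applied. */
static int old_mm_init(int fd, uint64_t pages)
{
	struct drm_mm_init_arg arg = {
		.magic    = DRM_BO_INIT_MAGIC,
		.major    = DRM_BO_INIT_MAJOR,
		.minor    = DRM_BO_INIT_MINOR,
		.mem_type = DRM_BO_MEM_TT,
		.p_offset = 0,
		.p_size   = pages,
	};
	return ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
}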
struct drm_gem_close {
/** Handle of the object to be closed. */
uint32_t handle;
@@ -1337,31 +1019,6 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA2, struct drm_mode_get_connector)

View file

@@ -148,7 +148,7 @@ int i915_dma_cleanup(struct drm_device * dev)
return 0;
}
#if defined(I915_HAVE_BUFFER) && defined(DRI2)
#if defined(DRI2)
#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
#define DRI2_SAREA_BLOCK_NEXT(p) \
@@ -226,12 +226,7 @@ static int i915_initialize(struct drm_device * dev,
}
}
#ifdef I915_HAVE_BUFFER
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
}
#endif
if (init->ring_size != 0) {
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
@@ -285,10 +280,6 @@ static int i915_initialize(struct drm_device * dev,
}
DRM_DEBUG("Enabled hardware status page\n");
#ifdef I915_HAVE_BUFFER
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_init(&dev_priv->cmdbuf_mutex);
}
#ifdef DRI2
if (init->func == I915_INIT_DMA2) {
int ret = setup_dri2_sarea(dev, file_priv, init);
@@ -299,7 +290,6 @@ static int i915_initialize(struct drm_device * dev,
}
}
#endif /* DRI2 */
#endif /* I915_HAVE_BUFFER */
return 0;
}
@@ -565,9 +555,6 @@ int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
struct drm_i915_cmdbuffer * cmd)
{
#ifdef I915_HAVE_FENCE
struct drm_i915_private *dev_priv = dev->dev_private;
#endif
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
@@ -594,10 +581,6 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
}
i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
if (unlikely((dev_priv->counter & 0xFF) == 0))
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
return 0;
}
@@ -648,10 +631,6 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
}
i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
if (unlikely((dev_priv->counter & 0xFF) == 0))
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
return 0;
}
@@ -724,10 +703,6 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
i915_do_dispatch_flip(dev, i, sync);
i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
}
int i915_quiescent(struct drm_device *dev)
@@ -1077,9 +1052,6 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),

View file

@@ -375,58 +375,6 @@ typedef struct drm_i915_hws_addr {
uint64_t addr;
} drm_i915_hws_addr_t;
/*
* Relocation header is 4 uint32_ts
* 0 - 32 bit reloc count
* 1 - 32-bit relocation type
* 2-3 - 64-bit user buffer handle ptr for another list of relocs.
*/
#define I915_RELOC_HEADER 4
/*
* type 0 relocation has 4-uint32_t stride
* 0 - offset into buffer
* 1 - delta to add in
* 2 - buffer handle
* 3 - reserved (for optimisations later).
*/
/*
* type 1 relocation has 4-uint32_t stride.
* Hangs off the first item in the op list.
* Performed after all validations are done.
* Try to group relocs for the same relocatee together for
* performance reasons.
* 0 - offset into buffer
* 1 - delta to add in
* 2 - buffer index in op list.
* 3 - relocatee index in op list.
*/
#define I915_RELOC_TYPE_0 0
#define I915_RELOC0_STRIDE 4
#define I915_RELOC_TYPE_1 1
#define I915_RELOC1_STRIDE 4
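Given the layout described above, a hedged sketch of a single type-0 relocation list as a client would have built it (the offsets and the target handle are hypothetical):
/* Sketch: 4-dword header, then one 4-dword type-0 reloc that patches
 * the dword at byte offset 0x80 to target offset + 0x100. */
uint32_t relocs[I915_RELOC_HEADER + I915_RELOC0_STRIDE] = {
	1,			/* header: reloc count */
	I915_RELOC_TYPE_0,	/* header: relocation type */
	0, 0,			/* header: 64-bit ptr to next reloc list (none) */
	0x80,			/* reloc: offset into buffer to patch */
	0x100,			/* reloc: delta to add in */
	0xc0de,			/* reloc: hypothetical target buffer handle */
	0,			/* reloc: reserved */
};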
struct drm_i915_op_arg {
uint64_t next;
uint64_t reloc_ptr;
int handled;
unsigned int pad64;
union {
struct drm_bo_op_req req;
struct drm_bo_arg_rep rep;
} d;
};
struct drm_i915_execbuffer {
uint64_t ops_list;
uint32_t num_buffers;
struct drm_i915_batchbuffer batch;
drm_context_t context; /* for lockless use in the future */
struct drm_fence_arg fence_arg;
};
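The kernel walks ops_list by following each entry's next pointer until it reads zero (see i915_validate_buffer_list in the deleted i915_execbuf.c above); the batch buffer conventionally comes last, since its offset seeds batch->start. A hedged userspace-side sketch with hypothetical handles:
#include <string.h>
/* Sketch: chain two validate ops; a zero 'next' ends the kernel's walk. */
static void example_build_exec(struct drm_i915_execbuffer *exec,
			       struct drm_i915_op_arg ops[2])
{
	memset(ops, 0, 2 * sizeof(ops[0]));
	ops[0].d.req.op            = drm_bo_validate;
	ops[0].d.req.bo_req.handle = 0x101;	/* hypothetical target bo */
	ops[0].next                = (uint64_t)(unsigned long)&ops[1];
	ops[1].d.req.op            = drm_bo_validate;
	ops[1].d.req.bo_req.handle = 0x102;	/* hypothetical batch bo, last */
	ops[1].next                = 0;		/* terminates the list */
	memset(exec, 0, sizeof(*exec));
	exec->ops_list    = (uint64_t)(unsigned long)&ops[0];
	exec->num_buffers = 2;
}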
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory