Allow a "native type" to be associated with a fence sequence.

In the Intel case, we can associate a flush with a fence sequence.
This commit is contained in:
Thomas Hellstrom 2006-09-15 16:47:09 +02:00
parent 49fbeb339c
commit f613022cee
7 changed files with 73 additions and 119 deletions

View file

@ -765,7 +765,9 @@ typedef struct drm_fence_driver{
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
int (*emit) (struct drm_device *dev, uint32_t *breadcrumb);
int (*emit) (struct drm_device *dev, uint32_t flags,
uint32_t *breadcrumb,
uint32_t *native_type);
void (*poke_flush) (struct drm_device *dev);
} drm_fence_driver_t;
@ -804,7 +806,7 @@ typedef struct drm_buffer_manager{
struct list_head ddestroy;
struct list_head other;
struct work_struct wq;
uint32_t fence_flags;
uint32_t fence_type;
unsigned long max_pages;
unsigned long cur_pages;
} drm_buffer_manager_t;
@ -963,6 +965,7 @@ typedef struct drm_fence_object{
struct list_head ring;
int class;
uint32_t native_type;
volatile uint32_t type;
volatile uint32_t signaled;
uint32_t sequence;
@ -997,7 +1000,7 @@ typedef struct drm_buffer_object{
struct list_head head;
struct list_head ddestroy;
uint32_t fence_flags;
uint32_t fence_type;
uint32_t fence_class;
drm_fence_object_t *fence;
uint32_t priv_flags;
@ -1386,7 +1389,8 @@ extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
extern int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
int emit, drm_fence_object_t **c_fence);
uint32_t fence_flags,
drm_fence_object_t **c_fence);
extern int drm_fence_add_user_object(drm_file_t *priv,
drm_fence_object_t *fence,
int shareable);
@ -1406,6 +1410,7 @@ extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_clean_mm(drm_device_t *dev);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
drm_fence_object_t *fence,
drm_fence_object_t **used_fence);

View file

@ -94,9 +94,9 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
if (bo->fence) {
if (!drm_fence_object_signaled(bo->fence, bo->fence_flags)) {
if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) {
drm_fence_object_flush(dev, bo->fence, bo->fence_flags);
drm_fence_object_flush(dev, bo->fence, bo->fence_type);
list_add_tail(&bo->ddestroy, &bm->ddestroy);
schedule_delayed_work(&bm->wq,
((DRM_HZ / 100) <
@ -144,7 +144,7 @@ static void drm_bo_delayed_delete(drm_device_t * dev)
fence = entry->fence;
if (fence && drm_fence_object_signaled(fence,
entry->fence_flags)) {
entry->fence_type)) {
drm_fence_usage_deref_locked(dev, fence);
entry->fence = NULL;
}
@ -205,6 +205,7 @@ void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
drm_fence_object_t * fence,
drm_fence_object_t ** used_fence)
{
@ -212,7 +213,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
drm_buffer_manager_t *bm = &dev->bm;
drm_buffer_object_t *entry;
uint32_t fence_flags = 0;
uint32_t fence_type = 0;
int count = 0;
int ret = 0;
struct list_head f_list, *l;
@ -224,7 +225,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
list_for_each_entry(entry, list, head) {
BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
fence_flags |= entry->fence_flags;
fence_type |= entry->fence_type;
if (entry->fence_class != 0) {
DRM_ERROR("Fence class %d is not implemented yet.\n",
entry->fence_class);
@ -250,7 +251,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
list_del_init(list);
if (fence) {
if ((fence_flags & fence->type) != fence_flags) {
if ((fence_type & fence->type) != fence_type) {
DRM_ERROR("Given fence doesn't match buffers "
"on unfenced list.\n");
ret = -EINVAL;
@ -258,7 +259,9 @@ int drm_fence_buffer_objects(drm_file_t * priv,
}
} else {
mutex_unlock(&dev->struct_mutex);
ret = drm_fence_object_create(dev, fence_flags, 1, &fence);
ret = drm_fence_object_create(dev, fence_type,
fence_flags | DRM_FENCE_FLAG_EMIT,
&fence);
mutex_lock(&dev->struct_mutex);
if (ret)
goto out;
@ -317,7 +320,7 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_flags)) {
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
return 0;
@ -327,7 +330,7 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
}
ret =
drm_fence_object_wait(dev, fence, lazy, ignore_signals,
bo->fence_flags);
bo->fence_type);
if (ret)
return ret;
@ -624,7 +627,7 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_flags)) {
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
return 0;
@ -646,13 +649,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_flags)) {
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
return 0;
}
drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
if (drm_fence_object_signaled(fence, bo->fence_flags)) {
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
return 0;
@ -776,7 +779,7 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
rep->mask = bo->mask;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_flags;
rep->fence_flags = bo->fence_type;
rep->rep_flags = 0;
if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
@ -988,7 +991,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
}
DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_flags);
ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
if (ret) {
DRM_ERROR("Driver did not support given buffer permissions\n");
return ret;

View file

@ -31,84 +31,35 @@
#include "drmP.h"
/*
 * Retarget the fence manager's per-type list pointers.
 *
 * For every fence type whose bit is set in the `type` mask (bits 0..no_types-1),
 * point fm->fence_types[i] at `list`.  NOTE(review): this helper is removed by
 * this commit — drm_fence_handler now walks fm->ring directly instead of
 * maintaining per-type cursor pointers.
 */
static void drm_fm_update_pointers(drm_fence_manager_t * fm,
struct list_head *list, int no_types,
uint32_t type)
{
int i;
for (i = 0; i < no_types; ++i) {
/* bit i set in the mask selects fence type i */
if (type & (1 << i)) {
fm->fence_types[i] = list;
}
}
}
/*
* Typically called by the IRQ handler.
*/
void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
int i;
int wake = 0;
int largest = 0;
uint32_t diff;
uint32_t relevant;
int index = 0;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
struct list_head *list;
struct list_head *fence_list;
struct list_head *list, *prev;
drm_fence_object_t *fence;
int found = 0;
for (i = 0; i < driver->no_types; ++i) {
if (!(type & (1 << i)))
continue;
list = fm->fence_types[i];
fence_list = list->next;
if (fence_list == &fm->ring)
continue;
fence = list_entry(fence_list, drm_fence_object_t, ring);
list_for_each_entry(fence, &fm->ring, ring) {
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff < driver->wrap_diff) {
if (diff >= largest) {
largest = diff;
index = i;
found = 1;
}
}
if (diff > driver->wrap_diff)
break;
}
if (!found)
return;
list = fence->ring.prev;
prev = list->prev;
/*
* Start with the fence object with the lowest sequence number, affected by
* the type mask of this call. Update signaled fields,
* Check if we need to wake sleeping processes
*/
list = fm->fence_types[index]->next;
do {
if (list == &fm->ring) {
drm_fm_update_pointers(fm, list->prev,
driver->no_types, type);
break;
}
for (; list != &fm->ring; list = prev, prev = list->prev) {
fence = list_entry(list, drm_fence_object_t, ring);
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff >= driver->wrap_diff) {
drm_fm_update_pointers(fm, fence->ring.prev,
driver->no_types, type);
break;
}
type |= fence->native_type;
relevant = type & fence->type;
if ((fence->signaled | relevant) != fence->signaled) {
fence->signaled |= relevant;
DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
@ -119,35 +70,20 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
relevant = fence->flush_mask &
~(fence->signaled | fence->submitted_flush);
if (relevant) {
fm->pending_flush |= relevant;
fence->submitted_flush = fence->flush_mask;
}
list = list->next;
/*
* Remove a completely signaled fence from the
* fence manager ring.
*/
if (!(fence->type & ~fence->signaled)) {
DRM_DEBUG("Fence completely signaled 0x%08lx\n",
fence->base.hash.key);
fence_list = &fence->ring;
for (i = 0; i < driver->no_types; ++i) {
if (fm->fence_types[i] == fence_list)
fm->fence_types[i] = fence_list->prev;
}
list_del_init(fence_list);
list_del_init(&fence->ring);
}
} while (1);
/*
* Wake sleeping processes.
*/
}
if (wake) {
DRM_WAKEUP(&fm->fence_queue);
}
@ -158,15 +94,9 @@ EXPORT_SYMBOL(drm_fence_handler);
static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
unsigned long flags;
int i;
write_lock_irqsave(&fm->lock, flags);
for (i = 0; i < driver->no_types; ++i) {
if (fm->fence_types[i] == ring)
fm->fence_types[i] = ring->prev;
}
list_del_init(ring);
write_unlock_irqrestore(&fm->lock, flags);
}
@ -235,7 +165,8 @@ static void drm_fence_flush_exe(drm_fence_manager_t * fm,
* Last_exe_flush is invalid. Find oldest sequence.
*/
list = fm->fence_types[_DRM_FENCE_TYPE_EXE];
/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
list = &fm->ring;
if (list->next == &fm->ring) {
return;
} else {
@ -405,16 +336,17 @@ int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
}
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
uint32_t type)
uint32_t fence_flags, uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
unsigned long flags;
uint32_t sequence;
uint32_t native_type;
int ret;
drm_fence_unring(dev, &fence->ring);
ret = driver->emit(dev, &sequence);
ret = driver->emit(dev, fence_flags, &sequence, &native_type);
if (ret)
return ret;
@ -424,13 +356,15 @@ int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
fence->submitted_flush = 0x00;
fence->signaled = 0x00;
fence->sequence = sequence;
fence->native_type = native_type;
list_add_tail(&fence->ring, &fm->ring);
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
drm_fence_object_t * fence)
static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
uint32_t fence_flags,
drm_fence_object_t * fence)
{
int ret = 0;
unsigned long flags;
@ -449,13 +383,12 @@ int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
fence->signaled = 0;
fence->sequence = 0;
write_unlock_irqrestore(&fm->lock, flags);
if (emit) {
ret = drm_fence_object_emit(dev, fence, type);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
ret = drm_fence_object_emit(dev, fence, fence_flags, type);
}
return ret;
}
EXPORT_SYMBOL(drm_fence_object_init);
int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
int shareable)
@ -477,7 +410,7 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(drm_device_t * dev, uint32_t type,
int emit, drm_fence_object_t ** c_fence)
unsigned flags, drm_fence_object_t ** c_fence)
{
drm_fence_object_t *fence;
int ret;
@ -485,7 +418,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, type, emit, fence);
ret = drm_fence_object_init(dev, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
return ret;
@ -559,7 +492,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
if (arg.flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, filp);
ret = drm_fence_object_create(dev, arg.type,
arg.flags & DRM_FENCE_FLAG_EMIT,
arg.flags,
&fence);
if (ret)
return ret;
@ -623,7 +556,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_emit(dev, fence, arg.type);
ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
break;
case drm_fence_buffers:
if (!dev->bm.initialized) {
@ -631,7 +564,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
return -EINVAL;
}
LOCK_TEST_WITH_RETURN(dev, filp);
ret = drm_fence_buffer_objects(priv, NULL, NULL, &fence);
ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
NULL, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(priv, fence,

View file

@ -201,7 +201,7 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD;
new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCKING_CONTEXT(old) == context) {
@ -213,7 +213,7 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
/* Have lock */
return 1;
}

View file

@ -86,6 +86,7 @@ static void i915_perform_flush(drm_device_t * dev)
}
if (fm->pending_flush && !dev_priv->flush_pending) {
DRM_ERROR("Sync flush");
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fm->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
@ -117,11 +118,16 @@ void i915_poke_flush(drm_device_t * dev)
write_unlock_irqrestore(&fm->lock, flags);
}
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t * sequence)
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
uint32_t * sequence, uint32_t *native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
i915_emit_irq(dev);
*sequence = (uint32_t) dev_priv->counter;
*native_type = DRM_FENCE_TYPE_EXE;
if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
*native_type |= DRM_I915_FENCE_TYPE_RW;
return 0;
}

View file

@ -113,6 +113,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_FENCE_CLASS_ACCEL 0
/* Fence type that guarantees read-write flush */
#define DRM_I915_FENCE_TYPE_RW 2
/* MI_FLUSH programmed just before the fence */
#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
/* Flags for perf_boxes
*/

View file

@ -157,8 +157,12 @@ extern void i915_mem_release(drm_device_t * dev,
DRMFILE filp, struct mem_block *heap);
#ifdef I915_HAVE_FENCE
/* i915_fence.c */
extern void i915_fence_handler(drm_device_t *dev);
extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t *sequence);
extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags,
uint32_t *sequence,
uint32_t *native_type);
extern void i915_poke_flush(drm_device_t *dev);
extern void i915_sync_flush(drm_device_t *dev);
#endif