radeon_ms: rework fence code and bring radeon ms up to date
parent 75c9e0d346
commit a7dc4d08b9
8 changed files with 243 additions and 105 deletions
@@ -28,7 +28,7 @@
 #include "drm_pciids.h"
 #include "radeon_ms.h"
 
-extern struct drm_fence_driver radeon_ms_fence_driver;
+extern struct drm_fence_driver r3xx_fence_driver;
 extern struct drm_bo_driver radeon_ms_bo_driver;
 extern struct drm_ioctl_desc radeon_ms_ioctls[];
 extern int radeon_ms_num_ioctls;
@@ -71,7 +71,7 @@ static struct drm_driver driver = {
         .set_version = NULL,
         .fb_probe = radeonfb_probe,
         .fb_remove = radeonfb_remove,
-        .fence_driver = &radeon_ms_fence_driver,
+        .fence_driver = &r3xx_fence_driver,
         .bo_driver = &radeon_ms_bo_driver,
         .major = DRIVER_MAJOR,
         .minor = DRIVER_MINOR,
@@ -345,12 +345,11 @@ int radeonfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
         DRM_INFO("[radeon_ms] fb physical start : 0x%lX\n", info->fix.smem_start);
         DRM_INFO("[radeon_ms] fb physical size : %d\n", info->fix.smem_len);
 
-        ret = drm_mem_reg_ioremap(dev, &fb->bo->mem, &fb->virtual_base);
-        if (ret) {
-                DRM_ERROR("error mapping fb: %d\n", ret);
+        ret = drm_bo_kmap(fb->bo, 0, fb->bo->num_pages, &fb->kmap);
+        if (ret) {
+                DRM_ERROR("error mapping fb: %d\n", ret);
         }
-
-        info->screen_base = fb->virtual_base;
+        info->screen_base = fb->kmap.virtual;
         info->screen_size = info->fix.smem_len; /* FIXME */
         info->pseudo_palette = fb->pseudo_palette;
         info->var.xres_virtual = fb->width;
@@ -445,10 +444,10 @@ int radeonfb_remove(struct drm_device *dev, struct drm_crtc *crtc)
 
         if (info) {
                 unregister_framebuffer(info);
-                framebuffer_release(info);
-                drm_mem_reg_iounmap(dev, &fb->bo->mem, fb->virtual_base);
+                drm_bo_kunmap(&fb->kmap);
                 drm_bo_usage_deref_unlocked(&fb->bo);
                 drm_framebuffer_destroy(fb);
+                framebuffer_release(info);
         }
         return 0;
 }
@@ -31,6 +31,7 @@
 #define __RADEON_MS_H__
 
 #include "radeon_ms_drv.h"
+#include "amd_r3xx_fence.h"
 #include "radeon_ms_reg.h"
 #include "radeon_ms_drm.h"
 #include "radeon_ms_rom.h"
@@ -328,6 +329,7 @@ struct drm_radeon_private {
         /* abstract asic specific structures */
         struct radeon_ms_rom rom;
         struct radeon_ms_properties properties;
+        void *fence;
 };
 
 
@@ -425,15 +427,9 @@ int radeon_ms_execbuffer(struct drm_device *dev, void *data,
 int radeon_ms_family_init(struct drm_device *dev);
 
 /* radeon_ms_fence.c */
-int radeon_ms_fence_emit_sequence(struct drm_device *dev, uint32_t class,
-                                  uint32_t flags, uint32_t *sequence,
-                                  uint32_t *native_type);
-void radeon_ms_fence_handler(struct drm_device * dev);
-int radeon_ms_fence_has_irq(struct drm_device *dev, uint32_t class,
-                            uint32_t flags);
-int radeon_ms_fence_types(struct drm_buffer_object *bo,
-                          uint32_t * class, uint32_t * type);
-void radeon_ms_poke_flush(struct drm_device * dev, uint32_t class);
+void r3xx_fence_handler(struct drm_device * dev);
+int r3xx_fence_types(struct drm_buffer_object *bo,
+                     uint32_t * class, uint32_t * type);
 
 /* radeon_ms_fb.c */
 int radeonfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
@@ -43,24 +43,13 @@ static uint32_t radeon_ms_busy_prios[] = {
         DRM_BO_MEM_LOCAL,
 };
 
-struct drm_fence_driver radeon_ms_fence_driver = {
-        .num_classes = 1,
-        .wrap_diff = (1 << 30),
-        .flush_diff = (1 << 29),
-        .sequence_mask = 0xffffffffU,
-        .lazy_capable = 1,
-        .emit = radeon_ms_fence_emit_sequence,
-        .poke_flush = radeon_ms_poke_flush,
-        .has_irq = radeon_ms_fence_has_irq,
-};
-
 struct drm_bo_driver radeon_ms_bo_driver = {
         .mem_type_prio = radeon_ms_mem_prios,
         .mem_busy_prio = radeon_ms_busy_prios,
         .num_mem_type_prio = sizeof(radeon_ms_mem_prios)/sizeof(uint32_t),
         .num_mem_busy_prio = sizeof(radeon_ms_busy_prios)/sizeof(uint32_t),
         .create_ttm_backend_entry = radeon_ms_create_ttm_backend,
-        .fence_type = radeon_ms_fence_types,
+        .fence_type = r3xx_fence_types,
         .invalidate_caches = radeon_ms_invalidate_caches,
         .init_mem_type = radeon_ms_init_mem_type,
         .evict_flags = radeon_ms_evict_flags,
@@ -127,6 +116,13 @@ int radeon_ms_driver_load(struct drm_device *dev, unsigned long flags)
                 return ret;
         }
 
+        dev_priv->fence = drm_alloc(sizeof(struct r3xx_fence), DRM_MEM_DRIVER);
+        if (dev_priv->fence == NULL) {
+                radeon_ms_driver_unload(dev);
+                return -ENOMEM;
+        }
+        memset(dev_priv->fence, 0, sizeof(struct r3xx_fence));
+
         /* we don't want userspace to be able to map this so don't use
          * drm_addmap */
         dev_priv->mmio.offset = drm_get_resource_start(dev, 2);
@@ -309,9 +305,11 @@ int radeon_ms_driver_unload(struct drm_device *dev)
                 drm_core_ioremapfree(&dev_priv->vram, dev);
         }
         DRM_INFO("[radeon_ms] map released\n");
+        drm_free(dev_priv->fence, sizeof(struct r3xx_fence), DRM_MEM_DRIVER);
         drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
         dev->dev_private = NULL;
 
+
         DRM_INFO("[radeon_ms] that's all the folks\n");
         return 0;
 }
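
The struct r3xx_fence allocated and freed above is declared in amd_r3xx_fence.h, which is not among the hunks shown here. A minimal sketch of the fields the fence code later in this commit relies on (field names taken from their uses in this patch; the real header may hold more):

struct r3xx_fence {
        /* last sequence number handed out by r3xx_fence_sequence() */
        uint32_t sequence;
        /* last sequence already passed to drm_fence_handler() */
        uint32_t sequence_last_reported;
        /* sequence of the most recent emit that carried a RW cache flush */
        uint32_t sequence_last_flush;
};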
@@ -30,16 +30,23 @@
 #ifndef __RADEON_MS_DRM_H__
 #define __RADEON_MS_DRM_H__
 
-/* fence definitions */
-/* The only fence class we support */
-#define DRM_RADEON_FENCE_CLASS_ACCEL 0
-/* Fence type that guarantees read-write flush */
-#define DRM_RADEON_FENCE_TYPE_RW 2
-/* cache flushes programmed just before the fence */
-#define DRM_RADEON_FENCE_FLAG_FLUSHED 0x01000000
+/* Fence
+ * We have only one fence class as we submit command through the
+ * same fifo so there is no need to synchronize buffer btw different
+ * cmd stream.
+ *
+ * Set DRM_RADEON_FENCE_FLAG_FLUSHED if you want a flush with
+ * emission of the fence
+ *
+ * For fence type we have the native DRM EXE type and the radeon RW
+ * type.
+ */
+#define DRM_RADEON_FENCE_CLASS_ACCEL 0
+#define DRM_RADEON_FENCE_TYPE_RW 2
+#define DRM_RADEON_FENCE_FLAG_FLUSHED 0x01000000
 
 /* radeon ms ioctl */
-#define DRM_RADEON_EXECBUFFER 0x00
+#define DRM_RADEON_EXECBUFFER 0x00
 
 struct drm_radeon_execbuffer_arg {
         uint64_t next;
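
As the comment above explains, the driver exposes a single fence class (DRM_RADEON_FENCE_CLASS_ACCEL) and two fence types: the EXE type provided by the DRM fence framework plus the driver's read/write type. A hedged illustration of how the bits combine when a submission wants its caches flushed (DRM_FENCE_TYPE_EXE comes from the shared DRM fence headers; the helper below is illustrative only, not part of the driver):

/* Illustrative only: build the fence type mask a submission will wait on. */
static uint32_t example_fence_type(int wants_rw_flush)
{
        uint32_t type = DRM_FENCE_TYPE_EXE;             /* command stream executed */

        if (wants_rw_flush)
                type |= DRM_RADEON_FENCE_TYPE_RW;       /* read/write caches flushed */
        return type;    /* used together with class DRM_RADEON_FENCE_CLASS_ACCEL */
}

DRM_RADEON_FENCE_FLAG_FLUSHED is what callers pass at emit time; the new emit hook further down in this commit turns that flag into the RW bit of the fence's native type.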
@@ -223,7 +223,7 @@ int radeon_ms_execbuffer(struct drm_device *dev, void *data,
                 fence_arg->handle = fence->base.hash.key;
                 fence_arg->fence_class = fence->fence_class;
                 fence_arg->type = fence->type;
-                fence_arg->signaled = fence->signaled;
+                fence_arg->signaled = fence->signaled_types;
                 fence_arg->sequence = fence->sequence;
         }
 }
@@ -27,51 +27,30 @@
  * Jerome Glisse <glisse@freedesktop.org>
  */
 #include "radeon_ms.h"
+#include "amd_r3xx_fence.h"
 
-static void radeon_ms_fence_flush(struct drm_device *dev)
+#define R3XX_FENCE_SEQUENCE_RW_FLUSH 0x80000000u
+
+static inline int r3xx_fence_emit_sequence(struct drm_device *dev,
+                                           struct drm_radeon_private *dev_priv,
+                                           uint32_t sequence)
 {
-        struct drm_radeon_private *dev_priv = dev->dev_private;
-        struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
-        uint32_t pending_flush_types = 0;
-        uint32_t sequence;
+        struct r3xx_fence *r3xx_fence = dev_priv->fence;
+        uint32_t cmd[2];
+        int i, r;
 
-        if (dev_priv == NULL) {
-                return;
-        }
-        pending_flush_types = fc->pending_flush |
-                              ((fc->pending_exe_flush) ?
-                               DRM_FENCE_TYPE_EXE : 0);
-        if (pending_flush_types) {
-                sequence = mmio_read(dev_priv, dev_priv->fence_reg);
-                drm_fence_handler(dev, 0, sequence, pending_flush_types, 0);
-        }
-}
-
-int radeon_ms_fence_emit_sequence(struct drm_device *dev, uint32_t class,
-                                  uint32_t flags, uint32_t *sequence,
-                                  uint32_t *native_type)
-{
-        struct drm_radeon_private *dev_priv = dev->dev_private;
-        uint32_t fence_id, cmd[2], i, ret;
-
-        if (!dev_priv || dev_priv->cp_ready != 1) {
-                return -EINVAL;
-        }
-        fence_id = (++dev_priv->fence_id_last);
-        if (dev_priv->fence_id_last > 0x7FFFFFFF) {
-                fence_id = dev_priv->fence_id_last = 1;
-        }
-        *sequence = fence_id;
-        *native_type = DRM_FENCE_TYPE_EXE;
-        if (flags & DRM_RADEON_FENCE_FLAG_FLUSHED) {
-                *native_type |= DRM_RADEON_FENCE_TYPE_RW;
+        if (sequence & R3XX_FENCE_SEQUENCE_RW_FLUSH) {
+                r3xx_fence->sequence_last_flush =
+                        sequence & ~R3XX_FENCE_SEQUENCE_RW_FLUSH;
+                /* Ask flush for VERTEX & FRAGPROG pipeline
+                 * have 3D idle */
+                dev_priv->flush_cache(dev);
         }
         cmd[0] = CP_PACKET0(dev_priv->fence_reg, 0);
-        cmd[1] = fence_id;
+        cmd[1] = sequence;
         for (i = 0; i < dev_priv->usec_timeout; i++) {
-                ret = radeon_ms_ring_emit(dev, cmd, 2);
-                if (!ret) {
+                r = radeon_ms_ring_emit(dev, cmd, 2);
+                if (!r) {
                         dev_priv->irq_emit(dev);
                         return 0;
                 }
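
The rework multiplexes two things through the single fence scratch register written by the ring: a 31-bit sequence number and, in bit 31 (R3XX_FENCE_SEQUENCE_RW_FLUSH), a marker that cache flushes were programmed just before that fence. A small sketch of the encoding, matching the masks used above (the helper names are illustrative, not from the driver):

/* Illustrative helpers: bit 31 flags "RW flush emitted", bits 0-30 carry the sequence. */
static uint32_t r3xx_pack_sequence(uint32_t seq, int rw_flushed)
{
        return rw_flushed ? (seq | R3XX_FENCE_SEQUENCE_RW_FLUSH) : seq;
}

static uint32_t r3xx_unpack_sequence(uint32_t reg, int *rw_flushed)
{
        *rw_flushed = !!(reg & R3XX_FENCE_SEQUENCE_RW_FLUSH);
        return reg & ~R3XX_FENCE_SEQUENCE_RW_FLUSH;
}

This is also why r3xx_fence_sequence() in the next hunk wraps the counter at 0x7fffffff: the top bit has to stay free for the flush marker.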
@@ -79,51 +58,210 @@ int radeon_ms_fence_emit_sequence(struct drm_device *dev, uint32_t class,
         return -EBUSY;
 }
 
-void radeon_ms_fence_handler(struct drm_device * dev)
+static inline uint32_t r3xx_fence_sequence(struct r3xx_fence *r3xx_fence)
 {
-        struct drm_radeon_private *dev_priv = dev->dev_private;
-        struct drm_fence_manager *fm = &dev->fm;
+        r3xx_fence->sequence += 1;
+        if (unlikely(r3xx_fence->sequence > 0x7fffffffu)) {
+                r3xx_fence->sequence = 1;
+        }
+        return r3xx_fence->sequence;
+}
+
+static inline void r3xx_fence_report(struct drm_device *dev,
+                                     struct drm_radeon_private *dev_priv,
+                                     struct r3xx_fence *r3xx_fence)
+{
+        uint32_t fence_types = DRM_FENCE_TYPE_EXE;
+        uint32_t sequence;
 
         if (dev_priv == NULL) {
                 return;
         }
 
-        write_lock(&fm->lock);
-        radeon_ms_fence_flush(dev);
-        write_unlock(&fm->lock);
+        sequence = mmio_read(dev_priv, dev_priv->fence_reg);
+        if (sequence & R3XX_FENCE_SEQUENCE_RW_FLUSH) {
+                sequence &= ~R3XX_FENCE_SEQUENCE_RW_FLUSH;
+                fence_types |= DRM_RADEON_FENCE_TYPE_RW;
+                if (sequence == r3xx_fence->sequence_last_flush) {
+                        r3xx_fence->sequence_last_flush = 0;
+                }
+        }
+        /* avoid to report already reported sequence */
+        if (sequence != r3xx_fence->sequence_last_reported) {
+                drm_fence_handler(dev, 0, sequence, fence_types, 0);
+                r3xx_fence->sequence_last_reported = sequence;
+        }
 }
 
-int radeon_ms_fence_has_irq(struct drm_device *dev, uint32_t class,
-                            uint32_t flags)
+static void r3xx_fence_flush(struct drm_device *dev, uint32_t class)
 {
-        /*
-         * We have an irq that tells us when we have a new breadcrumb.
-         */
-        if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
-                return 1;
+        struct drm_radeon_private *dev_priv = dev->dev_private;
+        struct r3xx_fence *r3xx_fence = dev_priv->fence;
+        uint32_t sequence;
+
+        sequence = r3xx_fence_sequence(r3xx_fence);
+        sequence |= R3XX_FENCE_SEQUENCE_RW_FLUSH;
+        r3xx_fence_emit_sequence(dev, dev_priv, sequence);
+}
+
+static void r3xx_fence_poll(struct drm_device *dev, uint32_t fence_class,
+                            uint32_t waiting_types)
+{
+        struct drm_radeon_private *dev_priv = dev->dev_private;
+        struct drm_fence_manager *fm = &dev->fm;
+        struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
+        struct r3xx_fence *r3xx_fence = dev_priv->fence;
+
+        if (unlikely(!dev_priv)) {
+                return;
+        }
+        /* if there is a RW flush pending then submit new sequence
+         * preceded by flush cmds */
+        if (fc->pending_flush & DRM_RADEON_FENCE_TYPE_RW) {
+                r3xx_fence_flush(dev, 0);
+                fc->pending_flush &= ~DRM_RADEON_FENCE_TYPE_RW;
+        }
+        r3xx_fence_report(dev, dev_priv, r3xx_fence);
+        return;
+}
+
+static int r3xx_fence_emit(struct drm_device *dev, uint32_t class,
+                           uint32_t flags, uint32_t *sequence,
+                           uint32_t *native_type)
+{
+        struct drm_radeon_private *dev_priv = dev->dev_private;
+        struct r3xx_fence *r3xx_fence = dev_priv->fence;
+        uint32_t tmp;
+
+        if (!dev_priv || dev_priv->cp_ready != 1) {
+                return -EINVAL;
+        }
+        *sequence = tmp = r3xx_fence_sequence(r3xx_fence);
+        *native_type = DRM_FENCE_TYPE_EXE;
+        if (flags & DRM_RADEON_FENCE_FLAG_FLUSHED) {
+                *native_type |= DRM_RADEON_FENCE_TYPE_RW;
+                tmp |= R3XX_FENCE_SEQUENCE_RW_FLUSH;
+        }
+        return r3xx_fence_emit_sequence(dev, dev_priv, tmp);
+}
+
+static int r3xx_fence_has_irq(struct drm_device *dev,
+                              uint32_t class, uint32_t type)
+{
+        const uint32_t type_irq_mask = DRM_FENCE_TYPE_EXE |
+                                       DRM_RADEON_FENCE_TYPE_RW;
+        /*
+         * We have an irq for EXE & RW fence.
+         */
+        if (class == 0 && (type & type_irq_mask)) {
+                return 1;
+        }
         return 0;
 }
 
-int radeon_ms_fence_types(struct drm_buffer_object *bo,
-                          uint32_t *class, uint32_t *type)
+static uint32_t r3xx_fence_needed_flush(struct drm_fence_object *fence)
 {
+        struct drm_device *dev = fence->dev;
+        struct drm_radeon_private *dev_priv = dev->dev_private;
+        struct r3xx_fence *r3xx_fence = dev_priv->fence;
+        struct drm_fence_driver *driver = dev->driver->fence_driver;
+        uint32_t flush_types, diff;
+
+        flush_types = fence->waiting_types &
+                      ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
+
+        if (flush_types == 0 || ((flush_types & ~fence->native_types) == 0)) {
+                return 0;
+        }
+        if (unlikely(dev_priv == NULL)) {
+                return 0;
+        }
+        if (r3xx_fence->sequence_last_flush) {
+                diff = (r3xx_fence->sequence_last_flush - fence->sequence) &
+                       driver->sequence_mask;
+                if (diff < driver->wrap_diff) {
+                        return 0;
+                }
+        }
+        return flush_types;
+}
+
+static int r3xx_fence_wait(struct drm_fence_object *fence,
+                           int lazy, int interruptible, uint32_t mask)
+{
+        struct drm_device *dev = fence->dev;
+        struct drm_fence_manager *fm = &dev->fm;
+        struct drm_fence_class_manager *fc = &fm->fence_class[0];
+        int r;
+
+        drm_fence_object_flush(fence, mask);
+        if (likely(interruptible)) {
+                r = wait_event_interruptible_timeout(
+                        fc->fence_queue,
+                        drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
+                        3 * DRM_HZ);
+        } else {
+                r = wait_event_timeout(
+                        fc->fence_queue,
+                        drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
+                        3 * DRM_HZ);
+        }
+        if (unlikely(r == -ERESTARTSYS)) {
+                return -EAGAIN;
+        }
+        if (unlikely(r == 0)) {
+                return -EBUSY;
+        }
+
+        if (likely(mask == DRM_FENCE_TYPE_EXE ||
+                   drm_fence_object_signaled(fence, mask))) {
+                return 0;
+        }
+
+        /*
+         * Poll for sync flush completion.
+         */
+        return drm_fence_wait_polling(fence, lazy, interruptible,
+                                      mask, 3 * DRM_HZ);
+}
+
+struct drm_fence_driver r3xx_fence_driver = {
+        .num_classes = 1,
+        .wrap_diff = (1 << 29),
+        .flush_diff = (1 << 28),
+        .sequence_mask = 0x7fffffffU,
+        .has_irq = r3xx_fence_has_irq,
+        .emit = r3xx_fence_emit,
+        .flush = r3xx_fence_flush,
+        .poll = r3xx_fence_poll,
+        .needed_flush = r3xx_fence_needed_flush,
+        .wait = r3xx_fence_wait,
+};
+
+/* this are used by the buffer object code */
+int r3xx_fence_types(struct drm_buffer_object *bo,
+                     uint32_t *class, uint32_t *type)
+{
         *class = 0;
-        if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
-                *type = 3;
-        else
-                *type = 1;
+        if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) {
+                *type = DRM_FENCE_TYPE_EXE | DRM_RADEON_FENCE_TYPE_RW;
+        } else {
+                *type = DRM_FENCE_TYPE_EXE;
+        }
         return 0;
 }
 
-void radeon_ms_poke_flush(struct drm_device *dev, uint32_t class)
+/* this are used by the irq code */
+void r3xx_fence_handler(struct drm_device * dev)
 {
         struct drm_radeon_private *dev_priv = dev->dev_private;
         struct drm_fence_manager *fm = &dev->fm;
-        unsigned long flags;
+        struct drm_fence_class_manager *fc = &fm->fence_class[0];
 
-        if (class != 0)
+        if (unlikely(dev_priv == NULL)) {
                 return;
-        write_lock_irqsave(&fm->lock, flags);
-        radeon_ms_fence_flush(dev);
-        write_unlock_irqrestore(&fm->lock, flags);
+        }
+
+        write_lock(&fm->lock);
+        r3xx_fence_poll(dev, 0, fc->waiting_types);
+        write_unlock(&fm->lock);
 }
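
r3xx_fence_needed_flush() above decides whether a flush is already in flight by comparing sequence numbers modulo the 31-bit sequence space: when (sequence_last_flush - fence->sequence) & sequence_mask is smaller than wrap_diff (1 << 29), the pending flush was emitted at or after this fence, so nothing more has to be queued. A standalone worked example of that arithmetic (constants copied from r3xx_fence_driver above; this is not driver code):

#include <assert.h>
#include <stdint.h>

#define SEQUENCE_MASK 0x7fffffffu       /* .sequence_mask of r3xx_fence_driver */
#define WRAP_DIFF     (1u << 29)        /* .wrap_diff of r3xx_fence_driver */

/* Non-zero when a flush emitted at 'last_flush' already covers 'fence_seq'. */
static int flush_already_pending(uint32_t last_flush, uint32_t fence_seq)
{
        return ((last_flush - fence_seq) & SEQUENCE_MASK) < WRAP_DIFF;
}

int main(void)
{
        assert(flush_already_pending(105, 100));        /* flush emitted after the fence */
        assert(!flush_already_pending(100, 105));       /* fence is newer, flush still needed */
        assert(flush_already_pending(3, 0x7ffffff0));   /* comparison survives the 31-bit wrap */
        return 0;
}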
@@ -92,9 +92,9 @@ irqreturn_t radeon_ms_irq_handler(DRM_IRQ_ARGS)
 
         /* SW interrupt */
         if (GEN_INT_STATUS__SW_INT & status) {
-                radeon_ms_fence_handler(dev);
+                r3xx_fence_handler(dev);
         }
-        radeon_ms_fence_handler(dev);
+        r3xx_fence_handler(dev);
         return IRQ_HANDLED;
 }
 