Merge from trunk to bsd-4-0-0-branch.

Eric Anholt 2003-02-21 07:16:52 +00:00
parent ae0ee9a2f7
commit 734270fb37
39 changed files with 834 additions and 506 deletions

View file

@ -404,6 +404,14 @@ typedef struct drm_map_list_entry {
drm_local_map_t *map;
} drm_map_list_entry_t;
TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig);
typedef struct drm_vbl_sig {
TAILQ_ENTRY(drm_vbl_sig) link;
unsigned int sequence;
int signo;
int pid;
} drm_vbl_sig_t;
struct drm_device {
#ifdef __NetBSD__
struct device device; /* NetBSD's softc is an extension of struct device */
@ -481,6 +489,8 @@ struct drm_device {
#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /* vbl wait channel */
atomic_t vbl_received;
struct drm_vbl_sig_list vbl_sig_list;
DRM_SPINTYPE vbl_lock;
#endif
cycles_t ctx_start;
cycles_t lck_start;
@ -626,6 +636,7 @@ extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif /* __HAVE_DMA */
#if __HAVE_VBL_IRQ
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __REALLY_HAVE_AGP

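The vblank-signal bookkeeping added above is a queue(3) TAILQ: DRM(wait_vblank) inserts one drm_vbl_sig_t per requested signal, and the interrupt path walks and empties the list. A standalone sketch of the same idiom, compiled against plain <sys/queue.h>; the names here are invented for illustration and are not part of the patch:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

TAILQ_HEAD(sig_list, sig);
struct sig {
	TAILQ_ENTRY(sig) link;          /* embedded linkage, as in drm_vbl_sig_t */
	unsigned int sequence;
};

int main(void)
{
	struct sig_list list;
	struct sig *s, *next;
	unsigned int i;

	TAILQ_INIT(&list);
	for (i = 0; i < 3; i++) {
		s = calloc(1, sizeof(*s));
		if (s == NULL)
			return 1;
		s->sequence = i;
		TAILQ_INSERT_HEAD(&list, s, link);
	}
	/* walk-and-remove, mirroring the loop in DRM(vbl_send_signals) */
	s = TAILQ_FIRST(&list);
	while (s != NULL) {
		next = TAILQ_NEXT(s, link);     /* grab next before freeing */
		printf("seq %u\n", s->sequence);
		TAILQ_REMOVE(&list, s, link);
		free(s);
		s = next;
	}
	return 0;
}
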
View file

@ -524,6 +524,11 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
#endif
#if __HAVE_VBL_IRQ
DRM_SPININIT( dev->vbl_lock, "vblsig" );
TAILQ_INIT( &dev->vbl_sig_list );
#endif
/* Before installing handler */
DRM(driver_irq_preinstall)( dev );
@ -633,21 +638,68 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
if ( vblwait.type == _DRM_VBLANK_RELATIVE ) {
vblwait.sequence += atomic_read( &dev->vbl_received );
if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
vblwait.request.sequence += atomic_read(&dev->vbl_received);
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
}
ret = DRM(vblank_wait)( dev, &vblwait.sequence );
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if (flags & _DRM_VBLANK_SIGNAL) {
drm_vbl_sig_t *vbl_sig = DRM_MALLOC(sizeof(drm_vbl_sig_t));
if (vbl_sig == NULL)
return ENOMEM;
bzero(vbl_sig, sizeof(*vbl_sig));
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->signo = vblwait.request.signal;
vbl_sig->pid = DRM_CURRENTPID;
microtime( &now );
vblwait.tval_sec = now.tv_sec;
vblwait.tval_usec = now.tv_usec;
vblwait.reply.sequence = atomic_read(&dev->vbl_received);
DRM_SPINLOCK(&dev->vbl_lock);
TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
DRM_SPINUNLOCK(&dev->vbl_lock);
ret = 0;
} else {
ret = DRM(vblank_wait)(dev, &vblwait.request.sequence);
microtime(&now);
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
}
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
void DRM(vbl_send_signals)( drm_device_t *dev )
{
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
struct proc *p;
DRM_SPINLOCK(&dev->vbl_lock);
vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
while (vbl_sig != NULL) {
drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
p = pfind(vbl_sig->pid);
if (p != NULL)
psignal(p, vbl_sig->signo);
TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
DRM_FREE(vbl_sig);
}
vbl_sig = next;
}
DRM_SPINUNLOCK(&dev->vbl_lock);
}
#endif /* __HAVE_VBL_IRQ */
#else

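The ( vbl_seq - vbl_sig->sequence ) <= (1<<23) test in DRM(vbl_send_signals) above is a wraparound-safe "sequence reached" check: the subtraction is unsigned, so it is taken mod 2^32, and a target up to 2^23 counts behind the current value still registers as passed even after the 32-bit counter wraps. A standalone sketch, not from the patch:

#include <assert.h>

static int vbl_seq_reached(unsigned int cur, unsigned int target)
{
	return (cur - target) <= (1u << 23);
}

int main(void)
{
	assert( vbl_seq_reached(100, 90));          /* already passed */
	assert(!vbl_seq_reached(90, 100));          /* still pending */
	assert( vbl_seq_reached(5, 0xfffffff0u));   /* passed across the wrap */
	return 0;
}
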
View file

@ -982,7 +982,7 @@ int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
}
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT ) ) {
dev->lock.pid = p->p_pid;
dev->lock.pid = DRM_CURRENTPID;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
break; /* Got lock */

View file

@ -346,15 +346,29 @@ typedef struct drm_irq_busid {
} drm_irq_busid_t;
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1 /* Wait for given number of vblanks */
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
_DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
typedef struct drm_radeon_vbl_wait {
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
struct drm_wait_vblank_request {
drm_vblank_seq_type_t type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
drm_vblank_seq_type_t type;
unsigned int sequence;
long tval_sec;
long tval_usec;
};
typedef union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
} drm_wait_vblank_t;
typedef struct drm_agp_mode {

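drm_wait_vblank_t above is the in-place request/reply ioctl pattern: userspace fills the request view of the union, and the kernel overwrites the same storage with the reply view. A hedged userspace sketch; DRM_IOCTL_WAIT_VBLANK and the device path are assumptions here, neither appears in this hunk:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "drm.h"    /* drm_wait_vblank_t; DRM_IOCTL_WAIT_VBLANK assumed defined here */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);    /* device path assumed */
	drm_wait_vblank_t vbl;

	vbl.request.type = _DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;                   /* one vblank from now */
	vbl.request.signal = 0;                     /* block instead of signalling */

	if (fd >= 0 && ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
		       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
	return 0;
}
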
View file

@ -404,6 +404,14 @@ typedef struct drm_map_list_entry {
drm_local_map_t *map;
} drm_map_list_entry_t;
TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig);
typedef struct drm_vbl_sig {
TAILQ_ENTRY(drm_vbl_sig) link;
unsigned int sequence;
int signo;
int pid;
} drm_vbl_sig_t;
struct drm_device {
#ifdef __NetBSD__
struct device device; /* NetBSD's softc is an extension of struct device */
@ -481,6 +489,8 @@ struct drm_device {
#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /* vbl wait channel */
atomic_t vbl_received;
struct drm_vbl_sig_list vbl_sig_list;
DRM_SPINTYPE vbl_lock;
#endif
cycles_t ctx_start;
cycles_t lck_start;
@ -626,6 +636,7 @@ extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif /* __HAVE_DMA */
#if __HAVE_VBL_IRQ
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __REALLY_HAVE_AGP

View file

@ -524,6 +524,11 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
#endif
#if __HAVE_VBL_IRQ
DRM_SPININIT( dev->vbl_lock, "vblsig" );
TAILQ_INIT( &dev->vbl_sig_list );
#endif
/* Before installing handler */
DRM(driver_irq_preinstall)( dev );
@ -633,21 +638,68 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
if ( vblwait.type == _DRM_VBLANK_RELATIVE ) {
vblwait.sequence += atomic_read( &dev->vbl_received );
if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
vblwait.request.sequence += atomic_read(&dev->vbl_received);
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
}
ret = DRM(vblank_wait)( dev, &vblwait.sequence );
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if (flags & _DRM_VBLANK_SIGNAL) {
drm_vbl_sig_t *vbl_sig = DRM_MALLOC(sizeof(drm_vbl_sig_t));
if (vbl_sig == NULL)
return ENOMEM;
bzero(vbl_sig, sizeof(*vbl_sig));
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->signo = vblwait.request.signal;
vbl_sig->pid = DRM_CURRENTPID;
microtime( &now );
vblwait.tval_sec = now.tv_sec;
vblwait.tval_usec = now.tv_usec;
vblwait.reply.sequence = atomic_read(&dev->vbl_received);
DRM_SPINLOCK(&dev->vbl_lock);
TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
DRM_SPINUNLOCK(&dev->vbl_lock);
ret = 0;
} else {
ret = DRM(vblank_wait)(dev, &vblwait.request.sequence);
microtime(&now);
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
}
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
void DRM(vbl_send_signals)( drm_device_t *dev )
{
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
struct proc *p;
DRM_SPINLOCK(&dev->vbl_lock);
vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
while (vbl_sig != NULL) {
drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
p = pfind(vbl_sig->pid);
if (p != NULL)
psignal(p, vbl_sig->signo);
TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
DRM_FREE(vbl_sig);
}
vbl_sig = next;
}
DRM_SPINUNLOCK(&dev->vbl_lock);
}
#endif /* __HAVE_VBL_IRQ */
#else

View file

@ -982,7 +982,7 @@ int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
}
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT ) ) {
dev->lock.pid = p->p_pid;
dev->lock.pid = DRM_CURRENTPID;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
break; /* Got lock */

View file

@ -248,6 +248,7 @@ static int drmOpenDevice(long dev, int minor)
fd = open(buf, O_RDWR, 0);
drmMsg("drmOpenDevice: open result is %d, (%s)\n",
fd, fd < 0 ? strerror(errno) : "OK");
if (fd >= 0) return fd;
drmMsg("drmOpenDevice: Open failed\n");
remove(buf);
@ -1107,6 +1108,7 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
do {
ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
vbl->request.type &= ~DRM_VBLANK_RELATIVE;
} while (ret && errno == EINTR);
return ret;

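drmWaitVBlank above retries on EINTR, and it clears DRM_VBLANK_RELATIVE before retrying: the kernel converts a relative request to an absolute sequence on the first pass, so leaving the flag set would re-add the offset on every retry and push the target further out. A hedged caller sketch; drmVBlank and DRM_VBLANK_RELATIVE are the libdrm-side names, and fd is assumed to be an open DRM device:

#include "xf86drm.h"

static unsigned int wait_one_vblank(int fd)
{
	drmVBlank vbl;

	vbl.request.type = DRM_VBLANK_RELATIVE;     /* offset from the current count */
	vbl.request.sequence = 1;
	vbl.request.signal = 0;

	if (drmWaitVBlank(fd, &vbl) != 0)
		return 0;                           /* error; errno describes it */
	return vbl.reply.sequence;                  /* absolute count actually reached */
}
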
View file

@ -489,7 +489,6 @@ typedef struct drm_agp_mem {
typedef struct drm_agp_head {
agp_kern_info agp_info;
const char *chipset;
drm_agp_mem_t *memory;
unsigned long mode;
int enabled;
@ -521,6 +520,17 @@ typedef struct drm_map_list {
typedef drm_map_t drm_local_map_t;
#if __HAVE_VBL_IRQ
typedef struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
struct siginfo info;
struct task_struct *task;
} drm_vbl_sig_t;
#endif
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
@ -583,6 +593,9 @@ typedef struct drm_device {
#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue;
atomic_t vbl_received;
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs;
unsigned int vbl_pending;
#endif
cycles_t ctx_start;
cycles_t lck_start;
@ -823,6 +836,7 @@ extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( void *dev );

View file

@ -260,60 +260,6 @@ drm_agp_head_t *DRM(agp_init)(void)
return NULL;
}
head->memory = NULL;
switch (head->agp_info.chipset) {
case INTEL_GENERIC: head->chipset = "Intel"; break;
case INTEL_LX: head->chipset = "Intel 440LX"; break;
case INTEL_BX: head->chipset = "Intel 440BX"; break;
case INTEL_GX: head->chipset = "Intel 440GX"; break;
case INTEL_I810: head->chipset = "Intel i810"; break;
case INTEL_I815: head->chipset = "Intel i815"; break;
#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
case INTEL_I820: head->chipset = "Intel i820"; break;
#endif
case INTEL_I840: head->chipset = "Intel i840"; break;
#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
case INTEL_I845: head->chipset = "Intel i845"; break;
#endif
case INTEL_I850: head->chipset = "Intel i850"; break;
case VIA_GENERIC: head->chipset = "VIA"; break;
case VIA_VP3: head->chipset = "VIA VP3"; break;
case VIA_MVP3: head->chipset = "VIA MVP3"; break;
case VIA_MVP4: head->chipset = "VIA MVP4"; break;
case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133";
break;
case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133";
break;
case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro";
break;
case SIS_GENERIC: head->chipset = "SiS"; break;
case AMD_GENERIC: head->chipset = "AMD"; break;
case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
case ALI_GENERIC: head->chipset = "ALi"; break;
case ALI_M1541: head->chipset = "ALi M1541"; break;
#if LINUX_VERSION_CODE >= 0x020402
case ALI_M1621: head->chipset = "ALi M1621"; break;
case ALI_M1631: head->chipset = "ALi M1631"; break;
case ALI_M1632: head->chipset = "ALi M1632"; break;
case ALI_M1641: head->chipset = "ALi M1641"; break;
case ALI_M1647: head->chipset = "ALi M1647"; break;
case ALI_M1651: head->chipset = "ALi M1651"; break;
#endif
#if LINUX_VERSION_CODE >= 0x020406
case SVWRKS_HE: head->chipset = "Serverworks HE";
break;
case SVWRKS_LE: head->chipset = "Serverworks LE";
break;
case SVWRKS_GENERIC: head->chipset = "Serverworks Generic";
break;
#endif
default: head->chipset = "Unknown"; break;
}
#if LINUX_VERSION_CODE <= 0x020408
head->cant_use_aperture = 0;
head->page_mask = ~(0xfff);
@ -321,13 +267,12 @@ drm_agp_head_t *DRM(agp_init)(void)
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
#endif
DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
head->agp_info.version.major,
head->agp_info.version.minor,
head->chipset,
head->agp_info.aper_base,
head->agp_info.aper_size);
DRM_DEBUG("AGP %d.%d, aperture @ 0x%08lx %ZuMB\n",
head->agp_info.version.major,
head->agp_info.version.minor,
head->agp_info.aper_base,
head->agp_info.aper_size);
}
return head;
}

View file

@ -540,6 +540,12 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
dev->vbl_pending = 0;
#endif
/* Before installing handler */
@ -610,7 +616,8 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
drm_device_t *dev = priv->dev;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret;
int ret = 0;
unsigned int flags;
if (!dev->irq)
return -EINVAL;
@ -618,22 +625,104 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
if ( vblwait.type == _DRM_VBLANK_RELATIVE ) {
vblwait.sequence += atomic_read( &dev->vbl_received );
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
case _DRM_VBLANK_RELATIVE:
vblwait.request.sequence += atomic_read( &dev->vbl_received );
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
default:
return -EINVAL;
}
ret = DRM(vblank_wait)( dev, &vblwait.sequence );
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if ( flags & _DRM_VBLANK_SIGNAL ) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sig;
vblwait.reply.sequence = atomic_read( &dev->vbl_received );
do_gettimeofday( &now );
vblwait.tval_sec = now.tv_sec;
vblwait.tval_usec = now.tv_usec;
spin_lock_irqsave( &dev->vbl_lock, irqflags );
/* Check if this task has already scheduled the same signal
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current)
{
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
goto done;
}
}
if ( dev->vbl_pending >= 100 ) {
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
return -EBUSY;
}
dev->vbl_pending++;
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
return -ENOMEM;
}
memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->info.si_signo = vblwait.request.signal;
vbl_sig->task = current;
spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
} else {
ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
do_gettimeofday( &now );
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
}
done:
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
void DRM(vbl_send_signals)( drm_device_t *dev )
{
struct list_head *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
unsigned long flags;
spin_lock_irqsave( &dev->vbl_lock, flags );
list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) {
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
list_del( (struct list_head *) vbl_sig );
DRM_FREE( vbl_sig );
dev->vbl_pending--;
}
}
spin_unlock_irqrestore( &dev->vbl_lock, flags );
}
#endif /* __HAVE_VBL_IRQ */
#else

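The (struct list_head *) vbl_sig casts in the Linux path above only work because struct list_head head is the first member of drm_vbl_sig_t, so a pointer to the struct and a pointer to its list node are address-identical; later kernels spell the reverse direction with list_entry()/container_of(), which lifts that first-member restriction. A minimal standalone illustration, not kernel code:

#include <assert.h>
#include <stddef.h>

struct list_node { struct list_node *next, *prev; };

typedef struct demo_vbl_sig {
	struct list_node head;      /* must stay first for the cast trick */
	unsigned int sequence;
} demo_vbl_sig_t;

int main(void)
{
	demo_vbl_sig_t sig;
	assert(offsetof(demo_vbl_sig_t, head) == 0);
	assert((void *)&sig == (void *)&sig.head);
	return 0;
}
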
View file

@ -449,7 +449,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
pte = pte_offset(pmd, i);
preempt_disable();
pte = pte_offset_map(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
@ -465,6 +466,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
} else {
DRM_PROC_PRINT(" 0x%08lx\n", i);
}
pte_unmap(pte);
preempt_enable();
}
#endif
}

View file

@ -38,6 +38,13 @@
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/pagemap.h>
#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
#else
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l)
#endif
#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
@ -1184,7 +1191,8 @@ int i810_ov0_info(struct inode *inode, struct file *filp,
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data));
if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
return -EFAULT;
return 0;
}

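The i810_ov0_info fix above is the standard copyout idiom: copy_to_user() returns the number of bytes it could NOT copy, so any nonzero result must be turned into -EFAULT rather than dropped on the floor. A generic sketch, kernel context assumed:

/* needs <asm/uaccess.h> (2.4/2.5-era) for copy_to_user() */
static int copy_out(void *dst_user, const void *src, size_t n)
{
	if (copy_to_user(dst_user, src, n))     /* nonzero: user pointer was bad */
		return -EFAULT;
	return 0;
}
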
View file

@ -66,7 +66,7 @@ WARNINGS = -Wall -Wwrite-strings -Wpointer-arith -Wcast-align \
-Wstrict-prototypes -Wnested-externs \
-Wpointer-arith
# -Wshadow -Winline -- make output too noisy
MODCFLAGS = $(CFLAGS) -D__KERNEL__ -DMODULE -fomit-frame-pointer
MODCFLAGS = $(CFLAGS) -D__KERNEL__ -DMODULE -fomit-frame-pointer -fno-strict-aliasing
PRGCFLAGS = $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \
-D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \
-I../../../../../../include -I../../../../../../../../include \

View file

@ -346,17 +346,30 @@ typedef struct drm_irq_busid {
} drm_irq_busid_t;
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1 /* Wait for given number of vblanks */
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
_DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
typedef struct drm_radeon_vbl_wait {
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
struct drm_wait_vblank_request {
drm_vblank_seq_type_t type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
drm_vblank_seq_type_t type;
unsigned int sequence;
long tval_sec;
long tval_usec;
} drm_wait_vblank_t;
};
typedef union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
} drm_wait_vblank_t;
typedef struct drm_agp_mode {
unsigned long mode;

View file

@ -489,7 +489,6 @@ typedef struct drm_agp_mem {
typedef struct drm_agp_head {
agp_kern_info agp_info;
const char *chipset;
drm_agp_mem_t *memory;
unsigned long mode;
int enabled;
@ -521,6 +520,17 @@ typedef struct drm_map_list {
typedef drm_map_t drm_local_map_t;
#if __HAVE_VBL_IRQ
typedef struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
struct siginfo info;
struct task_struct *task;
} drm_vbl_sig_t;
#endif
typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
@ -583,6 +593,9 @@ typedef struct drm_device {
#if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue;
atomic_t vbl_received;
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs;
unsigned int vbl_pending;
#endif
cycles_t ctx_start;
cycles_t lck_start;
@ -823,6 +836,7 @@ extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
extern int DRM(wait_vblank)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
extern void DRM(vbl_send_signals)( drm_device_t *dev );
#endif
#if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( void *dev );

View file

@ -260,60 +260,6 @@ drm_agp_head_t *DRM(agp_init)(void)
return NULL;
}
head->memory = NULL;
switch (head->agp_info.chipset) {
case INTEL_GENERIC: head->chipset = "Intel"; break;
case INTEL_LX: head->chipset = "Intel 440LX"; break;
case INTEL_BX: head->chipset = "Intel 440BX"; break;
case INTEL_GX: head->chipset = "Intel 440GX"; break;
case INTEL_I810: head->chipset = "Intel i810"; break;
case INTEL_I815: head->chipset = "Intel i815"; break;
#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
case INTEL_I820: head->chipset = "Intel i820"; break;
#endif
case INTEL_I840: head->chipset = "Intel i840"; break;
#if LINUX_VERSION_CODE >= 0x02040f /* KERNEL_VERSION(2,4,15) */
case INTEL_I845: head->chipset = "Intel i845"; break;
#endif
case INTEL_I850: head->chipset = "Intel i850"; break;
case VIA_GENERIC: head->chipset = "VIA"; break;
case VIA_VP3: head->chipset = "VIA VP3"; break;
case VIA_MVP3: head->chipset = "VIA MVP3"; break;
case VIA_MVP4: head->chipset = "VIA MVP4"; break;
case VIA_APOLLO_KX133: head->chipset = "VIA Apollo KX133";
break;
case VIA_APOLLO_KT133: head->chipset = "VIA Apollo KT133";
break;
case VIA_APOLLO_PRO: head->chipset = "VIA Apollo Pro";
break;
case SIS_GENERIC: head->chipset = "SiS"; break;
case AMD_GENERIC: head->chipset = "AMD"; break;
case AMD_IRONGATE: head->chipset = "AMD Irongate"; break;
case ALI_GENERIC: head->chipset = "ALi"; break;
case ALI_M1541: head->chipset = "ALi M1541"; break;
#if LINUX_VERSION_CODE >= 0x020402
case ALI_M1621: head->chipset = "ALi M1621"; break;
case ALI_M1631: head->chipset = "ALi M1631"; break;
case ALI_M1632: head->chipset = "ALi M1632"; break;
case ALI_M1641: head->chipset = "ALi M1641"; break;
case ALI_M1647: head->chipset = "ALi M1647"; break;
case ALI_M1651: head->chipset = "ALi M1651"; break;
#endif
#if LINUX_VERSION_CODE >= 0x020406
case SVWRKS_HE: head->chipset = "Serverworks HE";
break;
case SVWRKS_LE: head->chipset = "Serverworks LE";
break;
case SVWRKS_GENERIC: head->chipset = "Serverworks Generic";
break;
#endif
default: head->chipset = "Unknown"; break;
}
#if LINUX_VERSION_CODE <= 0x020408
head->cant_use_aperture = 0;
head->page_mask = ~(0xfff);
@ -321,13 +267,12 @@ drm_agp_head_t *DRM(agp_init)(void)
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
#endif
DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
head->agp_info.version.major,
head->agp_info.version.minor,
head->chipset,
head->agp_info.aper_base,
head->agp_info.aper_size);
DRM_DEBUG("AGP %d.%d, aperture @ 0x%08lx %ZuMB\n",
head->agp_info.version.major,
head->agp_info.version.minor,
head->agp_info.aper_base,
head->agp_info.aper_size);
}
return head;
}

View file

@ -540,6 +540,12 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
#if __HAVE_VBL_IRQ
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init( &dev->vbl_lock );
INIT_LIST_HEAD( &dev->vbl_sigs.head );
dev->vbl_pending = 0;
#endif
/* Before installing handler */
@ -610,7 +616,8 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
drm_device_t *dev = priv->dev;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret;
int ret = 0;
unsigned int flags;
if (!dev->irq)
return -EINVAL;
@ -618,22 +625,104 @@ int DRM(wait_vblank)( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
if ( vblwait.type == _DRM_VBLANK_RELATIVE ) {
vblwait.sequence += atomic_read( &dev->vbl_received );
switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
case _DRM_VBLANK_RELATIVE:
vblwait.request.sequence += atomic_read( &dev->vbl_received );
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
default:
return -EINVAL;
}
ret = DRM(vblank_wait)( dev, &vblwait.sequence );
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if ( flags & _DRM_VBLANK_SIGNAL ) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sig;
vblwait.reply.sequence = atomic_read( &dev->vbl_received );
do_gettimeofday( &now );
vblwait.tval_sec = now.tv_sec;
vblwait.tval_usec = now.tv_usec;
spin_lock_irqsave( &dev->vbl_lock, irqflags );
/* Check if this task has already scheduled the same signal
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each( ( (struct list_head *) vbl_sig ), &dev->vbl_sigs.head ) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current)
{
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
goto done;
}
}
if ( dev->vbl_pending >= 100 ) {
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
return -EBUSY;
}
dev->vbl_pending++;
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
return -ENOMEM;
}
memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->info.si_signo = vblwait.request.signal;
vbl_sig->task = current;
spin_lock_irqsave( &dev->vbl_lock, irqflags );
list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
} else {
ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
do_gettimeofday( &now );
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
}
done:
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
return ret;
}
void DRM(vbl_send_signals)( drm_device_t *dev )
{
struct list_head *tmp;
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
unsigned long flags;
spin_lock_irqsave( &dev->vbl_lock, flags );
list_for_each_safe( ( (struct list_head *) vbl_sig ), tmp, &dev->vbl_sigs.head ) {
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
list_del( (struct list_head *) vbl_sig );
DRM_FREE( vbl_sig );
dev->vbl_pending--;
}
}
spin_unlock_irqrestore( &dev->vbl_lock, flags );
}
#endif /* __HAVE_VBL_IRQ */
#else

View file

@ -449,7 +449,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
pte = pte_offset(pmd, i);
preempt_disable();
pte = pte_offset_map(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
@ -465,6 +466,8 @@ static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
} else {
DRM_PROC_PRINT(" 0x%08lx\n", i);
}
pte_unmap(pte);
preempt_enable();
}
#endif
}

View file

@ -38,6 +38,13 @@
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/pagemap.h>
#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
#else
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l)
#endif
#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
@ -1184,7 +1191,8 @@ int i810_ov0_info(struct inode *inode, struct file *filp,
data.offset = dev_priv->overlay_offset;
data.physical = dev_priv->overlay_physical;
copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data));
if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
return -EFAULT;
return 0;
}

View file

@ -50,15 +50,16 @@ set_t *setInit(void)
set_t *set;
set = (set_t *)MALLOC(sizeof(set_t));
for(i = 0; i < SET_SIZE; i++){
set->list[i].free_next = i+1;
set->list[i].alloc_next = -1;
}
set->list[SET_SIZE-1].free_next = -1;
set->free = 0;
set->alloc = -1;
set->trace = -1;
if (set) {
for(i = 0; i < SET_SIZE; i++){
set->list[i].free_next = i+1;
set->list[i].alloc_next = -1;
}
set->list[SET_SIZE-1].free_next = -1;
set->free = 0;
set->alloc = -1;
set->trace = -1;
}
return set;
}
@ -172,7 +173,8 @@ static void *calloc(size_t nmemb, size_t size)
{
void *addr;
addr = kmalloc(nmemb*size, GFP_KERNEL);
memset(addr, 0, nmemb*size);
if (addr)
memset(addr, 0, nmemb*size);
return addr;
}
#define free(n) kfree(n)

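Both fixes above are the same guarded-allocation idiom: check the allocator's result before initializing through it, and still return NULL so the caller can fail cleanly. A standalone sketch:

#include <stdlib.h>
#include <string.h>

static void *checked_calloc(size_t nmemb, size_t size)
{
	void *addr = malloc(nmemb * size);
	if (addr)                               /* only initialize on success */
		memset(addr, 0, nmemb * size);
	return addr;
}

int main(void)
{
	int *p = checked_calloc(4, sizeof(int));
	free(p);
	return 0;
}
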
View file

@ -346,17 +346,30 @@ typedef struct drm_irq_busid {
} drm_irq_busid_t;
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1 /* Wait for given number of vblanks */
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
_DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
typedef struct drm_radeon_vbl_wait {
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
struct drm_wait_vblank_request {
drm_vblank_seq_type_t type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
drm_vblank_seq_type_t type;
unsigned int sequence;
long tval_sec;
long tval_usec;
} drm_wait_vblank_t;
};
typedef union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
} drm_wait_vblank_t;
typedef struct drm_agp_mode {
unsigned long mode;

View file

@ -142,12 +142,12 @@ extern int mga_warp_init( drm_mga_private_t *dev_priv );
#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF( reg ) = val; } while (0)
#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF8( reg ) = val; } while (0)
static inline u32 _MGA_READ(u32 *addr)
{
DRM_READMEMORYBARRIER();
DRM_READMEMORYBARRIER(dev_priv->mmio);
return *(volatile u32 *)addr;
}
#else

View file

@ -1355,6 +1355,9 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
if (!dev_priv->cp_running)
return 0;
/* Flush any pending CP commands. This ensures any outstanding
* commands are executed by the engine before we turn it off.
*/
@ -1382,6 +1385,39 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
return 0;
}
void radeon_do_release( drm_device_t *dev )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int ret;
if (dev_priv) {
if (dev_priv->cp_running) {
/* Stop the cp */
while ((ret = radeon_do_cp_idle( dev_priv )) != 0) {
DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
schedule();
#else
tsleep(&ret, PZERO, "rdnrel", 1);
#endif
}
radeon_do_cp_stop( dev_priv );
radeon_do_engine_reset( dev );
}
/* Disable *all* interrupts */
RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
/* Free memory heap structures */
radeon_mem_takedown( &(dev_priv->agp_heap) );
radeon_mem_takedown( &(dev_priv->fb_heap) );
/* deallocate kernel resources */
radeon_do_cleanup_cp( dev );
}
}
/* Just reset the CP ring. Called as part of an X Server engine reset.
*/
int radeon_cp_reset( DRM_IOCTL_ARGS )
@ -1413,9 +1449,6 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
LOCK_TEST_WITH_RETURN( dev );
/* if (dev->irq) */
/* radeon_emit_and_wait_irq( dev ); */
return radeon_do_cp_idle( dev_priv );
}
@ -1498,7 +1531,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
}
}
DRM_ERROR( "returning NULL!\n" );
DRM_DEBUG( "returning NULL!\n" );
return NULL;
}
#if 0

View file

@ -382,7 +382,7 @@ typedef struct {
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)

View file

@ -194,6 +194,7 @@ extern int radeon_emit_and_wait_irq(drm_device_t *dev);
extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t *dev);
extern void radeon_do_release(drm_device_t *dev);
/* Flags for stats.boxes
*/
@ -821,13 +822,6 @@ do { \
* Ring control
*/
#if defined(__powerpc__)
#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
#else
#define radeon_flush_write_combine() DRM_WRITEMEMORYBARRIER( dev_priv->ring_rtpr )
#endif
#define RADEON_VERBOSE 0
#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;
@ -861,8 +855,13 @@ do { \
dev_priv->ring.tail = write; \
} while (0)
#define COMMIT_RING() do { \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
#define COMMIT_RING() do { \
/* Flush writes to ring */ \
DRM_READMEMORYBARRIER(dev_priv->mmio); \
GET_RING_HEAD( &dev_priv->ring ); \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
/* read from PCI bus to ensure correct posting */ \
RADEON_READ( RADEON_CP_RB_RPTR ); \
} while (0)
#define OUT_RING( x ) do { \

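The new COMMIT_RING orders the ring-buffer stores ahead of the WPTR doorbell write and then reads RPTR back, so the doorbell cannot linger in a PCI posting buffer while the CP sits idle. A hedged generic sketch of that write-then-read-back pattern; it is illustrative only, real MMIO accessors are platform-specific, and __sync_synchronize() stands in for the driver's barrier macro:

static void commit_ring(volatile unsigned int *wptr_reg,
                        volatile unsigned int *rptr_reg,
                        unsigned int tail)
{
	__sync_synchronize();   /* order ring-buffer stores before the doorbell */
	*wptr_reg = tail;       /* tell the CP how far it may read */
	(void)*rptr_reg;        /* readback forces the posted write out to the chip */
}
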
View file

@ -61,7 +61,11 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
(drm_radeon_private_t *)dev->dev_private;
u32 stat;
stat = RADEON_READ(RADEON_GEN_INT_STATUS);
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = RADEON_READ(RADEON_GEN_INT_STATUS)
& (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
if (!stat)
return;
@ -70,23 +74,21 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
DRM_WAKEUP( &dev_priv->swi_queue );
}
#if __HAVE_VBL_IRQ
/* VBLANK interrupt */
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
DRM(vbl_send_signals)( dev );
}
#endif
/* Acknowledge all the bits in GEN_INT_STATUS -- seem to get
* more than we asked for...
*/
/* Acknowledge interrupts we handle */
RADEON_WRITE(RADEON_GEN_INT_STATUS, stat);
}
static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv)
{
u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS );
u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS )
& (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT);
if (tmp)
RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp );
}
@ -138,7 +140,6 @@ int radeon_emit_and_wait_irq(drm_device_t *dev)
}
#if __HAVE_VBL_IRQ
int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
{
drm_radeon_private_t *dev_priv =
@ -161,13 +162,12 @@ int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
*/
DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
+ ~*sequence + 1 ) <= (1<<23) ) );
- *sequence ) <= (1<<23) ) );
*sequence = cur_vblank;
return ret;
}
#endif
/* Needs the lock as it touches the ring.

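Two things happen in the interrupt hunks above. First, the handler now masks GEN_INT_STATUS down to the bits the DRM owns (SW_INT_TEST and CRTC_VBLANK_STAT), so on a shared line it neither claims nor acknowledges interrupts belonging to someone else. Second, the DRM_WAIT_ON change is a pure simplification: for unsigned ints, a + ~b + 1 equals a - b, since ~b + 1 is the two's-complement negation of b. A standalone sketch of the shared-IRQ discipline; the register names are invented here:

#include <stdio.h>

#define IRQ_SW_INT    0x0001u
#define IRQ_VBLANK    0x0002u
#define IRQ_OURS      (IRQ_SW_INT | IRQ_VBLANK)

static unsigned int read_status(void)  { return 0x8002u; } /* fake: vblank + a foreign bit */
static void write_ack(unsigned int v)  { printf("ack 0x%04x\n", v); }

int main(void)
{
	unsigned int stat = read_status() & IRQ_OURS;
	if (!stat)
		return 0;               /* not our interrupt: do not ack anything */
	if (stat & IRQ_VBLANK)
		printf("vblank\n");
	write_ack(stat);                /* never clears the foreign 0x8000 bit */
	return 0;
}
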
View file

@ -130,18 +130,6 @@ static void free_block( struct mem_block *p )
}
}
#if 0
static void print_heap( struct mem_block *heap )
{
struct mem_block *p;
for (p = heap->next ; p != heap ; p = p->next)
DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
p->start, p->start + p->size,
p->size, p->pid);
}
#endif
/* Initialize. How to check for an uninitialized heap?
*/
static int init_heap(struct mem_block **heap, int start, int size)

View file

@ -1073,20 +1073,31 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
u32 *buffer;
const u8 *data;
int size, dwords, tex_width, blit_width;
u32 y, height;
int ret = 0, i;
u32 height;
int i;
RING_LOCALS;
dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
/* FIXME: Be smarter about this...
/* Flush the pixel cache. This ensures no pixel data gets mixed
* up with the texture data from the host data blit, otherwise
* part of the texture image may be corrupted.
*/
buf = radeon_freelist_get( dev );
if ( !buf ) return DRM_ERR(EAGAIN);
BEGIN_RING( 4 );
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING();
#ifdef __BIG_ENDIAN
/* The Mesa texture functions provide the data in little endian as the
* chip wants it, but we need to compensate for the fact that the CP
* ring gets byte-swapped
*/
BEGIN_RING( 2 );
OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
ADVANCE_RING();
#endif
DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
tex->offset >> 10, tex->pitch, tex->format,
image->x, image->y, image->width, image->height );
/* The compiler won't optimize away a division by a variable,
* even if the only legal values are powers of two. Thus, we'll
@ -1120,127 +1131,111 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
return DRM_ERR(EINVAL);
}
DRM_DEBUG( " tex=%dx%d blit=%d\n",
tex_width, tex->height, blit_width );
DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
/* Flush the pixel cache. This ensures no pixel data gets mixed
* up with the texture data from the host data blit, otherwise
* part of the texture image may be corrupted.
*/
BEGIN_RING( 4 );
do {
DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
tex->offset >> 10, tex->pitch, tex->format,
image->x, image->y, image->width, image->height );
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING();
#ifdef __BIG_ENDIAN
/* The Mesa texture functions provide the data in little endian as the
* chip wants it, but we need to compensate for the fact that the CP
* ring gets byte-swapped
*/
BEGIN_RING( 2 );
OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
ADVANCE_RING();
#endif
/* Make a copy of the parameters in case we have to update them
* for a multi-pass texture blit.
*/
y = image->y;
height = image->height;
data = (const u8 *)image->data;
size = height * blit_width;
if ( size > RADEON_MAX_TEXTURE_SIZE ) {
/* Texture image is too large, do a multipass upload */
ret = DRM_ERR(EAGAIN);
/* Adjust the blit size to fit the indirect buffer */
height = RADEON_MAX_TEXTURE_SIZE / blit_width;
/* Make a copy of some parameters in case we have to
* update them for a multi-pass texture blit.
*/
height = image->height;
data = (const u8 *)image->data;
size = height * blit_width;
if ( size > RADEON_MAX_TEXTURE_SIZE ) {
height = RADEON_MAX_TEXTURE_SIZE / blit_width;
size = height * blit_width;
} else if ( size < 4 && size > 0 ) {
size = 4;
} else if ( size == 0 ) {
return 0;
}
buf = radeon_freelist_get( dev );
if ( 0 && !buf ) {
radeon_do_cp_idle( dev_priv );
buf = radeon_freelist_get( dev );
}
if ( !buf ) {
DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
return DRM_ERR(EAGAIN);
}
/* Dispatch the indirect buffer.
*/
buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
dwords = size / 4;
buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_NONE |
(format << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_HOST_DATA |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
buffer[3] = 0xffffffff;
buffer[4] = 0xffffffff;
buffer[5] = (image->y << 16) | image->x;
buffer[6] = (height << 16) | image->width;
buffer[7] = dwords;
buffer += 8;
if ( tex_width >= 32 ) {
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
if ( DRM_COPY_FROM_USER( buffer, data,
dwords * sizeof(u32) ) ) {
DRM_ERROR( "EFAULT on data, %d dwords\n",
dwords );
return DRM_ERR(EFAULT);
}
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for ( i = 0 ; i < tex->height ; i++ ) {
if ( DRM_COPY_FROM_USER( buffer, data,
tex_width ) ) {
DRM_ERROR( "EFAULT on pad, %d bytes\n",
tex_width );
return DRM_ERR(EFAULT);
}
buffer += 8;
data += tex_width;
}
}
buf->pid = DRM_CURRENTPID;
buf->used = (dwords + 8) * sizeof(u32);
radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
radeon_cp_discard_buffer( dev, buf );
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
image->data = (const char *)image->data + size;
if ( DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ) ) {
DRM_ERROR( "EFAULT on tex->image\n" );
return DRM_ERR(EFAULT);
}
} else if ( size < 4 && size > 0 ) {
size = 4;
}
dwords = size / 4;
/* Dispatch the indirect buffer.
*/
buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_NONE |
(format << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_HOST_DATA |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
buffer[3] = 0xffffffff;
buffer[4] = 0xffffffff;
buffer[5] = (y << 16) | image->x;
buffer[6] = (height << 16) | image->width;
buffer[7] = dwords;
buffer += 8;
if ( tex_width >= 32 ) {
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
if ( DRM_COPY_FROM_USER( buffer, data, dwords * sizeof(u32) ) ) {
DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
return DRM_ERR(EFAULT);
}
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for ( i = 0 ; i < tex->height ; i++ ) {
if ( DRM_COPY_FROM_USER( buffer, data, tex_width ) ) {
DRM_ERROR( "EFAULT on pad, %d bytes\n",
tex_width );
return DRM_ERR(EFAULT);
}
buffer += 8;
data += tex_width;
}
}
buf->pid = DRM_CURRENTPID;
buf->used = (dwords + 8) * sizeof(u32);
radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
radeon_cp_discard_buffer( dev, buf );
(const u8 *)image->data += size;
} while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
* the texture data is written out to memory before rendering
* continues.
*/
BEGIN_RING( 4 );
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_2D_IDLE();
ADVANCE_RING();
return ret;
return 0;
}

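The rewritten radeon_cp_dispatch_texture loops in the kernel instead of bouncing EAGAIN back to userspace once per pass: each iteration clamps the blit to however many whole scanlines fit in one indirect buffer, then advances image->y, image->height and image->data. A standalone sketch of that chunking arithmetic; MAX_BLOB stands in for RADEON_MAX_TEXTURE_SIZE, and the size < 4 padding and size == 0 early-out from the real code are omitted:

#include <stdio.h>

#define MAX_BLOB (64 * 1024)

int main(void)
{
	unsigned int height = 1024, blit_width = 512, y = 0;

	while (height > 0) {
		unsigned int pass = height;
		if (pass * blit_width > MAX_BLOB)
			pass = MAX_BLOB / blit_width;   /* whole scanlines per buffer */
		printf("blit rows %u..%u\n", y, y + pass - 1);
		y += pass;                              /* advance like image->y */
		height -= pass;                         /* shrink like image->height */
	}
	return 0;
}
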
View file

@ -346,17 +346,30 @@ typedef struct drm_irq_busid {
} drm_irq_busid_t;
typedef enum {
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1 /* Wait for given number of vblanks */
_DRM_VBLANK_ABSOLUTE = 0x0, /* Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /* Wait for given number of vblanks */
_DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drm_vblank_seq_type_t;
typedef struct drm_radeon_vbl_wait {
#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL
struct drm_wait_vblank_request {
drm_vblank_seq_type_t type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
drm_vblank_seq_type_t type;
unsigned int sequence;
long tval_sec;
long tval_usec;
} drm_wait_vblank_t;
};
typedef union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
} drm_wait_vblank_t;
typedef struct drm_agp_mode {
unsigned long mode;

View file

@ -142,12 +142,12 @@ extern int mga_warp_init( drm_mga_private_t *dev_priv );
#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF( reg ) = val; } while (0)
#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(dev_priv->mmio); MGA_DEREF8( reg ) = val; } while (0)
static inline u32 _MGA_READ(u32 *addr)
{
DRM_READMEMORYBARRIER();
DRM_READMEMORYBARRIER(dev_priv->mmio);
return *(volatile u32 *)addr;
}
#else

View file

@ -71,7 +71,7 @@
[DRM_IOCTL_NR(DRM_IOCTL_R128_DEPTH)] = { r128_cce_depth, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_R128_STIPPLE)] = { r128_cce_stipple, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_R128_INDIRECT)] = { r128_cce_indirect, 1, 1 }, \
[DRM_IOCTL_NR(DRM_IOCTL_R128_GETPARAM)] = { r128_getparam, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_GETPARAM)] = { r128_getparam, 1, 0 },
/* Driver customization:
*/

View file

@ -51,7 +51,7 @@
#define DRIVER_DATE "20020828"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 7
#define DRIVER_MINOR 8
#define DRIVER_PATCHLEVEL 0
/* Interface history:
@ -77,6 +77,7 @@
* and R200_PP_CUBIC_OFFSET_F1_[0..5].
* Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
* R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
* 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
*/
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
@ -105,11 +106,6 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 },
#define USE_IRQS 1
#if USE_IRQS
#define __HAVE_DMA_IRQ 1
#define __HAVE_VBL_IRQ 1
#define __HAVE_SHARED_IRQ 1
/* When a client dies:
* - Check for and clean up flipped page state
@ -117,35 +113,36 @@
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
#define DRIVER_PRERELEASE() do { \
#define DRIVER_PRERELEASE() \
do { \
if ( dev->dev_private ) { \
drm_radeon_private_t *dev_priv = dev->dev_private; \
if ( dev_priv->page_flipping ) { \
radeon_do_cleanup_pageflip( dev ); \
} \
radeon_mem_release( dev_priv->agp_heap ); \
radeon_mem_release( dev_priv->fb_heap ); \
} \
} while (0)
/* On unloading the module:
* - Free memory heap structure
* - Remove mappings made at startup and free dev_private.
/* When the last client dies, shut down the CP and free dev->dev_priv.
*/
#define DRIVER_PRETAKEDOWN() do { \
if ( dev->dev_private ) { \
drm_radeon_private_t *dev_priv = dev->dev_private; \
radeon_mem_takedown( &(dev_priv->agp_heap) ); \
radeon_do_cleanup_cp( dev ); \
} \
#define __HAVE_RELEASE 1
#define DRIVER_RELEASE() \
do { \
DRM(reclaim_buffers)( dev, priv->pid ); \
if ( dev->open_count == 1) \
radeon_do_release( dev ); \
} while (0)
#else
#define __HAVE_DMA_IRQ 0
#endif
/* DMA customization:
*/
#define __HAVE_DMA 1
#define __HAVE_DMA_IRQ 1
#define __HAVE_VBL_IRQ 1
#define __HAVE_SHARED_IRQ 1
/* Buffer customization:

View file

@ -1355,6 +1355,9 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
if (!dev_priv->cp_running)
return 0;
/* Flush any pending CP commands. This ensures any outstanding
* commands are executed by the engine before we turn it off.
*/
@ -1382,6 +1385,39 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
return 0;
}
void radeon_do_release( drm_device_t *dev )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int ret;
if (dev_priv) {
if (dev_priv->cp_running) {
/* Stop the cp */
while ((ret = radeon_do_cp_idle( dev_priv )) != 0) {
DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
#ifdef __linux__
schedule();
#else
tsleep(&ret, PZERO, "rdnrel", 1);
#endif
}
radeon_do_cp_stop( dev_priv );
radeon_do_engine_reset( dev );
}
/* Disable *all* interrupts */
RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
/* Free memory heap structures */
radeon_mem_takedown( &(dev_priv->agp_heap) );
radeon_mem_takedown( &(dev_priv->fb_heap) );
/* deallocate kernel resources */
radeon_do_cleanup_cp( dev );
}
}
/* Just reset the CP ring. Called as part of an X Server engine reset.
*/
int radeon_cp_reset( DRM_IOCTL_ARGS )
@ -1413,9 +1449,6 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
LOCK_TEST_WITH_RETURN( dev );
/* if (dev->irq) */
/* radeon_emit_and_wait_irq( dev ); */
return radeon_do_cp_idle( dev_priv );
}
@ -1498,7 +1531,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
}
}
DRM_ERROR( "returning NULL!\n" );
DRM_DEBUG( "returning NULL!\n" );
return NULL;
}
#if 0

View file

@ -382,7 +382,7 @@ typedef struct {
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( 0x4c, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(0x4d, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(0x4e, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( 0x4f, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)

View file

@ -194,6 +194,7 @@ extern int radeon_emit_and_wait_irq(drm_device_t *dev);
extern int radeon_wait_irq(drm_device_t *dev, int swi_nr);
extern int radeon_emit_irq(drm_device_t *dev);
extern void radeon_do_release(drm_device_t *dev);
/* Flags for stats.boxes
*/
@ -821,13 +822,6 @@ do { \
* Ring control
*/
#if defined(__powerpc__)
#define radeon_flush_write_combine() (void) GET_RING_HEAD( &dev_priv->ring )
#else
#define radeon_flush_write_combine() DRM_WRITEMEMORYBARRIER( dev_priv->ring_rtpr )
#endif
#define RADEON_VERBOSE 0
#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;
@ -861,8 +855,13 @@ do { \
dev_priv->ring.tail = write; \
} while (0)
#define COMMIT_RING() do { \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
#define COMMIT_RING() do { \
/* Flush writes to ring */ \
DRM_READMEMORYBARRIER(dev_priv->mmio); \
GET_RING_HEAD( &dev_priv->ring ); \
RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
/* read from PCI bus to ensure correct posting */ \
RADEON_READ( RADEON_CP_RB_RPTR ); \
} while (0)
#define OUT_RING( x ) do { \

View file

@ -61,7 +61,11 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
(drm_radeon_private_t *)dev->dev_private;
u32 stat;
stat = RADEON_READ(RADEON_GEN_INT_STATUS);
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = RADEON_READ(RADEON_GEN_INT_STATUS)
& (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
if (!stat)
return;
@ -70,23 +74,21 @@ void DRM(dma_service)( DRM_IRQ_ARGS )
DRM_WAKEUP( &dev_priv->swi_queue );
}
#if __HAVE_VBL_IRQ
/* VBLANK interrupt */
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
DRM(vbl_send_signals)( dev );
}
#endif
/* Acknowledge all the bits in GEN_INT_STATUS -- seem to get
* more than we asked for...
*/
/* Acknowledge interrupts we handle */
RADEON_WRITE(RADEON_GEN_INT_STATUS, stat);
}
static __inline__ void radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv)
{
u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS );
u32 tmp = RADEON_READ( RADEON_GEN_INT_STATUS )
& (RADEON_SW_INT_TEST_ACK | RADEON_CRTC_VBLANK_STAT);
if (tmp)
RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp );
}
@ -138,7 +140,6 @@ int radeon_emit_and_wait_irq(drm_device_t *dev)
}
#if __HAVE_VBL_IRQ
int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
{
drm_radeon_private_t *dev_priv =
@ -161,13 +162,12 @@ int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
*/
DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
+ ~*sequence + 1 ) <= (1<<23) ) );
- *sequence ) <= (1<<23) ) );
*sequence = cur_vblank;
return ret;
}
#endif
/* Needs the lock as it touches the ring.

View file

@ -130,18 +130,6 @@ static void free_block( struct mem_block *p )
}
}
#if 0
static void print_heap( struct mem_block *heap )
{
struct mem_block *p;
for (p = heap->next ; p != heap ; p = p->next)
DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
p->start, p->start + p->size,
p->size, p->pid);
}
#endif
/* Initialize. How to check for an uninitialized heap?
*/
static int init_heap(struct mem_block **heap, int start, int size)

View file

@ -1073,20 +1073,31 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
u32 *buffer;
const u8 *data;
int size, dwords, tex_width, blit_width;
u32 y, height;
int ret = 0, i;
u32 height;
int i;
RING_LOCALS;
dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
/* FIXME: Be smarter about this...
/* Flush the pixel cache. This ensures no pixel data gets mixed
* up with the texture data from the host data blit, otherwise
* part of the texture image may be corrupted.
*/
buf = radeon_freelist_get( dev );
if ( !buf ) return DRM_ERR(EAGAIN);
BEGIN_RING( 4 );
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING();
#ifdef __BIG_ENDIAN
/* The Mesa texture functions provide the data in little endian as the
* chip wants it, but we need to compensate for the fact that the CP
* ring gets byte-swapped
*/
BEGIN_RING( 2 );
OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
ADVANCE_RING();
#endif
DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
tex->offset >> 10, tex->pitch, tex->format,
image->x, image->y, image->width, image->height );
/* The compiler won't optimize away a division by a variable,
* even if the only legal values are powers of two. Thus, we'll
@ -1120,127 +1131,111 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
return DRM_ERR(EINVAL);
}
DRM_DEBUG( " tex=%dx%d blit=%d\n",
tex_width, tex->height, blit_width );
DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
/* Flush the pixel cache. This ensures no pixel data gets mixed
* up with the texture data from the host data blit, otherwise
* part of the texture image may be corrupted.
*/
BEGIN_RING( 4 );
do {
DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
tex->offset >> 10, tex->pitch, tex->format,
image->x, image->y, image->width, image->height );
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_IDLE();
ADVANCE_RING();
#ifdef __BIG_ENDIAN
/* The Mesa texture functions provide the data in little endian as the
* chip wants it, but we need to compensate for the fact that the CP
* ring gets byte-swapped
*/
BEGIN_RING( 2 );
OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
ADVANCE_RING();
#endif
/* Make a copy of the parameters in case we have to update them
* for a multi-pass texture blit.
*/
y = image->y;
height = image->height;
data = (const u8 *)image->data;
size = height * blit_width;
if ( size > RADEON_MAX_TEXTURE_SIZE ) {
/* Texture image is too large, do a multipass upload */
ret = DRM_ERR(EAGAIN);
/* Adjust the blit size to fit the indirect buffer */
height = RADEON_MAX_TEXTURE_SIZE / blit_width;
/* Make a copy of some parameters in case we have to
* update them for a multi-pass texture blit.
*/
height = image->height;
data = (const u8 *)image->data;
size = height * blit_width;
if ( size > RADEON_MAX_TEXTURE_SIZE ) {
height = RADEON_MAX_TEXTURE_SIZE / blit_width;
size = height * blit_width;
} else if ( size < 4 && size > 0 ) {
size = 4;
} else if ( size == 0 ) {
return 0;
}
buf = radeon_freelist_get( dev );
if ( 0 && !buf ) {
radeon_do_cp_idle( dev_priv );
buf = radeon_freelist_get( dev );
}
if ( !buf ) {
DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
return DRM_ERR(EAGAIN);
}
/* Dispatch the indirect buffer.
*/
buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
dwords = size / 4;
buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_NONE |
(format << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_HOST_DATA |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
buffer[3] = 0xffffffff;
buffer[4] = 0xffffffff;
buffer[5] = (image->y << 16) | image->x;
buffer[6] = (height << 16) | image->width;
buffer[7] = dwords;
buffer += 8;
if ( tex_width >= 32 ) {
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
if ( DRM_COPY_FROM_USER( buffer, data,
dwords * sizeof(u32) ) ) {
DRM_ERROR( "EFAULT on data, %d dwords\n",
dwords );
return DRM_ERR(EFAULT);
}
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for ( i = 0 ; i < tex->height ; i++ ) {
if ( DRM_COPY_FROM_USER( buffer, data,
tex_width ) ) {
DRM_ERROR( "EFAULT on pad, %d bytes\n",
tex_width );
return DRM_ERR(EFAULT);
}
buffer += 8;
data += tex_width;
}
}
buf->pid = DRM_CURRENTPID;
buf->used = (dwords + 8) * sizeof(u32);
radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
radeon_cp_discard_buffer( dev, buf );
/* Update the input parameters for next time */
image->y += height;
image->height -= height;
image->data = (const char *)image->data + size;
if ( DRM_COPY_TO_USER( tex->image, image, sizeof(*image) ) ) {
DRM_ERROR( "EFAULT on tex->image\n" );
return DRM_ERR(EFAULT);
}
} else if ( size < 4 && size > 0 ) {
size = 4;
}
dwords = size / 4;
/* Dispatch the indirect buffer.
*/
buffer = (u32 *)((char *)dev_priv->buffers->handle + buf->offset);
buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_BRUSH_NONE |
(format << 8) |
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_HOST_DATA |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
buffer[3] = 0xffffffff;
buffer[4] = 0xffffffff;
buffer[5] = (y << 16) | image->x;
buffer[6] = (height << 16) | image->width;
buffer[7] = dwords;
buffer += 8;
if ( tex_width >= 32 ) {
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
if ( DRM_COPY_FROM_USER( buffer, data, dwords * sizeof(u32) ) ) {
DRM_ERROR( "EFAULT on data, %d dwords\n", dwords );
return DRM_ERR(EFAULT);
}
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for ( i = 0 ; i < tex->height ; i++ ) {
if ( DRM_COPY_FROM_USER( buffer, data, tex_width ) ) {
DRM_ERROR( "EFAULT on pad, %d bytes\n",
tex_width );
return DRM_ERR(EFAULT);
}
buffer += 8;
data += tex_width;
}
}
buf->pid = DRM_CURRENTPID;
buf->used = (dwords + 8) * sizeof(u32);
radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
radeon_cp_discard_buffer( dev, buf );
(const u8 *)image->data += size;
} while (image->height > 0);
/* Flush the pixel cache after the blit completes. This ensures
* the texture data is written out to memory before rendering
* continues.
*/
BEGIN_RING( 4 );
RADEON_FLUSH_CACHE();
RADEON_WAIT_UNTIL_2D_IDLE();
ADVANCE_RING();
return ret;
return 0;
}