Reorganise so that each card driver is in a different directory to make
module building easier.
Integrate latest api changes from linux.
Change sysctl root from hw.graphics to hw.dri.
Change major device number from 201 to 145 which I have reserved for DRI.
Hook the card driver into the kernel using the standard pci driver
    interfaces. This makes it easier to hook resources and interrupts.
This commit is contained in:
Doug Rabson 2000-05-07 14:14:47 +00:00
parent 462ded9c91
commit b6da7b4178
34 changed files with 846 additions and 887 deletions

View file

@ -1,17 +1,5 @@
# $FreeBSD$
KMOD = drm
SRCS = init.c memory.c auth.c context.c drawable.c bufs.c \
lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c
SRCS += tdfx_drv.c tdfx_context.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS}
KERN = /usr/src/sys
SUBDIR = drm tdfx gamma
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>
.include <bsd.subdir.mk>

View file

@ -48,6 +48,7 @@
#include <sys/filio.h>
#include <sys/sysctl.h>
#include <sys/select.h>
#include <sys/bus.h>
#include "drm.h"
@ -58,6 +59,7 @@ typedef u_int32_t spinlock_t;
#define atomic_read(p) (*(p))
#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_subtract_int(p, 1)
#define atomic_add(n, p) atomic_add_int(p, n)
#define atomic_sub(n, p) atomic_subtract_int(p, n)
/* Fake this */
@ -359,7 +361,8 @@ typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */
dev_t device; /* Device number for mknod */
device_t device; /* Device instance from newbus */
dev_t devnode; /* Device number for mknod */
char *devname; /* For /proc/interrupts */
int blocked; /* Blocked due to VC switch? */
@ -409,7 +412,8 @@ typedef struct drm_device {
drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */
int irq; /* Interrupt used by board */
struct resource *irq; /* Interrupt used by board */
void *irqh; /* Handle from bus_setup_intr */
__volatile__ int context_flag; /* Context swapping flag */
__volatile__ int interrupt_flag;/* Interruption handler flag */
__volatile__ int dma_flag; /* DMA dispatch flag */

16
bsd-core/tdfx/Makefile Normal file
View file

@ -0,0 +1,16 @@
# $FreeBSD$
KMOD = tdfx
SRCS = tdfx_drv.c tdfx_context.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I..
KERN = /usr/src/sys
KMODDEPS = drm
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>

View file

@ -1,17 +1,5 @@
# $FreeBSD$
KMOD = drm
SRCS = init.c memory.c auth.c context.c drawable.c bufs.c \
lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c
SRCS += tdfx_drv.c tdfx_context.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS}
KERN = /usr/src/sys
SUBDIR = drm tdfx gamma
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>
.include <bsd.subdir.mk>

View file

@ -1,17 +1,5 @@
# $FreeBSD$
KMOD = drm
SRCS = init.c memory.c auth.c context.c drawable.c bufs.c \
lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c
SRCS += tdfx_drv.c tdfx_context.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS}
KERN = /usr/src/sys
SUBDIR = drm tdfx gamma
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>
.include <bsd.subdir.mk>

View file

@ -56,6 +56,19 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
/* Warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well */
typedef struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
} drm_clip_rect_t;
/* Separate include files for the i810/mga specific structures */
#include "mga_drm.h"
#include "i810_drm.h"
typedef struct drm_version {
int version_major; /* Major version */
@ -160,8 +173,11 @@ typedef struct drm_buf_desc {
int low_mark; /* Low water mark */
int high_mark; /* High water mark */
enum {
DRM_PAGE_ALIGN = 0x01 /* Align on page boundaries for DMA */
_DRM_PAGE_ALIGN = 0x01, /* Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02 /* Buffer is in agp space */
} flags;
unsigned long agp_start; /* Start address of where the agp buffers
* are in the agp aperture */
} drm_buf_desc_t;
typedef struct drm_buf_info {
@ -232,6 +248,38 @@ typedef struct drm_irq_busid {
int funcnum;
} drm_irq_busid_t;
typedef struct drm_agp_mode {
unsigned long mode;
} drm_agp_mode_t;
/* For drm_agp_alloc -- allocated a buffer */
typedef struct drm_agp_buffer {
unsigned long size; /* In bytes -- will round to page boundary */
unsigned long handle; /* Used for BIND/UNBIND ioctls */
unsigned long type; /* Type of memory to allocate */
unsigned long physical; /* Physical used by i810 */
} drm_agp_buffer_t;
/* For drm_agp_bind */
typedef struct drm_agp_binding {
unsigned long handle; /* From drm_agp_buffer */
unsigned long offset; /* In bytes -- will round to page boundary */
} drm_agp_binding_t;
typedef struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /* physical address */
unsigned long aperture_size; /* bytes */
unsigned long memory_allowed; /* bytes */
unsigned long memory_used;
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
} drm_agp_info_t;
#define DRM_IOCTL_BASE 'd'
#define DRM_IOCTL_NR(n) ((n) & 0xff)
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
@ -271,4 +319,28 @@ typedef struct drm_irq_busid {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOR( 0x32, drm_agp_mode_t)
#define DRM_IOCTL_AGP_INFO DRM_IOW( 0x33, drm_agp_info_t)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
#define DRM_IOCTL_AGP_BIND DRM_IOWR(0x36, drm_agp_binding_t)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
/* Mga specific ioctls */
#define DRM_IOCTL_MGA_INIT DRM_IOW( 0x40, drm_mga_init_t)
#define DRM_IOCTL_MGA_SWAP DRM_IOW( 0x41, drm_mga_swap_t)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( 0x42, drm_mga_clear_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( 0x43, drm_mga_iload_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( 0x44, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( 0x45, drm_lock_t )
/* I810 specific ioctls */
#define DRM_IOCTL_I810_INIT DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX DRM_IOW( 0x41, drm_i810_vertex_t)
#define DRM_IOCTL_I810_DMA DRM_IOW( 0x42, drm_i810_general_t)
#define DRM_IOCTL_I810_FLUSH DRM_IO ( 0x43)
#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
#endif

16
bsd/drm/Makefile Normal file
View file

@ -0,0 +1,16 @@
# $FreeBSD$
KMOD = drm
SRCS = init.c memory.c auth.c context.c drawable.c bufs.c \
lists.c lock.c ioctl.c fops.c vm.c dma.c sysctl.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I..
KERN = /usr/src/sys
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>

View file

@ -169,7 +169,7 @@ int drm_addbufs(dev_t kdev, u_long cmd, caddr_t data,
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
if (dev->queue_count) return EBUSY; /* Not while in use */
alignment = (request.flags & DRM_PAGE_ALIGN) ? round_page(size) :size;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) :size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;

View file

@ -206,7 +206,7 @@ int drm_context_switch(drm_device_t *dev, int old, int new)
}
#if DRM_DMA_HISTOGRAM
nanotime(&dev->ctx_start);
getnanotime(&dev->ctx_start);
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
@ -262,7 +262,7 @@ int drm_context_switch_complete(drm_device_t *dev, int new)
#if DRM_DMA_HISTOGRAM
{
struct timespec ts;
nanotime(&ts);
getnanotime(&ts);
timespecsub(&ts, &dev->ctx_start);
atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
}
@ -372,7 +372,7 @@ int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
if (!_DRM_LOCK_IS_HELD(context)) {
DRM_ERROR("No lock held during \"while locked\""
" request\n");
return -EINVAL;
return EINVAL;
}
if (d->context != _DRM_LOCKING_CONTEXT(context)
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
@ -380,7 +380,7 @@ int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
" \"while locked\" request\n",
_DRM_LOCKING_CONTEXT(context),
d->context);
return -EINVAL;
return EINVAL;
}
q = dev->queuelist[DRM_KERNEL_CONTEXT];
while_locked = 1;

View file

@ -33,7 +33,7 @@
#include "drmP.h"
#include <sys/sysctl.h>
SYSCTL_NODE(_hw, OID_AUTO, graphics, CTLFLAG_RW, 0, "DRI Graphics");
SYSCTL_NODE(_hw, OID_AUTO, dri, CTLFLAG_RW, 0, "DRI Graphics");
static int drm_name_info SYSCTL_HANDLER_ARGS;
static int drm_vm_info SYSCTL_HANDLER_ARGS;
@ -81,7 +81,7 @@ int drm_sysctl_init(drm_device_t *dev)
/* Find the next free slot under hw.dri */
i = 0;
SLIST_FOREACH(oid, &sysctl__hw_graphics_children, oid_link) {
SLIST_FOREACH(oid, &sysctl__hw_dri_children, oid_link) {
if (i <= oid->oid_arg2)
i = oid->oid_arg2 + 1;
}
@ -94,7 +94,7 @@ int drm_sysctl_init(drm_device_t *dev)
info->name[1] = 0;
oid = &info->oids[DRM_SYSCTL_ENTRIES];
bzero(oid, sizeof(*oid));
oid->oid_parent = &sysctl__hw_graphics_children;
oid->oid_parent = &sysctl__hw_dri_children;
oid->oid_number = OID_AUTO;
oid->oid_kind = CTLTYPE_NODE | CTLFLAG_RW;
oid->oid_arg1 = &info->list;
@ -145,9 +145,9 @@ static int drm_name_info SYSCTL_HANDLER_ARGS
if (dev->unique) {
DRM_SYSCTL_PRINT("%s 0x%x %s\n",
dev->name, dev2udev(dev->device), dev->unique);
dev->name, dev2udev(dev->devnode), dev->unique);
} else {
DRM_SYSCTL_PRINT("%s 0x%x\n", dev->name, dev2udev(dev->device));
DRM_SYSCTL_PRINT("%s 0x%x\n", dev->name, dev2udev(dev->devnode));
}
SYSCTL_OUT(req, "", 1);
@ -255,7 +255,7 @@ static int drm_queues_info SYSCTL_HANDLER_ARGS
}
/* drm_bufs_info is called whenever a process reads
/dev/drm/<dev>/bufs. */
hw.dri.0.bufs. */
static int _drm_bufs_info SYSCTL_HANDLER_ARGS
{

View file

@ -48,6 +48,7 @@
#include <sys/filio.h>
#include <sys/sysctl.h>
#include <sys/select.h>
#include <sys/bus.h>
#include "drm.h"
@ -58,6 +59,7 @@ typedef u_int32_t spinlock_t;
#define atomic_read(p) (*(p))
#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_subtract_int(p, 1)
#define atomic_add(n, p) atomic_add_int(p, n)
#define atomic_sub(n, p) atomic_subtract_int(p, n)
/* Fake this */
@ -359,7 +361,8 @@ typedef struct drm_device {
const char *name; /* Simple driver name */
char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */
dev_t device; /* Device number for mknod */
device_t device; /* Device instance from newbus */
dev_t devnode; /* Device number for mknod */
char *devname; /* For /proc/interrupts */
int blocked; /* Blocked due to VC switch? */
@ -409,7 +412,8 @@ typedef struct drm_device {
drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */
int irq; /* Interrupt used by board */
struct resource *irq; /* Interrupt used by board */
void *irqh; /* Handle from bus_setup_intr */
__volatile__ int context_flag; /* Context swapping flag */
__volatile__ int interrupt_flag;/* Interruption handler flag */
__volatile__ int dma_flag; /* DMA dispatch flag */

16
bsd/gamma/Makefile Normal file
View file

@ -0,0 +1,16 @@
# $FreeBSD$
KMOD = gamma
SRCS = gamma_drv.c gamma_dma.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I..
KERN = /usr/src/sys
KMODDEPS = drm
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>

View file

@ -33,8 +33,11 @@
#include "drmP.h"
#include "gamma_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <vm/vm.h>
#include <vm/pmap.h>
/* WARNING!!! MAGIC NUMBER!!! The number of regions already added to the
kernel must be specified here. Currently, the number is 2. This must
@ -79,16 +82,17 @@
#define GAMMA_SYNC 0x8c40
#define GAMMA_SYNC_TAG 0x0188
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
unsigned long length)
static __inline void gamma_dma_dispatch(drm_device_t *dev,
vm_offset_t address,
vm_size_t length)
{
GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address));
GAMMA_WRITE(GAMMA_DMAADDRESS, vtophys(address));
while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
;
GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
static inline void gamma_dma_quiescent(drm_device_t *dev)
static __inline void gamma_dma_quiescent(drm_device_t *dev)
{
while (GAMMA_READ(GAMMA_DMACOUNT))
;
@ -112,20 +116,20 @@ static inline void gamma_dma_quiescent(drm_device_t *dev)
} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}
static inline void gamma_dma_ready(drm_device_t *dev)
static __inline void gamma_dma_ready(drm_device_t *dev)
{
while (GAMMA_READ(GAMMA_DMACOUNT))
;
}
static inline int gamma_dma_is_ready(drm_device_t *dev)
static __inline int gamma_dma_is_ready(drm_device_t *dev)
{
return !GAMMA_READ(GAMMA_DMACOUNT);
}
static void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
static void gamma_dma_service(void *arg)
{
drm_device_t *dev = (drm_device_t *)device;
drm_device_t *dev = (drm_device_t *)arg;
drm_device_dma_t *dma = dev->dma;
atomic_inc(&dev->total_irq);
@ -144,9 +148,11 @@ static void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
}
clear_bit(0, &dev->dma_flag);
#if 0
/* Dispatch new buffer */
queue_task(&dev->tq, &tq_immediate);
mark_bh(IMMEDIATE_BH);
#endif
}
}
@ -159,22 +165,22 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t dma_start, dma_stop;
struct timespec dma_start, dma_stop;
#endif
if (test_and_set_bit(0, &dev->dma_flag)) {
atomic_inc(&dma->total_missed_dma);
return -EBUSY;
return EBUSY;
}
#if DRM_DMA_HISTOGRAM
dma_start = get_cycles();
getnanotime(&dma_start);
#endif
if (!dma->next_buffer) {
DRM_ERROR("No next_buffer\n");
clear_bit(0, &dev->dma_flag);
return -EINVAL;
return EINVAL;
}
buf = dma->next_buffer;
@ -188,7 +194,7 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return -EINVAL;
return EINVAL;
}
if (!length) {
@ -201,7 +207,7 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
if (!gamma_dma_is_ready(dev)) {
clear_bit(0, &dev->dma_flag);
return -EBUSY;
return EBUSY;
}
if (buf->while_locked) {
@ -215,7 +221,7 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
DRM_KERNEL_CONTEXT)) {
atomic_inc(&dma->total_missed_lock);
clear_bit(0, &dev->dma_flag);
return -EBUSY;
return EBUSY;
}
}
@ -227,7 +233,7 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
drm_clear_next_buffer(dev);
drm_free_buffer(dev, buf);
}
retcode = -EBUSY;
retcode = EBUSY;
goto cleanup;
/* POST: we will wait for the context
@ -242,7 +248,7 @@ static int gamma_do_dma(drm_device_t *dev, int locked)
buf->waiting = 0;
buf->list = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
buf->time_dispatched = get_cycles();
getnanotime(&buf->time_dispatched);
#endif
gamma_dma_dispatch(dev, address, length);
@ -263,19 +269,15 @@ cleanup:
clear_bit(0, &dev->dma_flag);
#if DRM_DMA_HISTOGRAM
dma_stop = get_cycles();
atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]);
getnanotime(&dma_stop);
timespecsub(&dma_stop, &dma_start);
atomic_inc(&dev->histo.ctx[drm_histogram_slot(&dma_stop)]);
#endif
return retcode;
}
static void gamma_dma_schedule_timer_wrapper(unsigned long dev)
{
gamma_dma_schedule((drm_device_t *)dev, 0);
}
static void gamma_dma_schedule_tq_wrapper(void *dev)
static void gamma_dma_schedule_wrapper(void *dev)
{
gamma_dma_schedule(dev, 0);
}
@ -291,24 +293,24 @@ int gamma_dma_schedule(drm_device_t *dev, int locked)
int expire = 20;
drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
cycles_t schedule_start;
struct timespec schedule_start;
#endif
if (test_and_set_bit(0, &dev->interrupt_flag)) {
/* Not reentrant */
atomic_inc(&dma->total_missed_sched);
return -EBUSY;
return EBUSY;
}
missed = atomic_read(&dma->total_missed_sched);
#if DRM_DMA_HISTOGRAM
schedule_start = get_cycles();
getnanotime(&schedule_start);
#endif
again:
if (dev->context_flag) {
clear_bit(0, &dev->interrupt_flag);
return -EBUSY;
return EBUSY;
}
if (dma->next_buffer) {
/* Unsent buffer that was previously
@ -324,7 +326,7 @@ again:
} else {
do {
next = drm_select_queue(dev,
gamma_dma_schedule_timer_wrapper);
gamma_dma_schedule_wrapper);
if (next >= 0) {
q = dev->queuelist[next];
buf = drm_waitlist_get(&q->waitlist);
@ -358,14 +360,19 @@ again:
clear_bit(0, &dev->interrupt_flag);
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles()
- schedule_start)]);
{
struct timespec ts;
getnanotime(&ts);
timespecsub(&ts, &schedule_start);
atomic_inc(&dev->histo.schedule[drm_histogram_slot(&ts)]);
}
#endif
return retcode;
}
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
struct proc *p = curproc;
unsigned long address;
unsigned long length;
int must_free = 0;
@ -375,21 +382,20 @@ static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
drm_buf_t *buf;
drm_buf_t *last_buf = NULL;
drm_device_dma_t *dma = dev->dma;
DECLARE_WAITQUEUE(entry, current);
static int never;
/* Turn off interrupt handling */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) return -EINTR;
retcode = tsleep(&never, PZERO|PCATCH, "gamp1", 1);
if (retcode)
return retcode;
}
if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
while (!drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
retcode = tsleep(&never, PZERO|PCATCH, "gamp2", 1);
if (retcode)
return retcode;
}
++must_free;
}
@ -403,16 +409,16 @@ static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
continue;
}
buf = dma->buflist[ idx ];
if (buf->pid != current->pid) {
if (buf->pid != p->p_pid) {
DRM_ERROR("Process %d using buffer owned by %d\n",
current->pid, buf->pid);
retcode = -EINVAL;
p->p_pid, buf->pid);
retcode = EINVAL;
goto cleanup;
}
if (buf->list != DRM_LIST_NONE) {
DRM_ERROR("Process %d using %d's buffer on list %d\n",
current->pid, buf->pid, buf->list);
retcode = -EINVAL;
p->p_pid, buf->pid, buf->list);
retcode = EINVAL;
goto cleanup;
}
/* This isn't a race condition on
@ -433,14 +439,14 @@ static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
DRM_ERROR("Sending pending buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
retcode = -EINVAL;
retcode = EINVAL;
goto cleanup;
}
if (buf->waiting) {
DRM_ERROR("Sending waiting buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
retcode = -EINVAL;
retcode = EINVAL;
goto cleanup;
}
buf->pending = 1;
@ -448,8 +454,7 @@ static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
atomic_inc(&dev->context_wait);
/* PRE: dev->last_context != buf->context */
drm_context_switch(dev, dev->last_context,
buf->context);
@ -458,13 +463,11 @@ static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
retcode = -EINTR;
retcode = tsleep(&dev->context_wait, PZERO|PCATCH,
"gamctx", 0);
atomic_dec(&dev->context_wait);
if (retcode)
goto cleanup;
}
if (dev->last_context != buf->context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context,
@ -473,7 +476,7 @@ static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
}
#if DRM_DMA_HISTOGRAM
buf->time_queued = get_cycles();
getnanotime(&buf->time_queued);
buf->time_dispatched = buf->time_queued;
#endif
gamma_dma_dispatch(dev, address, length);
@ -505,43 +508,40 @@ cleanup:
static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
DECLARE_WAITQUEUE(entry, current);
struct proc *p = curproc;
drm_buf_t *last_buf = NULL;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
if (d->flags & _DRM_DMA_BLOCK) {
last_buf = dma->buflist[d->send_indices[d->send_count-1]];
add_wait_queue(&last_buf->dma_wait, &entry);
}
if ((retcode = drm_dma_enqueue(dev, d))) {
if (d->flags & _DRM_DMA_BLOCK)
remove_wait_queue(&last_buf->dma_wait, &entry);
return retcode;
}
gamma_dma_schedule(dev, 0);
if (d->flags & _DRM_DMA_BLOCK) {
DRM_DEBUG("%d waiting\n", current->pid);
current->state = TASK_INTERRUPTIBLE;
last_buf = dma->buflist[d->send_indices[d->send_count-1]];
atomic_inc(&last_buf->dma_wait);
}
if (d->flags & _DRM_DMA_BLOCK) {
DRM_DEBUG("%d waiting\n", p->p_pid);
for (;;) {
retcode = tsleep(&last_buf->dma_wait, PZERO|PCATCH,
"gamdw", 0);
if (!last_buf->waiting
&& !last_buf->pending)
break; /* finished */
schedule();
if (signal_pending(current)) {
retcode = -EINTR; /* Can't restart */
if (retcode)
break;
}
}
current->state = TASK_RUNNING;
DRM_DEBUG("%d running\n", current->pid);
remove_wait_queue(&last_buf->dma_wait, &entry);
DRM_DEBUG("%d running\n", p->p_pid);
atomic_dec(&last_buf->dma_wait);
if (!retcode
|| (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
if (!waitqueue_active(&last_buf->dma_wait)) {
if (!last_buf->dma_wait) {
drm_free_buffer(dev, last_buf);
}
}
@ -554,39 +554,37 @@ static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
last_buf->idx,
last_buf->list,
last_buf->pid,
current->pid);
p->p_pid);
}
}
return retcode;
}
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
int gamma_dma(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
drm_dma_t d;
copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT);
d = *(drm_dma_t *) data;
DRM_DEBUG("%d %d: %d send, %d req\n",
current->pid, d.context, d.send_count, d.request_count);
p->p_pid, d.context, d.send_count, d.request_count);
if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
DRM_ERROR("Process %d using context %d\n",
current->pid, d.context);
return -EINVAL;
p->p_pid, d.context);
return EINVAL;
}
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
current->pid, d.send_count, dma->buf_count);
return -EINVAL;
p->p_pid, d.send_count, dma->buf_count);
return EINVAL;
}
if (d.request_count < 0 || d.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
current->pid, d.request_count, dma->buf_count);
return -EINVAL;
p->p_pid, d.request_count, dma->buf_count);
return EINVAL;
}
if (d.send_count) {
@ -603,25 +601,23 @@ int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
}
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT);
p->p_pid, d.granted_count);
*(drm_dma_t *) data = d;
return retcode;
}
int gamma_irq_install(drm_device_t *dev, int irq)
{
int rid;
int retcode;
if (!irq) return -EINVAL;
if (!irq) return EINVAL;
down(&dev->struct_sem);
if (dev->irq) {
up(&dev->struct_sem);
return -EBUSY;
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return EBUSY;
}
dev->irq = irq;
up(&dev->struct_sem);
DRM_DEBUG("%d\n", irq);
@ -633,25 +629,28 @@ int gamma_irq_install(drm_device_t *dev, int irq)
dev->dma->next_queue = NULL;
dev->dma->this_buffer = NULL;
#if 0
dev->tq.next = NULL;
dev->tq.sync = 0;
dev->tq.routine = gamma_dma_schedule_tq_wrapper;
dev->tq.data = dev;
#endif
/* Before installing handler */
GAMMA_WRITE(GAMMA_GCOMMANDMODE, 0);
GAMMA_WRITE(GAMMA_GDMACONTROL, 0);
/* Install handler */
if ((retcode = request_irq(dev->irq,
gamma_dma_service,
0,
dev->devname,
dev))) {
down(&dev->struct_sem);
rid = 0;
dev->irq = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
0, ~0, 1, RF_SHAREABLE);
if (!dev->irq)
return ENOENT;
retcode = bus_setup_intr(dev->device, dev->irq, INTR_TYPE_TTY,
gamma_dma_service, dev, &dev->irqh);
if (retcode) {
bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
dev->irq = 0;
up(&dev->struct_sem);
return retcode;
}
@ -665,35 +664,31 @@ int gamma_irq_install(drm_device_t *dev, int irq)
int gamma_irq_uninstall(drm_device_t *dev)
{
int irq;
if (!dev->irq)
return EINVAL;
DRM_DEBUG("%ld\n", rman_get_start(dev->irq));
down(&dev->struct_sem);
irq = dev->irq;
dev->irq = 0;
up(&dev->struct_sem);
if (!irq) return -EINVAL;
DRM_DEBUG("%d\n", irq);
GAMMA_WRITE(GAMMA_GDELAYTIMER, 0);
GAMMA_WRITE(GAMMA_COMMANDINTENABLE, 0);
GAMMA_WRITE(GAMMA_GINTENABLE, 0);
free_irq(irq, dev);
bus_teardown_intr(dev->device, dev->irq, dev->irqh);
bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
dev->irq = 0;
return 0;
}
int gamma_control(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
int gamma_control(dev_t kdev, u_long cmd, caddr_t data,
int flags, struct proc *p)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_t *dev = kdev->si_drv1;
drm_control_t ctl;
int retcode;
copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT);
ctl = *(drm_control_t *) data;
switch (ctl.func) {
case DRM_INST_HANDLER:
@ -705,40 +700,38 @@ int gamma_control(struct inode *inode, struct file *filp, unsigned int cmd,
return retcode;
break;
default:
return -EINVAL;
return EINVAL;
}
return 0;
}
int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
int gamma_lock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
drm_device_t *dev = kdev->si_drv1;
int ret = 0;
drm_lock_t lock;
drm_queue_t *q;
#if DRM_DMA_HISTOGRAM
cycles_t start;
struct timespec start;
dev->lck_start = start = get_cycles();
getnanotime(&start);
dev->lck_start = start;
#endif
copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT);
lock = *(drm_lock_t *) data;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
p->p_pid, lock.context);
return EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.context, p->p_pid, dev->lock.hw_lock->lock,
lock.flags);
if (lock.context < 0 || lock.context >= dev->queue_count)
return -EINVAL;
return EINVAL;
q = dev->queuelist[lock.context];
ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
@ -746,26 +739,29 @@ int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
if (!ret) {
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = jiffies - dev->lock.lock_time;
long j = ticks - dev->lock.lock_time;
if (j > 0 && j <= DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(j);
static int never;
ret = tsleep(&never, PZERO|PCATCH,
"gaml1", j);
if (ret)
return ret;
}
}
add_wait_queue(&dev->lock.lock_queue, &entry);
atomic_inc(&dev->lock.lock_queue);
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
ret = EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = current->pid;
dev->lock.lock_time = jiffies;
dev->lock.pid = p->p_pid;
dev->lock.lock_time = ticks;
atomic_inc(&dev->total_locks);
atomic_inc(&q->total_locks);
break; /* Got lock */
@ -773,15 +769,12 @@ int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
/* Contention */
atomic_inc(&dev->total_sleeps);
current->state = TASK_INTERRUPTIBLE;
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
ret = tsleep(&dev->lock.lock_queue, PZERO|PCATCH,
"gaml2", 0);
if (ret)
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev->lock.lock_queue, &entry);
atomic_dec(&dev->lock.lock_queue);
}
drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */
@ -795,7 +788,12 @@ int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
{
struct timespec ts;
getnanotime(&ts);
timespecsub(&ts, &start);
atomic_inc(&dev->histo.lacq[drm_histogram_slot(&ts)]);
}
#endif
return ret;

View file

@ -32,6 +32,62 @@
#include "drmP.h"
#include "gamma_drv.h"
#include <pci/pcivar.h>
static int gamma_init(device_t nbdev);
static void gamma_cleanup(device_t nbdev);
static int gamma_probe(device_t dev)
{
const char *s = 0;
switch (pci_get_devid(dev)) {
case 0x00083d3d:
s = "3D Labs Gamma graphics accelerator";
break;
}
if (s) {
device_set_desc(dev, s);
return 0;
}
return ENXIO;
}
static int gamma_attach(device_t dev)
{
gamma_init(dev);
return 0;
}
static int gamma_detach(device_t dev)
{
gamma_cleanup(dev);
return 0;
}
static device_method_t gamma_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, gamma_probe),
DEVMETHOD(device_attach, gamma_attach),
DEVMETHOD(device_detach, gamma_detach),
{ 0, 0 }
};
static driver_t gamma_driver = {
"drm",
gamma_methods,
sizeof(drm_device_t),
};
static devclass_t gamma_devclass;
#define GAMMA_SOFTC(unit) \
((drm_device_t *) devclass_get_softc(gamma_devclass, unit))
DRIVER_MODULE(if_gamma, pci, gamma_driver, gamma_devclass, 0, 0);
#define GAMMA_NAME "gamma"
#define GAMMA_DESC "3dlabs GMX 2000"
#define GAMMA_DATE "19990830"
@ -39,8 +95,6 @@
#define GAMMA_MINOR 0
#define GAMMA_PATCHLEVEL 5
static drm_device_t gamma_device;
#define CDEV_MAJOR 200
static struct cdevsw gamma_cdevsw = {
@ -259,21 +313,19 @@ gamma_takedown(drm_device_t *dev)
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
#if 0
wake_up_interruptible(&dev->lock.lock_queue);
#endif
wakeup(&dev->lock.lock_queue);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return 0;
}
/* gamma_init is called via SYSINIT at module load time */
/* gamma_init is called via gamma_attach at module load time */
static void
gamma_init(void *arg)
static int
gamma_init(device_t nbdev)
{
drm_device_t *dev = &gamma_device;
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
@ -291,16 +343,17 @@ gamma_init(void *arg)
return retcode;
}
#endif
dev->device = make_dev(&gamma_cdevsw,
/* gamma_misc.minor */ 0,
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
GAMMA_NAME);
dev->device = nbdev;
dev->devnode = make_dev(&gamma_cdevsw,
device_get_unit(nbdev),
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
GAMMA_NAME);
dev->name = GAMMA_NAME;
drm_mem_init();
drm_proc_init(dev);
drm_sysctl_init(dev);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
GAMMA_NAME,
@ -308,21 +361,21 @@ gamma_init(void *arg)
GAMMA_MINOR,
GAMMA_PATCHLEVEL,
GAMMA_DATE,
/* gamma_misc.minor */ 0);
device_get_unit(nbdev));
return 0;
}
SYSINIT(gamma_init, SI_SUB_DRIVERS, SI_ORDER_ANY, gamma_init, 0);
/* gamma_cleanup is called via SYSUNINIT at module unload time. */
/* gamma_cleanup is called via gamma_detach at module unload time. */
static void
gamma_cleanup(void *arg)
gamma_cleanup(device_t nbdev)
{
drm_device_t *dev = &gamma_device;
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
drm_proc_cleanup();
drm_sysctl_cleanup(dev);
#if 0
if (misc_deregister(&gamma_misc)) {
DRM_ERROR("Cannot unload module\n");
@ -374,27 +427,31 @@ int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd,
int
gamma_open(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = &gamma_device;
drm_device_t *dev = GAMMA_SOFTC(minor(kdev));
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
device_busy(dev->device);
if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
atomic_inc(&dev->total_open);
simple_lock(&dev->count_lock);
if (!dev->open_count++) {
simple_unlock(&dev->count_lock);
return gamma_setup(dev);
retcode = gamma_setup(dev);
}
simple_unlock(&dev->count_lock);
}
if (retcode)
device_unbusy(dev->device);
return retcode;
}
int
gamma_close(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_file_t *priv = kdev->si_drv1;
drm_device_t *dev = priv->dev;
drm_device_t *dev = kdev->si_drv1;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
@ -410,6 +467,7 @@ gamma_close(dev_t kdev, int flags, int fmt, struct proc *p)
return EBUSY;
}
simple_unlock(&dev->count_lock);
device_unbusy(dev->device);
return gamma_takedown(dev);
}
simple_unlock(&dev->count_lock);
@ -423,12 +481,18 @@ int
gamma_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = kdev->si_drv1;
drm_device_t *dev = priv->dev;
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
d_ioctl_t *func;
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find authenticator\n");
return EINVAL;
}
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
@ -468,8 +532,7 @@ gamma_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
int gamma_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_file_t *priv = kdev->si_drv1;
drm_device_t *dev = priv->dev;
drm_device_t *dev = kdev->si_drv1;
drm_lock_t *lockp = (drm_lock_t *) data;
if (lockp->context == DRM_KERNEL_CONTEXT) {

93
bsd/i810_drm.h Normal file
View file

@ -0,0 +1,93 @@
#ifndef _I810_DRM_H_
#define _I810_DRM_H_
/* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver.
*/
/* Might one day want to support the client-side ringbuffer code again.
*/
#ifndef _I810_DEFINES_
#define _I810_DEFINES_
#define I810_USE_BATCH 1
#define I810_DMA_BUF_ORDER 12
#define I810_DMA_BUF_SZ (1<<I810_DMA_BUF_ORDER)
#define I810_DMA_BUF_NR 256
#define I810_NR_SAREA_CLIPRECTS 2
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define I810_NR_TEX_REGIONS 64
#define I810_LOG_MIN_TEX_REGION_SIZE 16
#endif
/* Argument block for the i810 DMA init/cleanup ioctl. */
typedef struct _drm_i810_init {
	enum {
		I810_INIT_DMA = 0x01,
		I810_CLEANUP_DMA = 0x02
	} func;				/* requested operation */
	int ring_map_idx;		/* map index of the ring buffer */
	int buffer_map_idx;		/* map index of the DMA buffers */
	int sarea_priv_offset;
	unsigned long ring_start;
	unsigned long ring_end;
	unsigned long ring_size;
} drm_i810_init_t;

/* Warning: If you change the SAREA structure you must change the Xserver
 * structure as well */

/* One slot in the circular texture-region LRU kept in the SAREA. */
typedef struct _drm_i810_tex_region {
	unsigned char next, prev; /* indices to form a circular LRU */
	unsigned char in_use;	/* owned by a client, or free? */
	int age;		/* tracked by clients to update local LRU's */
} drm_i810_tex_region_t;

/* Shared-memory area layout, shared between kernel and X server. */
typedef struct _drm_i810_sarea {
	unsigned int nbox;
	drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];

	/* Maintain an LRU of contiguous regions of texture space.  If
	 * you think you own a region of texture memory, and it has an
	 * age different to the one you set, then you are mistaken and
	 * it has been stolen by another client.  If global texAge
	 * hasn't changed, there is no need to walk the list.
	 *
	 * These regions can be used as a proxy for the fine-grained
	 * texture information of other clients - by maintaining them
	 * in the same lru which is used to age their own textures,
	 * clients have an approximate lru for the whole of global
	 * texture space, and can make informed decisions as to which
	 * areas to kick out.  There is no need to choose whether to
	 * kick out your own texture or someone else's - simply eject
	 * them all in LRU order.
	 */
	drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1];
	/* Last elt is sentinal */
	int texAge;		/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int last_quiescent;	/* */
	int ctxOwner;		/* last context to upload state */
} drm_i810_sarea_t;

/* Generic "buffer index + bytes used" ioctl argument. */
typedef struct _drm_i810_general {
	int idx;
	int used;
} drm_i810_general_t;

/* These may be placeholders if we have more cliprects than
 * I810_NR_SAREA_CLIPRECTS.  In that case, the client sets discard to
 * false, indicating that the buffer will be dispatched again with a
 * new set of cliprects.
 */
typedef struct _drm_i810_vertex {
	int idx;		/* buffer index */
	int used;		/* nr bytes in use */
	int discard;		/* client is finished with the buffer? */
} drm_i810_vertex_t;
#endif /* _I810_DRM_H_ */

261
bsd/mga_drm.h Normal file
View file

@ -0,0 +1,261 @@
/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
* Keith Whitwell <keithw@precisioninsight.com>
*
* $XFree86$
*/
#ifndef _MGA_DRM_H_
#define _MGA_DRM_H_
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmMga.h)
*/
#ifndef _MGA_DEFINES_
#define _MGA_DEFINES_
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_WARP_TGZ 0
#define MGA_WARP_TGZF (MGA_F)
#define MGA_WARP_TGZA (MGA_A)
#define MGA_WARP_TGZAF (MGA_F|MGA_A)
#define MGA_WARP_TGZS (MGA_S)
#define MGA_WARP_TGZSF (MGA_S|MGA_F)
#define MGA_WARP_TGZSA (MGA_S|MGA_A)
#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
#define MGA_WARP_T2GZ (MGA_T2)
#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
#define MGA_FRONT 0x1
#define MGA_BACK 0x2
#define MGA_DEPTH 0x4
/* 3d state excluding texture units:
*/
#define MGA_CTXREG_DSTORG 0 /* validated */
#define MGA_CTXREG_MACCESS 1
#define MGA_CTXREG_PLNWT 2
#define MGA_CTXREG_DWGCTL 3
#define MGA_CTXREG_ALPHACTRL 4
#define MGA_CTXREG_FOGCOLOR 5
#define MGA_CTXREG_WFLAG 6
#define MGA_CTXREG_TDUAL0 7
#define MGA_CTXREG_TDUAL1 8
#define MGA_CTXREG_FCOL 9
#define MGA_CTX_SETUP_SIZE 10
/* 2d state
*/
#define MGA_2DREG_PITCH 0
#define MGA_2D_SETUP_SIZE 1
/* Each texture unit has a state:
*/
#define MGA_TEXREG_CTL 0
#define MGA_TEXREG_CTL2 1
#define MGA_TEXREG_FILTER 2
#define MGA_TEXREG_BORDERCOL 3
#define MGA_TEXREG_ORG 4 /* validated */
#define MGA_TEXREG_ORG1 5
#define MGA_TEXREG_ORG2 6
#define MGA_TEXREG_ORG3 7
#define MGA_TEXREG_ORG4 8
#define MGA_TEXREG_WIDTH 9
#define MGA_TEXREG_HEIGHT 10
#define MGA_TEX_SETUP_SIZE 11
/* What needs to be changed for the current vertex dma buffer?
*/
#define MGA_UPLOAD_CTX 0x1
#define MGA_UPLOAD_TEX0 0x2
#define MGA_UPLOAD_TEX1 0x4
#define MGA_UPLOAD_PIPE 0x8
#define MGA_UPLOAD_TEX0IMAGE 0x10
#define MGA_UPLOAD_TEX1IMAGE 0x20
#define MGA_UPLOAD_2D 0x40
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
/* 64 buffers of 16k each, total 1 meg.
*/
#define MGA_DMA_BUF_ORDER 14
#define MGA_DMA_BUF_SZ (1<<MGA_DMA_BUF_ORDER)
#define MGA_DMA_BUF_NR 63
/* Keep these small for testing.
*/
#define MGA_NR_SAREA_CLIPRECTS 8
/* 2 heaps (1 for card, 1 for agp), each divided into upto 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define MGA_CARD_HEAP 0
#define MGA_AGP_HEAP 1
#define MGA_NR_TEX_HEAPS 2
#define MGA_NR_TEX_REGIONS 16
#define MGA_LOG_MIN_TEX_REGION_SIZE 16
#endif
/* Describes one installed WARP microcode pipe. */
typedef struct _drm_mga_warp_index {
	int installed;
	unsigned long phys_addr;
	int size;
} drm_mga_warp_index_t;

/* Argument block for the MGA DMA init/cleanup ioctl. */
typedef struct drm_mga_init {
	enum {
		MGA_INIT_DMA = 0x01,
		MGA_CLEANUP_DMA = 0x02
	} func;				/* requested operation */
	int reserved_map_agpstart;
	int reserved_map_idx;
	int buffer_map_idx;
	int sarea_priv_offset;
	int primary_size;
	int warp_ucode_size;
	/* Framebuffer / texture layout parameters. */
	int frontOffset;
	int backOffset;
	int depthOffset;
	int textureOffset;
	int textureSize;
	int agpTextureOffset;
	int agpTextureSize;
	int cpp;	/* presumably bytes per pixel — confirm */
	int stride;
	int sgram;
	int chipset;	/* MGA_CARD_TYPE_G200 or MGA_CARD_TYPE_G400 */
	drm_mga_warp_index_t WarpIndex[MGA_MAX_WARP_PIPES];
	int mAccess;
} drm_mga_init_t;
/* Warning: if you change the sarea structure, you must change the Xserver
 * structures as well */

/* One slot in the shared texture-region LRU. */
typedef struct _drm_mga_tex_region {
	unsigned char next, prev;	/* indices forming a circular LRU */
	unsigned char in_use;
	int age;
} drm_mga_tex_region_t;

/* Shared-memory area layout, shared between kernel and X server. */
typedef struct _drm_mga_sarea {
	/* The channel for communication of state information to the kernel
	 * on firing a vertex dma buffer.
	 */
	unsigned int ContextState[MGA_CTX_SETUP_SIZE];
	unsigned int ServerState[MGA_2D_SETUP_SIZE];
	unsigned int TexState[2][MGA_TEX_SETUP_SIZE];
	unsigned int WarpPipe;
	unsigned int dirty;	/* presumably MGA_UPLOAD_* flags — confirm */

	unsigned int nbox;
	drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];

	/* Information about the most recently used 3d drawable.  The
	 * client fills in the req_* fields, the server fills in the
	 * exported_ fields and puts the cliprects into boxes, above.
	 *
	 * The client clears the exported_drawable field before
	 * clobbering the boxes data.
	 */
	unsigned int req_drawable;	/* the X drawable id */
	unsigned int req_draw_buffer;	/* MGA_FRONT or MGA_BACK */

	unsigned int exported_drawable;
	unsigned int exported_index;
	unsigned int exported_stamp;
	unsigned int exported_buffers;
	unsigned int exported_nfront;
	unsigned int exported_nback;
	int exported_back_x, exported_front_x, exported_w;
	int exported_back_y, exported_front_y, exported_h;
	drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];

	/* Counters for aging textures and for client-side throttling.
	 */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int last_quiescent;	/* */

	/* LRU lists for texture memory in agp space and on the card
	 */
	drm_mga_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1];
	unsigned int texAge[MGA_NR_TEX_HEAPS];

	/* Mechanism to validate card state.
	 */
	int ctxOwner;
} drm_mga_sarea_t;
/* Device specific ioctls:
 */

/* Arguments for the MGA buffer-clear ioctl. */
typedef struct _drm_mga_clear {
	int clear_color;
	int clear_depth;
	int flags;	/* presumably MGA_FRONT/MGA_BACK/MGA_DEPTH — confirm */
} drm_mga_clear_t;

/* Swap ioctl takes no real arguments. */
typedef struct _drm_mga_swap {
	int dummy;
} drm_mga_swap_t;

/* Arguments for the image-load (texture upload) ioctl. */
typedef struct _drm_mga_iload {
	int idx;
	int length;
	unsigned int destOrg;
} drm_mga_iload_t;

/* Arguments for queueing a vertex DMA buffer. */
typedef struct _drm_mga_vertex {
	int idx;	/* buffer to queue */
	int used;	/* bytes in use */
	int discard;	/* client finished with buffer? */
} drm_mga_vertex_t;
#endif

16
bsd/tdfx/Makefile Normal file
View file

@ -0,0 +1,16 @@
# $FreeBSD$
# Build glue for the tdfx DRM kernel module (3Dfx Voodoo Banshee / Voodoo 3).
KMOD = tdfx
# Card-specific sources; the shared DRM core is built as the sibling "drm" module.
SRCS = tdfx_drv.c tdfx_context.c
# Auto-generated newbus/PCI interface headers needed by the PCI attachment.
SRCS += device_if.h bus_if.h pci_if.h
# -I.. picks up the shared DRM headers (drmP.h etc.) from the parent directory.
CFLAGS += ${DEBUG_FLAGS} -I..
KERN = /usr/src/sys
# Depends on the shared drm core module at load time.
KMODDEPS = drm
# Convenience symlinks so kernel headers resolve when building standalone.
@:
ln -sf /sys @
machine:
ln -sf /sys/i386/include machine
.include <bsd.kmod.mk>

View file

@ -32,6 +32,8 @@
#include "drmP.h"
#include "tdfx_drv.h"
#include <pci/pcivar.h>
#define TDFX_NAME "tdfx"
#define TDFX_DESC "tdfx"
#define TDFX_DATE "19991009"
@ -39,10 +41,67 @@
#define TDFX_MINOR 0
#define TDFX_PATCHLEVEL 1
static drm_device_t tdfx_device;
static int tdfx_init(device_t nbdev);
static void tdfx_cleanup(device_t nbdev);
drm_ctx_t tdfx_res_ctx;
#define CDEV_MAJOR 201
/*
 * Newbus probe: match supported 3Dfx PCI devices by their combined
 * device/vendor id (pci_get_devid: device in the high 16 bits, vendor
 * 0x121a in the low 16).  On a match, set the device description and
 * return 0 so the bus attaches us; otherwise return ENXIO.
 */
static int tdfx_probe(device_t dev)
{
	const char *desc = NULL;	/* NULL, not 0, for a null pointer */

	switch (pci_get_devid(dev)) {
	case 0x0003121a:		/* 3Dfx device 0x0003 */
		desc = "3Dfx Voodoo Banshee graphics accelerator";
		break;
	case 0x0005121a:		/* 3Dfx device 0x0005 */
		desc = "3Dfx Voodoo 3 graphics accelerator";
		break;
	}

	if (desc != NULL) {
		device_set_desc(dev, desc);
		return 0;
	}
	return ENXIO;
}
/*
 * Newbus attach: initialise the per-card DRM state.
 * Propagate tdfx_init's status instead of discarding it — the original
 * always returned 0, so a failed init still looked attached to newbus.
 */
static int tdfx_attach(device_t dev)
{
	return tdfx_init(dev);
}
/*
 * Newbus detach: tear down the per-card DRM state.  tdfx_cleanup
 * returns void, so detach always reports success.
 */
static int tdfx_detach(device_t dev)
{
	tdfx_cleanup(dev);
	return 0;
}
/* Newbus device-interface methods for the tdfx PCI attachment. */
static device_method_t tdfx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		tdfx_probe),
	DEVMETHOD(device_attach,	tdfx_attach),
	DEVMETHOD(device_detach,	tdfx_detach),

	{ 0, 0 }
};

/*
 * The driver name is "drm" rather than "tdfx" — presumably so all DRM
 * cards share one unit namespace (the unit number doubles as the minor
 * device number; see make_dev/device_get_unit in tdfx_init) — confirm.
 * The softc is the whole drm_device_t.
 */
static driver_t tdfx_driver = {
	"drm",
	tdfx_methods,
	sizeof(drm_device_t),
};

static devclass_t tdfx_devclass;

/* Map a minor/unit number back to its per-device softc. */
#define TDFX_SOFTC(unit) \
	((drm_device_t *) devclass_get_softc(tdfx_devclass, unit))

/*
 * Register with the PCI bus.  The module name must be "tdfx" to match
 * KMOD in the Makefile; the previous "if_tdfx" was a leftover from a
 * network-driver template ("if_" is the NIC module naming convention).
 */
DRIVER_MODULE(tdfx, pci, tdfx_driver, tdfx_devclass, 0, 0);
#define CDEV_MAJOR 145
/* tdfx_drv.c */
static d_open_t tdfx_open;
static d_close_t tdfx_close;
@ -243,21 +302,19 @@ tdfx_takedown(drm_device_t *dev)
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
#if 0
wake_up_interruptible(&dev->lock.lock_queue);
#endif
wakeup(&dev->lock.lock_queue);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
return 0;
}
/* tdfx_init is called via SYSINIT at module load time, */
/* tdfx_init is called via tdfx_attach at module load time, */
static int
tdfx_init(void *arg)
tdfx_init(device_t nbdev)
{
drm_device_t *dev = &tdfx_device;
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
@ -269,12 +326,13 @@ tdfx_init(void *arg)
drm_parse_options(tdfx);
#endif
dev->device = make_dev(&tdfx_cdevsw,
/* gamma_misc.minor */ 0,
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
TDFX_NAME);
dev->device = nbdev;
dev->devnode = make_dev(&tdfx_cdevsw,
device_get_unit(nbdev),
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
TDFX_NAME);
dev->name = TDFX_NAME;
drm_mem_init();
@ -287,32 +345,28 @@ tdfx_init(void *arg)
TDFX_MINOR,
TDFX_PATCHLEVEL,
TDFX_DATE,
0 /* tdfx_misc.minor */);
device_get_unit(nbdev));
return 0;
}
SYSINIT(tdfx_init, SI_SUB_DRIVERS, SI_ORDER_ANY, tdfx_init, 0);
/* tdfx_cleanup is called via SYSUNINIT at module unload time. */
/* tdfx_cleanup is called via tdfx_detach at module unload time. */
static void
tdfx_cleanup(void *arg)
tdfx_cleanup(device_t nbdev)
{
drm_device_t *dev = &tdfx_device;
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
drm_sysctl_cleanup(dev);
destroy_dev(dev->device);
destroy_dev(dev->devnode);
DRM_INFO("Module unloaded\n");
tdfx_takedown(dev);
}
SYSUNINIT(tdfx_init, SI_SUB_DRIVERS, SI_ORDER_ANY, tdfx_cleanup, 0);
static int
tdfx_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
@ -345,19 +399,24 @@ tdfx_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
static int
tdfx_open(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = &tdfx_device;
drm_device_t *dev = TDFX_SOFTC(minor(kdev));
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
device_busy(dev->device);
if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
atomic_inc(&dev->total_open);
simple_lock(&dev->count_lock);
if (!dev->open_count++) {
simple_unlock(&dev->count_lock);
return tdfx_setup(dev);
retcode = tdfx_setup(dev);
}
simple_unlock(&dev->count_lock);
}
if (retcode)
device_unbusy(dev->device);
return retcode;
}
@ -380,6 +439,7 @@ tdfx_close(dev_t kdev, int flags, int fmt, struct proc *p)
return EBUSY;
}
simple_unlock(&dev->count_lock);
device_unbusy(dev->device);
return tdfx_takedown(dev);
}
simple_unlock(&dev->count_lock);

View file

@ -1,624 +0,0 @@
/* tdfx.c -- tdfx driver -*- c -*-
* Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
* Revised: Tue Oct 12 08:51:35 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* $PI$
* $XFree86$
*
*/
#include "drmP.h"
#include "tdfx_drv.h"
#define TDFX_NAME "tdfx"
#define TDFX_DESC "tdfx"
#define TDFX_DATE "19991009"
#define TDFX_MAJOR 0
#define TDFX_MINOR 0
#define TDFX_PATCHLEVEL 1

/* Single static device instance (pre-newbus driver: one card only). */
static drm_device_t tdfx_device;
drm_ctx_t tdfx_res_ctx;

#define CDEV_MAJOR 201

/* tdfx_drv.c */
static d_open_t tdfx_open;
static d_close_t tdfx_close;
static d_ioctl_t tdfx_version;
static d_ioctl_t tdfx_ioctl;
static d_ioctl_t tdfx_lock;
static d_ioctl_t tdfx_unlock;

/*
 * Character-device switch for /dev/tdfx.  D_TRACKCLOSE makes every
 * close() call reach tdfx_close, which the open_count bookkeeping
 * relies on; bmaj -1 means there is no block-device alias.
 */
static struct cdevsw tdfx_cdevsw = {
	/* open */	tdfx_open,
	/* close */	tdfx_close,
	/* read */	drm_read,
	/* write */	drm_write,
	/* ioctl */	tdfx_ioctl,
	/* poll */	drm_poll,
	/* mmap */	drm_mmap,
	/* strategy */	nostrategy,
	/* name */	"tdfx",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	D_TTY | D_TRACKCLOSE,
	/* bmaj */	-1
};
/*
 * Ioctl dispatch table, indexed by DRM_IOCTL_NR(cmd).  Judging by the
 * checks in tdfx_ioctl, the second field requires an authenticated
 * client and the third requires root — confirm against drm_ioctl_desc_t.
 */
static drm_ioctl_desc_t tdfx_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { tdfx_version, 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },

	/* Context management is card-specific. */
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { tdfx_addctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { tdfx_rmctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { tdfx_modctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { tdfx_getctx, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { tdfx_newctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { tdfx_resctx, 1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { tdfx_lock, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { tdfx_unlock, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
};

#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)
/*
 * Reset all per-device bookkeeping to a pristine state.  Called by
 * tdfx_open on the first open (open_count 0 -> 1); tdfx_takedown is
 * the matching teardown on last close.  Always returns 0.
 */
static int
tdfx_setup(drm_device_t *dev)
{
	int i;

	/* Usage/statistics counters. */
	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	/* Authentication-magic hash table. */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	/* Map, vma, lock, queue and DMA state. */
	dev->maplist = NULL;
	dev->map_count = 0;
	dev->vmalist = NULL;
	dev->lock.hw_lock = NULL;
	dev->lock.lock_queue = 0;
	dev->queue_count = 0;
	dev->queue_reserved = 0;
	dev->queue_slots = 0;
	dev->queuelist = NULL;
	dev->irq = 0;
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma = 0;
	dev->dma_flag = 0;
	dev->last_context = 0;
	dev->last_switch = 0;
	dev->last_checked = 0;
	callout_init(&dev->timer);
	dev->context_wait = 0;

	timespecclear(&dev->ctx_start);
	timespecclear(&dev->lck_start);

	/* Reset the message buffer served by drm_read/drm_write. */
	dev->buf_rp = dev->buf;
	dev->buf_wp = dev->buf;
	dev->buf_end = dev->buf + DRM_BSZ;
	bzero(&dev->buf_sel, sizeof dev->buf_sel);
	dev->buf_sigio = NULL;
	dev->buf_readers = 0;
	dev->buf_writers = 0;
	dev->buf_selecting = 0;

	/* -1 marks "no reserved context allocated yet". */
	tdfx_res_ctx.handle=-1;

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}
/*
 * Undo tdfx_setup and free everything accumulated while the device was
 * open: magic-entry hash, vma list, maps (including MTRR and ioremap
 * teardown) and the hardware lock.  Called on last close; runs under
 * the exclusive device lock.  Always returns 0.
 */
static int
tdfx_takedown(drm_device_t *dev)
{
	int i;
	drm_magic_entry_t *pt, *next;
	drm_map_t *map;
	drm_vma_entry_t *vma, *vma_next;

	DRM_DEBUG("\n");

	lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
	callout_stop(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

	/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist = NULL;
		dev->map_count = 0;
	}

	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.pid = 0;
#if 0
		/* Linux-style wakeup, disabled in the BSD port. */
		wake_up_interruptible(&dev->lock.lock_queue);
#endif
	}
	lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);

	return 0;
}
/* tdfx_init is called via SYSINIT at module load time, */
static int
tdfx_init(void *arg)
{
drm_device_t *dev = &tdfx_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
simple_lock_init(&dev->count_lock);
lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
#if 0
drm_parse_options(tdfx);
#endif
dev->device = make_dev(&tdfx_cdevsw,
/* gamma_misc.minor */ 0,
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
TDFX_NAME);
dev->name = TDFX_NAME;
drm_mem_init();
drm_sysctl_init(dev);
TAILQ_INIT(&dev->files);
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
TDFX_NAME,
TDFX_MAJOR,
TDFX_MINOR,
TDFX_PATCHLEVEL,
TDFX_DATE,
0 /* tdfx_misc.minor */);
return 0;
}
SYSINIT(tdfx_init, SI_SUB_DRIVERS, SI_ORDER_ANY, tdfx_init, 0);
/* tdfx_cleanup is called via SYSUNINIT at module unload time. */
/*
 * Tear down sysctl state, remove the /dev node, then release all
 * remaining device resources via tdfx_takedown.
 */
static void
tdfx_cleanup(void *arg)
{
	drm_device_t *dev = &tdfx_device;

	DRM_DEBUG("\n");

	drm_sysctl_cleanup(dev);
	destroy_dev(dev->device);
	DRM_INFO("Module unloaded\n");
	tdfx_takedown(dev);
}

/* NOTE(review): the uniquifier reads tdfx_init, presumably to pair
 * with the SYSINIT above — confirm. */
SYSUNINIT(tdfx_init, SI_SUB_DRIVERS, SI_ORDER_ANY, tdfx_cleanup, 0);
/*
 * DRM_IOCTL_VERSION: fill in the driver's version numbers and copy the
 * name/date/description strings out to the caller-supplied buffers.
 */
static int
tdfx_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	drm_version_t version;
	int len;

	version = *(drm_version_t *) data;

	/*
	 * Copy up to name_len bytes of value into the user buffer and
	 * write the full string length back so the caller can detect
	 * truncation.  NOTE: evaluates its arguments more than once.
	 */
#define DRM_COPY(name,value)				\
	len = strlen(value);				\
	if (len > name##_len) len = name##_len;		\
	name##_len = strlen(value);			\
	if (len && name) {				\
		int error = copyout(value, name, len);	\
		if (error) return error;		\
	}

	version.version_major = TDFX_MAJOR;
	version.version_minor = TDFX_MINOR;
	version.version_patchlevel = TDFX_PATCHLEVEL;

	DRM_COPY(version.name, TDFX_NAME);
	DRM_COPY(version.date, TDFX_DATE);
	DRM_COPY(version.desc, TDFX_DESC);

	*(drm_version_t *) data = version;
	return 0;
}
/*
 * open() on /dev/tdfx: register the caller with the DRM core via
 * drm_open_helper; on the first concurrent open (open_count 0 -> 1)
 * also run tdfx_setup to (re)initialise device state.
 */
static int
tdfx_open(dev_t kdev, int flags, int fmt, struct proc *p)
{
	drm_device_t *dev = &tdfx_device;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
		atomic_inc(&dev->total_open);
		simple_lock(&dev->count_lock);
		if (!dev->open_count++) {
			/* First opener performs device setup. */
			simple_unlock(&dev->count_lock);
			return tdfx_setup(dev);
		}
		simple_unlock(&dev->count_lock);
	}
	return retcode;
}
/*
 * close() on /dev/tdfx: unregister the caller; when the last opener
 * leaves, tear the device down — unless ioctls are still in flight or
 * the device is blocked, in which case report EBUSY.
 */
static int
tdfx_close(dev_t kdev, int flags, int fmt, struct proc *p)
{
	drm_device_t *dev = kdev->si_drv1;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_close(kdev, flags, fmt, p))) {
		atomic_inc(&dev->total_close);
		simple_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				simple_unlock(&dev->count_lock);
				return EBUSY;
			}
			simple_unlock(&dev->count_lock);
			return tdfx_takedown(dev);
		}
		simple_unlock(&dev->count_lock);
	}

	return retcode;
}
/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */
/*
 * Generic file ioctls (FIONBIO/FIOASYNC/FIOSETOWN/FIOGETOWN) are
 * handled inline; every other command is dispatched through
 * tdfx_ioctls[] after checking the caller's root/authentication
 * status.  ioctl_count is balanced on every return path.
 */
static int
tdfx_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	int nr = DRM_IOCTL_NR(cmd);
	drm_device_t *dev = kdev->si_drv1;
	drm_file_t *priv;
	int retcode = 0;
	drm_ioctl_desc_t *ioctl;
	d_ioctl_t *func;

	DRM_DEBUG("dev=%p\n", dev);
	priv = drm_find_file_by_proc(dev, p);
	if (!priv) {
		DRM_DEBUG("can't find authenticator\n");
		return EINVAL;
	}

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
		  p->p_pid, cmd, nr, priv->authenticated);

	switch (cmd) {
	case FIONBIO:
		atomic_dec(&dev->ioctl_count);
		return 0;

	case FIOASYNC:
		atomic_dec(&dev->ioctl_count);
		dev->flags |= FASYNC;
		return 0;

	case FIOSETOWN:
		atomic_dec(&dev->ioctl_count);
		return fsetown(*(int *)data, &dev->buf_sigio);

	case FIOGETOWN:
		atomic_dec(&dev->ioctl_count);
		*(int *) data = fgetown(dev->buf_sigio);
		return 0;
	}

	if (nr >= TDFX_IOCTL_COUNT) {
		retcode = EINVAL;
	} else {
		ioctl = &tdfx_ioctls[nr];
		func = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			retcode = EINVAL;
		} else if ((ioctl->root_only && suser(p))
			   || (ioctl->auth_needed && !priv->authenticated)) {
			retcode = EACCES;
		} else {
			retcode = (func)(kdev, cmd, data, flags, p);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return retcode;
}
/*
 * DRM_IOCTL_LOCK: acquire the hardware lock on behalf of lock.context,
 * sleeping interruptibly ("drmlk2") while another context holds it.
 * Fails with EINVAL for the kernel context and EINTR if the device
 * disappears or the sleep is interrupted.  The #if 0 regions are
 * disabled Linux-derived scheduling/context-switch experiments kept
 * for reference.
 */
static int
tdfx_lock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	drm_device_t *dev = kdev->si_drv1;
	int ret = 0;
	drm_lock_t lock;
#if DRM_DMA_HISTOGRAM
	getnanotime(&dev->lck_start);
#endif

	lock = *(drm_lock_t *) data;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  p->p_pid, lock.context);
		return EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock.context, p->p_pid, dev->lock.hw_lock->lock,
		  lock.flags);

#if 0
	/* dev->queue_count == 0 right now for
	   tdfx.  FIXME? */
	if (lock.context < 0 || lock.context >= dev->queue_count)
		return EINVAL;
#endif

	if (!ret) {
#if 0
		if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
		    != lock.context) {
			long j = ticks - dev->lock.lock_time;

			if (lock.context == tdfx_res_ctx.handle &&
			    j >= 0 && j < DRM_LOCK_SLICE) {
				/* Can't take lock if we just had it and
				   there is contention. */
				DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d ticks=%d\n",
					lock.context, p->p_pid, j,
					dev->lock.lock_time, ticks);
				ret = tsleep(&never, PZERO|PCATCH, "drmlk1",
					     DRM_LOCK_SLICE - j);
				if (ret)
					return ret;
				DRM_DEBUG("ticks=%d\n", ticks);
			}
		}
#endif
		/* Spin/sleep until the lock is ours or we are interrupted. */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				ret = EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  lock.context)) {
				dev->lock.pid = p->p_pid;
				dev->lock.lock_time = ticks;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}

			/* Contention */
			atomic_inc(&dev->total_sleeps);
			ret = tsleep(&dev->lock.lock_queue,
				     PZERO|PCATCH,
				     "drmlk2",
				     0);
		}
	}

#if 0
	if (!ret && dev->last_context != lock.context &&
	    lock.context != tdfx_res_ctx.handle &&
	    dev->last_context != tdfx_res_ctx.handle) {
		add_wait_queue(&dev->context_wait, &entry);
		current->state = TASK_INTERRUPTIBLE;
		/* PRE: dev->last_context != lock.context */
		tdfx_context_switch(dev, dev->last_context, lock.context);
		/* POST: we will wait for the context
		   switch and will dispatch on a later call
		   when dev->last_context == lock.context
		   NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
		current->policy |= SCHED_YIELD;
		schedule();
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->context_wait, &entry);
		if (signal_pending(current)) {
			ret = EINTR;
		} else if (dev->last_context != lock.context) {
			DRM_ERROR("Context mismatch: %d %d\n",
				  dev->last_context, lock.context);
		}
	}
#endif

	if (!ret) {
		if (lock.flags & _DRM_LOCK_READY) {
			/* Wait for space in DMA/FIFO */
		}
		if (lock.flags & _DRM_LOCK_QUIESCENT) {
			/* Make hardware quiescent */
#if 0
			tdfx_quiescent(dev);
#endif
		}
	}

#if 0
	DRM_ERROR("pid = %5d, old counter = %5ld\n",
		  p->p_pid, current->counter);
#endif
#if 0
	while (current->counter > 25)
		current->counter >>= 1; /* decrease time slice */
	DRM_ERROR("pid = %5d, new counter = %5ld\n",
		  p->p_pid, current->counter);
#endif
	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
	{
		struct timespec ts;
		getnanotime(&ts);
		timespecsub(&ts, &dev->lck_start);
		atomic_inc(&dev->histo.lhld[drm_histogram_slot(&ts)]);
	}
#endif

	return ret;
}
/*
 * DRM_IOCTL_UNLOCK: release the hardware lock.  The lock is first
 * transferred to the kernel context, then freed if no context switch
 * is pending (context_flag clear).  EINVAL for the kernel context.
 */
static int
tdfx_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	drm_device_t *dev = kdev->si_drv1;
	drm_lock_t lock;

	lock = *(drm_lock_t *) data;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  p->p_pid, lock.context);
		return EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	/* FIXME: Try to send data to card here */
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

	return 0;
}