- Implemented vertex buffer management (kernel and 3d driver)

- Fixed locking problem with VB flushes
- Added fallback code for when VBs are disabled
- General kernel and 3d driver code cleanup
- Added swap buffer throttling to keep from getting too many frames ahead
- Fixed engine reset infinite loop in X server
- Added VB size parameter
- Added timeout parameter to allow the timeout to be tuned
- Fixed CCE timeout/lock ups in kernel driver
- Fixed ring reset in kernel driver
- Fixed read-only mmap problem (Rik Faith)
- Added support for multiple aux cliprects (Gareth Hughes)
- Added fog table, dither, color mask support (Gareth Hughes)
- Updated span/depth SW fallbacks (Gareth Hughes)
- Added HOSTDATA_BLT texture uploads (Gareth Hughes)
- Added support for other texture formats (Gareth Hughes)
This commit is contained in:
Kevin E Martin 2000-04-15 08:06:36 +00:00
parent 49b16eebb7
commit 782b476dd3
13 changed files with 958 additions and 283 deletions

View file

@ -1,6 +1,6 @@
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Sun Feb 13 23:34:30 2000 by kevin@precisioninsight.com
* Revised: Thu Apr 13 10:32:41 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -228,8 +228,8 @@ typedef struct drm_magic_entry {
} drm_magic_entry_t;
typedef struct drm_magic_head {
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
@ -262,16 +262,15 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
void *dev_private;
int dev_priv_size;
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
int dev_priv_size; /* Size of buffer private storage */
void *dev_private; /* Per-buffer private storage */
} drm_buf_t;
#if DRM_DMA_HISTOGRAM

View file

@ -38,10 +38,10 @@ EXPORT_SYMBOL(r128_cleanup);
#define R128_NAME "r128"
#define R128_DESC "r128"
#define R128_DATE "20000405"
#define R128_DATE "20000415"
#define R128_MAJOR 0
#define R128_MINOR 0
#define R128_PATCHLEVEL 2
#define R128_PATCHLEVEL 4
static drm_device_t r128_device;
drm_ctx_t r128_res_ctx;
@ -74,6 +74,11 @@ static drm_ioctl_desc_t r128_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
@ -97,10 +102,11 @@ static drm_ioctl_desc_t r128_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_ENGINE_RESET)] = { r128_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_SUBMIT_PACKETS)] = { r128_submit_packets, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_WAIT_FOR_IDLE)] = { r128_wait_for_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_W4IDLE)] = { r128_wait_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 },
};
#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls)
@ -156,6 +162,8 @@ static int r128_setup(drm_device_t *dev)
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
@ -182,7 +190,6 @@ static int r128_setup(drm_device_t *dev)
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
@ -314,6 +321,9 @@ static int r128_takedown(drm_device_t *dev)
dev->map_count = 0;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;

View file

@ -123,7 +123,7 @@ MGAHEADERS= mga_drv.h $(DRMHEADERS)
I810OBJS= i810_drv.o i810_dma.o i810_bufs.o i810_context.o
I810HEADERS= i810_drv.h $(DRMHEADERS)
R128OBJS= r128_drv.o r128_dma.o r128_context.o
R128OBJS= r128_drv.o r128_dma.o r128_bufs.o r128_context.o
R128HEADERS= r128_drv.h r128_drm.h $(DRMHEADERS)
endif

View file

@ -1,6 +1,6 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Sun Apr 9 20:33:27 2000 by kevin@precisioninsight.com
* Revised: Sat Apr 15 01:40:31 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -351,9 +351,10 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_ENGINE_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_SUBMIT_PACKETS DRM_IOW( 0x42, drm_r128_packet_t)
#define DRM_IOCTL_R128_WAIT_FOR_IDLE DRM_IO( 0x43)
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_W4IDLE DRM_IO( 0x42)
#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x43, drm_r128_packet_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x44, drm_r128_vertex_t)
#endif

View file

@ -1,6 +1,6 @@
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Sun Feb 13 23:34:30 2000 by kevin@precisioninsight.com
* Revised: Thu Apr 13 10:32:41 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -228,8 +228,8 @@ typedef struct drm_magic_entry {
} drm_magic_entry_t;
typedef struct drm_magic_head {
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
@ -262,16 +262,15 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */
void *dev_private;
int dev_priv_size;
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
int dev_priv_size; /* Size of buffer private storage */
void *dev_private; /* Per-buffer private storage */
} drm_buf_t;
#if DRM_DMA_HISTOGRAM

299
linux/r128_bufs.c Normal file
View file

@ -0,0 +1,299 @@
/* r128_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
* Created: Wed Apr 12 16:19:08 2000 by kevin@precisioninsight.com
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <kevin@precisioninsight.com>
* Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86$
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "r128_drv.h"
#include "linux/un.h"
int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
if (!dma) return -EINVAL;
copy_from_user_ret(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
count = request.count;
order = drm_order(request.size);
size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request.agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
if (dev->queue_count) return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
for (offset = 0;
entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
buf->pid = 0;
buf->dev_priv_size = sizeof(drm_r128_buf_priv_t);
buf->dev_private = drm_alloc(sizeof(drm_r128_buf_priv_t),
DRM_MEM_BUFS);
memset(buf->dev_private, 0, buf->dev_priv_size);
#if DRM_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
byte_count += PAGE_SIZE << page_order;
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
DRM_DEBUG("byte_count: %d\n", byte_count);
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
up(&dev->struct_sem);
request.count = entry->buf_count;
request.size = size;
copy_to_user_ret((drm_buf_desc_t *)arg,
&request,
sizeof(request),
-EFAULT);
dma->flags = _DRM_DMA_USE_AGP;
atomic_dec(&dev->buf_alloc);
return 0;
}
int r128_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc_t request;
copy_from_user_ret(&request,
(drm_buf_desc_t *)arg,
sizeof(request),
-EFAULT);
if (request.flags & _DRM_AGP_BUFFER)
return r128_addbufs_agp(inode, filp, cmd, arg);
else
return -EINVAL;
}
int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
unsigned long address;
drm_buf_map_t request;
int i;
if (!dma) return -EINVAL;
DRM_DEBUG("\n");
spin_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
copy_from_user_ret(&request,
(drm_buf_map_t *)arg,
sizeof(request),
-EFAULT);
if (request.count >= dma->buf_count) {
if (dma->flags & _DRM_DMA_USE_AGP) {
drm_map_t *map;
map = dev->maplist[R128_AGP_VERTBUFS()];
if (!map) {
retcode = -EINVAL;
goto done;
}
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, map->size,
PROT_READ|PROT_WRITE,
MAP_SHARED,
(unsigned long)map->offset);
up(&current->mm->mmap_sem);
} else {
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, dma->byte_count,
PROT_READ|PROT_WRITE, MAP_SHARED, 0);
up(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
/* Real error */
retcode = (signed long)virtual;
goto done;
}
request.virtual = (void *)virtual;
for (i = 0; i < dma->buf_count; i++) {
if (copy_to_user(&request.list[i].idx,
&dma->buflist[i]->idx,
sizeof(request.list[0].idx))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].total,
&dma->buflist[i]->total,
sizeof(request.list[0].total))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].used,
&zero,
sizeof(zero))) {
retcode = -EFAULT;
goto done;
}
address = virtual + dma->buflist[i]->offset;
if (copy_to_user(&request.list[i].address,
&address,
sizeof(address))) {
retcode = -EFAULT;
goto done;
}
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
copy_to_user_ret((drm_buf_map_t *)arg,
&request,
sizeof(request),
-EFAULT);
return retcode;
}

View file

@ -1,7 +1,7 @@
/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -23,8 +23,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Kevin E. Martin <kevin@precisioninsight.com>
* Authors: Kevin E. Martin <kevin@precisioninsight.com>
*
* $XFree86$
*
@ -35,98 +34,65 @@
#include "r128_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#define R128_MAX_VBUF_AGE 0x10000000
#define R128_VBUF_AGE_TAKEN R128_MAX_VBUF_AGE*2
#define R128_VB_AGE_REG R128_GUI_SCRATCH_REG0
#define R128_PC_NGUI_CTLSTAT 0x0184
# define R128_PC_FLUSH_ALL 0x00ff
# define R128_PC_BUSY (1 << 31)
extern int r128_do_engine_reset(drm_device_t *dev);
#define R128_CLOCK_CNTL_INDEX 0x0008
#define R128_CLOCK_CNTL_DATA 0x000c
# define R128_PLL_WR_EN (1 << 7)
#define R128_MCLK_CNTL 0x000f
# define R128_FORCE_GCP (1 << 16)
# define R128_FORCE_PIPE3D_CPP (1 << 17)
#define R128_GEN_RESET_CNTL 0x00f0
# define R128_SOFT_RESET_GUI (1 << 0)
#define R128_PM4_BUFFER_CNTL 0x0704
# define R128_PM4_NONPM4 (0 << 28)
# define R128_PM4_192PIO (1 << 28)
# define R128_PM4_192BM (2 << 28)
# define R128_PM4_128PIO_64INDBM (3 << 28)
# define R128_PM4_128BM_64INDBM (4 << 28)
# define R128_PM4_64PIO_128INDBM (5 << 28)
# define R128_PM4_64BM_128INDBM (6 << 28)
# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
#define R128_PM4_BUFFER_DL_RPTR 0x0710
#define R128_PM4_BUFFER_DL_WPTR 0x0714
# define R128_PM4_BUFFER_DL_DONE (1 << 31)
#define R128_PM4_VC_FPU_SETUP 0x071c
#define R128_PM4_STAT 0x07b8
# define R128_PM4_FIFOCNT_MASK 0x0fff
# define R128_PM4_BUSY (1 << 16)
# define R128_PM4_GUI_ACTIVE (1 << 31)
#define R128_PM4_FIFO_DATA_EVEN 0x1000
#define R128_PM4_FIFO_DATA_ODD 0x1004
#define R128_CCE_PACKET0 0x00000000
#define R128_CCE_PACKET1 0x40000000
#define R128_CCE_PACKET2 0x80000000
# define R128_CCE_PACKET_MASK 0xC0000000
# define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
# define R128_CCE_PACKET0_REG_MASK 0x000007ff
# define R128_CCE_PACKET1_REG0_MASK 0x000007ff
# define R128_CCE_PACKET1_REG1_MASK 0x003ff800
#define R128_TIMEOUT 2000000
/* WARNING!!! MAGIC NUMBERS!!! The number of regions already added to
the kernel must be specified here. This must match the order the X
server uses for instantiating register regions, or must be passed in
a new ioctl. */
#define R128_SAREA() 0
#define R128_FB() 1
#define R128_AGP_RING() 2
#define R128_AGP_READ_PTR() 3
#define R128_AGP_VERTBUFS() 4
#define R128_AGP_INDIRECTBUFS() 5
#define R128_AGP_TEXTURES() 6
#define R128_REG(reg) 7
#define R128_BASE(reg) ((u32)((drm_device_t *)dev)->maplist[R128_REG(reg)]->handle)
#define R128_ADDR(reg) (R128_BASE(reg) + reg)
#define R128_DEREF(reg) *(__volatile__ int *)R128_ADDR(reg)
#define R128_READ(reg) R128_DEREF(reg)
#define R128_WRITE(reg,val) do { R128_DEREF(reg) = val; } while (0)
#define R128_DEREF8(reg) *(__volatile__ char *)R128_ADDR(reg)
#define R128_READ8(reg) R128_DEREF8(reg)
#define R128_WRITE8(reg,val) do { R128_DEREF8(reg) = val; } while (0)
#define R128_WRITE_PLL(addr,val) \
do { \
R128_WRITE8(R128_CLOCK_CNTL_INDEX, ((addr) & 0x1f) | R128_PLL_WR_EN); \
R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
} while (0)
static int R128_READ_PLL(drm_device_t *dev, int addr)
int R128_READ_PLL(drm_device_t *dev, int addr)
{
R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
return R128_READ(R128_CLOCK_CNTL_DATA);
}
static void r128_flush_write_combine(void)
{
int xchangeDummy;
__asm__ volatile("push %%eax ;"
"xchg %%eax, %0 ;"
"pop %%eax" : : "m" (xchangeDummy));
__asm__ volatile("push %%eax ;"
"push %%ebx ;"
"push %%ecx ;"
"push %%edx ;"
"movl $0,%%eax ;"
"cpuid ;"
"pop %%edx ;"
"pop %%ecx ;"
"pop %%ebx ;"
"pop %%eax" : /* no outputs */ : /* no inputs */ );
}
static int r128_do_cleanup_cce(drm_device_t *dev)
{
#define DO_REMAPFREE(v) \
do { \
drm_map_t *_m; \
_m = dev->maplist[(v)]; \
if (_m->handle && _m->size) \
drm_ioremapfree(_m->handle, _m->size); \
} while (0)
if (dev->dev_private) {
DO_REMAPFREE(R128_AGP_RING());
DO_REMAPFREE(R128_AGP_READ_PTR());
DO_REMAPFREE(R128_AGP_VERTBUFS());
DO_REMAPFREE(R128_AGP_INDIRECTBUFS());
DO_REMAPFREE(R128_AGP_TEXTURES());
drm_free(dev->dev_private, sizeof(drm_r128_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
}
return 0;
}
static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init)
{
drm_r128_private_t *dev_priv;
@ -138,6 +104,14 @@ static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init)
memset(dev_priv, 0, sizeof(drm_r128_private_t));
dev_priv->usec_timeout = init->usec_timeout;
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL;
return -EINVAL;
}
dev_priv->cce_mode = init->cce_mode;
dev_priv->cce_fifo_size = init->cce_fifo_size;
dev_priv->cce_is_bm_mode =
@ -167,6 +141,7 @@ static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init)
#endif
dev_priv->ring_size = init->ring_size;
dev_priv->ring_sizel2qw = drm_order(init->ring_size/8);
dev_priv->ring_entries = init->ring_size/sizeof(u32);
dev_priv->ring_read_ptr = (__volatile__ u32 *)
dev->maplist[R128_AGP_READ_PTR()]->handle;
@ -174,25 +149,8 @@ static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init)
dev_priv->ring_end = ((u32 *)dev->maplist[R128_AGP_RING()]->handle
+ dev_priv->ring_entries);
return 0;
}
static int r128_do_cleanup_cce(drm_device_t *dev)
{
#define DO_REMAPFREE(v) \
do { \
drm_map_t *_m; \
_m = dev->maplist[(v)]; \
drm_ioremapfree(_m->handle, _m->size); \
} while (0)
DO_REMAPFREE(R128_AGP_RING());
DO_REMAPFREE(R128_AGP_READ_PTR());
DO_REMAPFREE(R128_AGP_VERTBUFS());
#if 0
DO_REMAPFREE(R128_AGP_INDIRECTBUFS());
DO_REMAPFREE(R128_AGP_TEXTURES());
#endif
dev_priv->submit_age = 0;
R128_WRITE(R128_VB_AGE_REG, dev_priv->submit_age);
return 0;
}
@ -217,28 +175,74 @@ int r128_init_cce(struct inode *inode, struct file *filp,
return -EINVAL;
}
static void r128_mark_vertbufs_done(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
int i;
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[i];
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
}
static int r128_do_engine_flush(drm_device_t *dev)
{
u32 tmp;
int i;
drm_r128_private_t *dev_priv = dev->dev_private;
u32 tmp;
int i;
tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
for (i = 0; i < R128_TIMEOUT; i++)
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
return 0;
udelay(1);
}
return -EBUSY;
}
static int r128_do_wait_for_fifo(drm_device_t *dev, int entries)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
if (slots >= entries) return 0;
udelay(1);
}
(void)r128_do_engine_reset(dev);
return -EBUSY;
}
static int r128_do_wait_for_idle(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i, ret;
if (!(ret = r128_do_wait_for_fifo(dev, 64))) return ret;
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
r128_do_engine_flush(dev);
return 0;
}
udelay(1);
}
(void)r128_do_engine_reset(dev);
return -EBUSY;
}
int r128_do_engine_reset(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *)dev->dev_private;
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
int ret;
ret = r128_do_engine_flush(dev);
(void)r128_do_engine_flush(dev);
clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
@ -265,33 +269,49 @@ int r128_do_engine_reset(drm_device_t *dev)
dev_priv->sarea_priv->ring_write = 0;
}
/* Reset the CCE mode */
r128_do_wait_for_idle(dev);
R128_WRITE(R128_PM4_BUFFER_CNTL,
dev_priv->cce_mode | dev_priv->ring_sizel2qw);
(void)R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */
R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
r128_mark_vertbufs_done(dev);
return 0;
}
int r128_engine_reset(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
int r128_eng_reset(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_eng_reset called without holding the lock\n");
return -EINVAL;
}
return r128_do_engine_reset(dev);
}
static int r128_do_wait_for_fifo(drm_device_t *dev, int entries)
static int r128_do_cce_wait_for_fifo(drm_device_t *dev, int entries)
{
int i;
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
for (i = 0; i < R128_TIMEOUT; i++) {
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = R128_READ(R128_PM4_STAT) & R128_PM4_FIFOCNT_MASK;
if (slots >= entries) return 0;
udelay(1);
}
(void)r128_do_engine_reset(dev);
return -EBUSY;
}
int r128_do_wait_for_idle(drm_device_t *dev)
int r128_do_cce_wait_for_idle(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *)dev->dev_private;
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
if (dev_priv->cce_is_bm_mode) {
@ -300,63 +320,61 @@ int r128_do_wait_for_idle(drm_device_t *dev)
R128_WRITE(R128_PM4_BUFFER_DL_WPTR,
tmp | R128_PM4_BUFFER_DL_DONE);
for (i = 0; i < R128_TIMEOUT; i++) {
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (*dev_priv->ring_read_ptr == dev_priv->sarea_priv->ring_write) {
int pm4stat = R128_READ(R128_PM4_STAT);
if ((pm4stat & R128_PM4_FIFOCNT_MASK) == dev_priv->cce_fifo_size &&
!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE)))
if ((pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size &&
!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
r128_mark_vertbufs_done(dev);
return 0;
}
}
udelay(1);
}
(void)r128_do_engine_reset(dev);
return -EBUSY;
} else {
int ret = r128_do_wait_for_fifo(dev, dev_priv->cce_fifo_size);
int ret = r128_do_cce_wait_for_fifo(dev, dev_priv->cce_fifo_size);
if (ret < 0) return ret;
for (i = 0; i < R128_TIMEOUT; i++) {
for (i = 0; i < dev_priv->usec_timeout; i++) {
int pm4stat = R128_READ(R128_PM4_STAT);
if (!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
r128_mark_vertbufs_done(dev);
return r128_do_engine_flush(dev);
}
udelay(1);
}
(void)r128_do_engine_reset(dev);
return -EBUSY;
}
}
int r128_wait_for_idle(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
int r128_wait_idle(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
return r128_do_wait_for_idle(dev);
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_wait_idle called without holding the lock\n");
return -EINVAL;
}
return r128_do_cce_wait_for_idle(dev);
}
static int r128_submit_packets_ring_secure(drm_device_t *dev,
drm_r128_packet_t *packet)
u32 *commands, int count)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *)dev->dev_private;
drm_r128_private_t *dev_priv = dev->dev_private;
int write = dev_priv->sarea_priv->ring_write;
int *write_ptr = dev_priv->ring_start + write;
int count = packet->count;
int ret = 0;
int timeout;
u32 *buffer, *commands;
int size;
u32 tmp = 0;
int psize = 0;
int writing = 1;
if (count >= dev_priv->ring_entries) {
count = dev_priv->ring_entries-1;
ret = packet->count - count;
}
size = count * sizeof(u32);
if ((commands = buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet->buffer, size, -EFAULT);
int timeout;
while (count > 0) {
tmp = *commands++;
@ -402,10 +420,9 @@ static int r128_submit_packets_ring_secure(drm_device_t *dev,
timeout = 0;
while (write == *dev_priv->ring_read_ptr) {
(void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
if (timeout++ >= R128_TIMEOUT) {
kfree(buffer);
if (timeout++ >= dev_priv->usec_timeout)
return r128_do_engine_reset(dev);
}
udelay(1);
}
count--;
}
@ -416,29 +433,23 @@ static int r128_submit_packets_ring_secure(drm_device_t *dev,
write * sizeof(u32));
}
/* Make sure WC cache has been flushed */
r128_flush_write_combine();
dev_priv->sarea_priv->ring_write = write;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
kfree(buffer);
return ret;
return 0;
}
static int r128_submit_packets_pio_secure(drm_device_t *dev,
drm_r128_packet_t *packet)
u32 *commands, int count)
{
u32 *buffer, *commands;
int count = packet->count;
int size;
int ret;
u32 tmp = 0;
int psize = 0;
int writing = 1;
int addr = R128_PM4_FIFO_DATA_EVEN;
size = count * sizeof(u32);
if ((commands = buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet->buffer, size, -EFAULT);
int ret;
while (count > 0) {
tmp = *commands++;
@ -474,10 +485,8 @@ static int r128_submit_packets_pio_secure(drm_device_t *dev,
psize--;
if (writing) {
if ((ret = r128_do_wait_for_fifo(dev, 1)) < 0) {
kfree(buffer);
if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0)
return ret;
}
R128_WRITE(addr, tmp);
addr ^= 0x0004;
}
@ -486,53 +495,34 @@ static int r128_submit_packets_pio_secure(drm_device_t *dev,
}
if (addr == R128_PM4_FIFO_DATA_ODD) {
if ((ret = r128_do_wait_for_fifo(dev, 1)) < 0) {
kfree(buffer);
return ret;
}
if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) return ret;
R128_WRITE(addr, R128_CCE_PACKET2);
}
kfree(buffer);
return 0;
}
static int r128_submit_packets_ring(drm_device_t *dev,
drm_r128_packet_t *packet)
u32 *commands, int count)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *)dev->dev_private;
drm_r128_private_t *dev_priv = dev->dev_private;
int write = dev_priv->sarea_priv->ring_write;
int *write_ptr = dev_priv->ring_start + write;
int count = packet->count;
int ret = 0;
int timeout;
u32 *buffer, *commands;
int size;
if (count >= dev_priv->ring_entries) {
count = dev_priv->ring_entries-1;
ret = packet->count - count;
}
size = count * sizeof(u32);
if ((commands = buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet->buffer, size, -EFAULT);
while (count > 0) {
write++;
*write_ptr++ = *commands++;
if (write >= dev_priv->ring_entries) {
write = 0;
write = 0;
write_ptr = dev_priv->ring_start;
}
timeout = 0;
while (write == *dev_priv->ring_read_ptr) {
(void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
if (timeout++ >= R128_TIMEOUT) {
kfree(buffer);
if (timeout++ >= dev_priv->usec_timeout)
return r128_do_engine_reset(dev);
}
udelay(1);
}
count--;
}
@ -543,70 +533,277 @@ static int r128_submit_packets_ring(drm_device_t *dev,
write * sizeof(u32));
}
/* Make sure WC cache has been flushed */
r128_flush_write_combine();
dev_priv->sarea_priv->ring_write = write;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
kfree(buffer);
return ret;
return 0;
}
static int r128_submit_packets_pio(drm_device_t *dev,
drm_r128_packet_t *packet)
u32 *commands, int count)
{
u32 *buffer, *commands;
int count = packet->count;
int size;
int ret;
size = count * sizeof(u32);
if ((commands = buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet->buffer, size, -EFAULT);
while (count > 1) {
if ((ret = r128_do_wait_for_fifo(dev, 2)) < 0) {
kfree(buffer);
return ret;
}
if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
R128_WRITE(R128_PM4_FIFO_DATA_ODD, *commands++);
count -= 2;
}
if (count) {
if ((ret = r128_do_wait_for_fifo(dev, 2)) < 0) {
kfree(buffer);
return ret;
}
if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
R128_WRITE(R128_PM4_FIFO_DATA_ODD, R128_CCE_PACKET2);
}
kfree(buffer);
return 0;
}
int r128_submit_packets(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
static int r128_do_submit_packets(drm_device_t *dev, u32 *buffer, int count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int ret;
if (dev_priv->cce_is_bm_mode) {
if (dev_priv->cce_secure)
ret = r128_submit_packets_ring_secure(dev, buffer, count);
else
ret = r128_submit_packets_ring(dev, buffer, count);
} else {
if (dev_priv->cce_secure)
ret = r128_submit_packets_pio_secure(dev,
buffer, count);
else
ret = r128_submit_packets_pio(dev, buffer, count);
}
return ret;
}
int r128_submit_pkt(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_r128_private_t *dev_priv = (drm_r128_private_t *)dev->dev_private;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_packet_t packet;
u32 *buffer;
int count;
int size;
int ret = 0;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_submit_pkt called without holding the lock\n");
return -EINVAL;
}
copy_from_user_ret(&packet, (drm_r128_packet_t *)arg, sizeof(packet),
-EFAULT);
if (dev_priv->cce_secure) {
if (dev_priv->cce_is_bm_mode)
return r128_submit_packets_ring_secure(dev, &packet);
count = packet.count;
size = count * sizeof(u32);
if (dev_priv->cce_is_bm_mode) {
int left = 0;
if (count >= dev_priv->ring_entries) {
count = dev_priv->ring_entries-1;
left = packet.count - count;
}
if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet.buffer, size, -EFAULT);
if (dev_priv->cce_secure)
ret = r128_submit_packets_ring_secure(dev,
buffer, count);
else
return r128_submit_packets_pio_secure(dev, &packet);
ret = r128_submit_packets_ring(dev, buffer, count);
if (!ret) ret = left;
} else {
if (dev_priv->cce_is_bm_mode)
return r128_submit_packets_ring(dev, &packet);
if ((buffer = kmalloc(size, 0)) == NULL) return -ENOMEM;
copy_from_user_ret(buffer, packet.buffer, size, -EFAULT);
if (dev_priv->cce_secure)
ret = r128_submit_packets_pio_secure(dev,
buffer, count);
else
return r128_submit_packets_pio(dev, &packet);
ret = r128_submit_packets_pio(dev, buffer, count);
}
kfree(buffer);
return ret;
}
/* Queue the client's vertex buffers to the CCE and age-stamp them.
 *
 * For each index in v->send_indices: validate the index, verify the
 * caller owns the buffer and that it is not already queued, mark it
 * pending, and tag it with the current submit age.  Finally a PACKET0
 * write of submit_age to R128_VB_AGE_REG is submitted, so that
 * r128_freelist_get() can later tell which buffers the hardware has
 * finished with by comparing each buffer's age to that register.
 *
 * Returns 0 on success, -EINVAL on an ownership or already-pending
 * violation.  Out-of-range indices are logged and skipped.
 */
static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
{
	drm_device_dma_t    *dma      = dev->dma;
	drm_r128_private_t  *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv;
	drm_buf_t	    *buf;
	int		     i;
	u32		     cce[2];

	/* The age counter wraps at R128_MAX_VBUF_AGE; drain the engine
	   before reusing age 0 so a stale age can never look "done". */
	if (++dev_priv->submit_age == R128_MAX_VBUF_AGE) {
		dev_priv->submit_age = 0;
		r128_do_cce_wait_for_idle(dev);
	}

	for (i = 0; i < v->send_count; i++) {
		int idx = v->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			/* Bad index: log and skip this entry only. */
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[idx];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			/* NOTE(review): buffers marked pending earlier in
			   this loop are not unwound on this error path —
			   confirm whether that is intentional. */
			return -EINVAL;
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  v->send_indices[i], i);
			return -EINVAL;
		}
		buf->pending = 1;
		/* FIXME: Add support for sending vertex buffer to the
		   CCE here instead of in client code.  The v->prim
		   holds the primitive type that should be drawn.
		   This will require us to loop over the clip rects here
		   as well, which implies that we extend the kernel
		   driver to allow cliprects to be stored here.  Note
		   that the cliprects could possibly come from the X
		   server instead of the client, but this will require
		   additional changes to the DRI to allow for this
		   optimization. */
		buf_priv      = buf->dev_private;
		buf_priv->age = dev_priv->submit_age;
	}

	/* Make sure WC cache has been flushed (if in PIO mode) */
	if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine();

	/* Submit a CCE packet that writes submit_age to R128_VB_AGE_REG */
	cce[0] = R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0);
	cce[1] = dev_priv->submit_age;
	r128_do_submit_packets(dev, cce, 2);

	return 0;
}
/* Find a free DMA buffer.
 *
 * First scan for a buffer owned by no process (pid == 0) and hand it
 * out immediately.  Failing that, poll R128_VB_AGE_REG a few times: a
 * pending buffer whose age is <= the register value has been consumed
 * by the CCE (see r128_send_vertbufs) and can be recycled.
 *
 * Returns the buffer, or NULL if none became available.
 *
 * Fix vs. previous revision: removed the dead store
 * `buf_priv = buf->dev_private;` in the first scan loop — the value
 * was never read there.
 */
static drm_buf_t *r128_freelist_get(drm_device_t *dev)
{
	drm_device_dma_t    *dma = dev->dma;
	drm_r128_buf_priv_t *buf_priv;
	drm_buf_t	    *buf;
	int		     i, t;

	/* FIXME: Optimize -- use freelist code */

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		if (buf->pid == 0) return buf;
	}

	for (t = 0; t < 5; t++) { /* FIXME: Arbitrary retry count */
		u32 done_age = R128_READ(R128_VB_AGE_REG);
		for (i = 0; i < dma->buf_count; i++) {
			buf	 = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->pending && buf_priv->age <= done_age) {
				/* The buffer has been processed, so it
				   can now be used. */
				buf->pending = 0;
				return buf;
			}
		}
	}

	return NULL;
}
/* Grant DMA buffers to the requesting client.
 *
 * Starting at v->granted_count, pull buffers off the freelist until
 * v->request_count have been granted or the freelist runs dry.  Each
 * granted buffer is claimed for the current process and its index and
 * size are copied out to the user-supplied arrays.
 *
 * Returns 0 (a short grant is reported via v->granted_count), or
 * -EFAULT if a copy to user space faults.
 */
static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
{
	int	   slot = v->granted_count;
	drm_buf_t *entry;

	while (slot < v->request_count) {
		entry = r128_freelist_get(dev);
		if (entry == NULL) break;	/* pool exhausted */

		entry->pid = current->pid;

		copy_to_user_ret(&v->request_indices[slot],
				 &entry->idx,
				 sizeof(entry->idx),
				 -EFAULT);
		copy_to_user_ret(&v->request_sizes[slot],
				 &entry->total,
				 sizeof(entry->total),
				 -EFAULT);

		++v->granted_count;
		++slot;
	}

	return 0;
}
/* DRM_IOCTL_R128_VERTEX handler: send completed vertex buffers to the
 * CCE and/or hand fresh DMA buffers back to the client.
 *
 * Copies a drm_r128_vertex_t from user space, validates the send and
 * request counts against the buffer pool, submits v.send_count buffers
 * via r128_send_vertbufs(), then tries to grant v.request_count new
 * buffers via r128_get_vertbufs().  The struct is copied back so the
 * client can read granted_count.
 *
 * Caller must hold the hardware lock; returns -EINVAL otherwise, or
 * the first error from the send/get phases.
 */
int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd,
		    unsigned long arg)
{
	drm_file_t	  *priv	   = filp->private_data;
	drm_device_t	  *dev	   = priv->dev;
	drm_device_dma_t  *dma	   = dev->dma;
	int		   retcode = 0;
	drm_r128_vertex_t  v;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
	    dev->lock.pid != current->pid) {
		DRM_ERROR("r128_vertex_buf called without holding the lock\n");
		return -EINVAL;
	}

	copy_from_user_ret(&v, (drm_r128_vertex_t *)arg, sizeof(v), -EFAULT);

	DRM_DEBUG("%d: %d send, %d req\n",
		  current->pid, v.send_count, v.request_count);

	/* Reject counts outside [0, buf_count] before touching buflist. */
	if (v.send_count < 0 || v.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  current->pid, v.send_count, dma->buf_count);
		return -EINVAL;
	}
	if (v.request_count < 0 || v.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, v.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (v.send_count) {
		retcode = r128_send_vertbufs(dev, &v);
	}

	v.granted_count = 0;
	/* Only grant new buffers if the send phase succeeded. */
	if (!retcode && v.request_count) {
		retcode = r128_get_vertbufs(dev, &v);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, v.granted_count);

	/* NOTE(review): a fault in this copy-out discards retcode and
	   returns -EFAULT instead — confirm that is acceptable. */
	copy_to_user_ret((drm_r128_vertex_t *)arg, &v, sizeof(v), -EFAULT);

	return retcode;
}

View file

@ -35,15 +35,16 @@
* defines in the Xserver file (xf86drmR128.h)
*/
typedef struct drm_r128_init {
enum {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
enum {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
int sarea_priv_offset;
int cce_mode;
int cce_fifo_size;
int cce_secure;
int ring_size;
int usec_timeout;
} drm_r128_init_t;
typedef struct drm_r128_packet {
@ -52,6 +53,30 @@ typedef struct drm_r128_packet {
int flags;
} drm_r128_packet_t;
typedef enum drm_r128_prim {
_DRM_R128_PRIM_NONE = 0x0001,
_DRM_R128_PRIM_POINT = 0x0002,
_DRM_R128_PRIM_LINE = 0x0004,
_DRM_R128_PRIM_POLY_LINE = 0x0008,
_DRM_R128_PRIM_TRI_LIST = 0x0010,
_DRM_R128_PRIM_TRI_FAN = 0x0020,
_DRM_R128_PRIM_TRI_STRIP = 0x0040,
_DRM_R128_PRIM_TRI_TYPE2 = 0x0080
} drm_r128_prim_t;
typedef struct drm_r128_vertex {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int send_count; /* Number of buffers to send */
int *send_indices; /* List of handles to buffers */
int *send_sizes; /* Lengths of data to send */
drm_r128_prim_t prim; /* Primitive type */
int request_count; /* Number of buffers requested */
int *request_indices; /* Buffer information */
int *request_sizes;
int granted_count; /* Number of buffers granted */
} drm_r128_vertex_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (r128_sarea.h)
*/

View file

@ -38,10 +38,10 @@ EXPORT_SYMBOL(r128_cleanup);
#define R128_NAME "r128"
#define R128_DESC "r128"
#define R128_DATE "20000405"
#define R128_DATE "20000415"
#define R128_MAJOR 0
#define R128_MINOR 0
#define R128_PATCHLEVEL 2
#define R128_PATCHLEVEL 4
static drm_device_t r128_device;
drm_ctx_t r128_res_ctx;
@ -74,6 +74,11 @@ static drm_ioctl_desc_t r128_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
@ -97,10 +102,11 @@ static drm_ioctl_desc_t r128_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_ENGINE_RESET)] = { r128_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_SUBMIT_PACKETS)] = { r128_submit_packets, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_WAIT_FOR_IDLE)] = { r128_wait_for_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_W4IDLE)] = { r128_wait_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 },
};
#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls)
@ -156,6 +162,8 @@ static int r128_setup(drm_device_t *dev)
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
@ -182,7 +190,6 @@ static int r128_setup(drm_device_t *dev)
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
@ -314,6 +321,9 @@ static int r128_takedown(drm_device_t *dev)
dev->map_count = 0;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;

View file

@ -46,8 +46,18 @@ typedef struct drm_r128_private {
u32 *ring_start;
u32 *ring_end;
int ring_size;
int ring_sizel2qw;
int ring_entries;
int submit_age;
int usec_timeout;
} drm_r128_private_t;
typedef struct drm_r128_buf_priv {
u32 age;
} drm_r128_buf_priv_t;
/* r128_drv.c */
extern int r128_init(void);
extern void r128_cleanup(void);
@ -65,12 +75,20 @@ extern int r128_unlock(struct inode *inode, struct file *filp,
/* r128_dma.c */
extern int r128_init_cce(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_engine_reset(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_submit_packets(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_wait_for_idle(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_eng_reset(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_submit_pkt(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_wait_idle(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_vertex_buf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* r128_bufs.c */
extern int r128_addbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_mapbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* r128_context.c */
extern int r128_resctx(struct inode *inode, struct file *filp,
@ -90,4 +108,119 @@ extern int r128_rmctx(struct inode *inode, struct file *filp,
extern int r128_context_switch(drm_device_t *dev, int old, int new);
extern int r128_context_switch_complete(drm_device_t *dev, int new);
/* Register definitions, register access macros and drmAddMap constants
* for Rage 128 kernel driver.
*/
#define R128_PC_NGUI_CTLSTAT 0x0184
# define R128_PC_FLUSH_ALL 0x00ff
# define R128_PC_BUSY (1 << 31)
#define R128_CLOCK_CNTL_INDEX 0x0008
#define R128_CLOCK_CNTL_DATA 0x000c
# define R128_PLL_WR_EN (1 << 7)
#define R128_MCLK_CNTL 0x000f
# define R128_FORCE_GCP (1 << 16)
# define R128_FORCE_PIPE3D_CPP (1 << 17)
#define R128_GEN_RESET_CNTL 0x00f0
# define R128_SOFT_RESET_GUI (1 << 0)
#define R128_PM4_BUFFER_CNTL 0x0704
# define R128_PM4_NONPM4 (0 << 28)
# define R128_PM4_192PIO (1 << 28)
# define R128_PM4_192BM (2 << 28)
# define R128_PM4_128PIO_64INDBM (3 << 28)
# define R128_PM4_128BM_64INDBM (4 << 28)
# define R128_PM4_64PIO_128INDBM (5 << 28)
# define R128_PM4_64BM_128INDBM (6 << 28)
# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
#define R128_PM4_BUFFER_DL_RPTR 0x0710
#define R128_PM4_BUFFER_DL_WPTR 0x0714
# define R128_PM4_BUFFER_DL_DONE (1 << 31)
#define R128_PM4_VC_FPU_SETUP 0x071c
#define R128_PM4_STAT 0x07b8
# define R128_PM4_FIFOCNT_MASK 0x0fff
# define R128_PM4_BUSY (1 << 16)
# define R128_PM4_GUI_ACTIVE (1 << 31)
#define R128_PM4_BUFFER_ADDR 0x07f0
#define R128_PM4_MICRO_CNTL 0x07fc
# define R128_PM4_MICRO_FREERUN (1 << 30)
#define R128_PM4_FIFO_DATA_EVEN 0x1000
#define R128_PM4_FIFO_DATA_ODD 0x1004
#define R128_GUI_SCRATCH_REG0 0x15e0
#define R128_GUI_SCRATCH_REG1 0x15e4
#define R128_GUI_SCRATCH_REG2 0x15e8
#define R128_GUI_SCRATCH_REG3 0x15ec
#define R128_GUI_SCRATCH_REG4 0x15f0
#define R128_GUI_SCRATCH_REG5 0x15f4
#define R128_GUI_STAT 0x1740
# define R128_GUI_FIFOCNT_MASK 0x0fff
# define R128_GUI_ACTIVE (1 << 31)
/* CCE command packets */
#define R128_CCE_PACKET0 0x00000000
#define R128_CCE_PACKET1 0x40000000
#define R128_CCE_PACKET2 0x80000000
# define R128_CCE_PACKET_MASK 0xC0000000
# define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
# define R128_CCE_PACKET0_REG_MASK 0x000007ff
# define R128_CCE_PACKET1_REG0_MASK 0x000007ff
# define R128_CCE_PACKET1_REG1_MASK 0x003ff800
#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */
/* WARNING!!! MAGIC NUMBERS!!! The number of regions already added to
the kernel must be specified here. This must match the order the X
server uses for instantiating register regions, or must be passed in
a new ioctl. */
#define R128_SAREA() 0
#define R128_FB() 1
#define R128_AGP_RING() 2
#define R128_AGP_READ_PTR() 3
#define R128_AGP_VERTBUFS() 4
#define R128_AGP_INDIRECTBUFS() 5
#define R128_AGP_TEXTURES() 6
#define R128_REG(reg) 7
#define R128_BASE(reg) \
((u32)((drm_device_t *)dev)->maplist[R128_REG(reg)]->handle)
#define R128_ADDR(reg) (R128_BASE(reg) + reg)
#define R128_DEREF(reg) *(__volatile__ int *)R128_ADDR(reg)
#define R128_READ(reg) R128_DEREF(reg)
#define R128_WRITE(reg,val) do { R128_DEREF(reg) = val; } while (0)
#define R128_DEREF8(reg) *(__volatile__ char *)R128_ADDR(reg)
#define R128_READ8(reg) R128_DEREF8(reg)
#define R128_WRITE8(reg,val) do { R128_DEREF8(reg) = val; } while (0)
#define R128_WRITE_PLL(addr,val) \
do { \
R128_WRITE8(R128_CLOCK_CNTL_INDEX, ((addr) & 0x1f) | R128_PLL_WR_EN); \
R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
} while (0)
extern int R128_READ_PLL(drm_device_t *dev, int addr);
#define R128CCE0(p,r,n) ((p) | ((n) << 16) | ((r) >> 2))
#define R128CCE1(p,r1,r2) ((p) | (((r2) >> 2) << 11) | ((r1) >> 2))
#define R128CCE2(p) ((p))
#define R128CCE3(p,n) ((p) | ((n) << 16))
#endif

View file

@ -1,6 +1,6 @@
/* vm.c -- Memory mapping for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
* Revised: Tue Apr 11 01:55:12 2000 by kevin@precisioninsight.com
* Revised: Sat Apr 15 00:12:30 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
@ -246,6 +246,18 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
/* Check for valid size. */
if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
vma->vm_flags &= VM_MAYWRITE;
#if defined(__i386__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
__pte(pgprot_val(vma->vm_page_prot)))));
#endif
}
switch (map->type) {
case _DRM_FRAME_BUFFER:
@ -281,19 +293,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
if (map->flags & _DRM_READ_ONLY) {
#if defined(__i386__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
/* Ye gads this is ugly. With more thought
we could move this up higher and use
`protection_map' instead. */
vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
__pte(pgprot_val(vma->vm_page_prot)))));
#endif
}
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
/* In Linux 2.2.3 and above, this is
handled in do_mmap() in mm/mmap.c. */

View file

@ -1,6 +1,6 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Sun Apr 9 20:33:27 2000 by kevin@precisioninsight.com
* Revised: Sat Apr 15 01:40:31 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -351,9 +351,10 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_ENGINE_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_SUBMIT_PACKETS DRM_IOW( 0x42, drm_r128_packet_t)
#define DRM_IOCTL_R128_WAIT_FOR_IDLE DRM_IO( 0x43)
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_W4IDLE DRM_IO( 0x42)
#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x43, drm_r128_packet_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x44, drm_r128_vertex_t)
#endif

View file

@ -1,6 +1,6 @@
/* drm.h -- Header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
* Revised: Sun Apr 9 20:33:27 2000 by kevin@precisioninsight.com
* Revised: Sat Apr 15 01:40:31 2000 by kevin@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
@ -351,9 +351,10 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_I810_GETAGE DRM_IO ( 0x44)
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_ENGINE_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_SUBMIT_PACKETS DRM_IOW( 0x42, drm_r128_packet_t)
#define DRM_IOCTL_R128_WAIT_FOR_IDLE DRM_IO( 0x43)
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_W4IDLE DRM_IO( 0x42)
#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x43, drm_r128_packet_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x44, drm_r128_vertex_t)
#endif