This check-in contains three large chunks of code:

1. Radeon DRI support
- Add Radeon server-side DRI support
- Add Radeon kernel module support
2. Latest Rage 128 code
- Merge ati-4-1-1-branch code (for Rage 128)
- Bring Radeon server-side code up to date with the Rage 128 code
3. Misc bug fixes
- Fix initial DRI lock problem
- Fix static server build problem

Notes:
1. No Radeon 3D code has been checked in. It is still under development and
    is not yet ready for testing.
2. The X server locks up when doing lots of screen-to-screen copies (e.g.,
    during "x11perf -copywinwin500"), so it is recommended that
    non-developers continue to use the trunk code until this bug is
    resolved.
commit 1aa2f345c7 (parent 94071289a4)
Kevin E Martin, 2000-11-13 23:35:02 +00:00
24 changed files with 8858 additions and 1377 deletions


@@ -6,7 +6,7 @@
# drm.o is a fake target -- it is never built
# The real targets are in the module-list
O_TARGET := drm.o
module-list := gamma.o tdfx.o r128.o ffb.o mga.o i810.o
module-list := gamma.o tdfx.o r128.o radeon.o ffb.o mga.o i810.o
export-objs := $(patsubst %.o,%_drv.o,$(module-list))
# libs-objs are included in every module so that radical changes to the
@@ -36,18 +36,23 @@ else
endif
endif
gamma-objs := $(lib-objs) gamma_drv.o gamma_dma.o
tdfx-objs := $(lib-objs) tdfx_drv.o tdfx_context.o
r128-objs := $(lib-objs) r128_drv.o r128_dma.o r128_context.o r128_bufs.o
ffb-objs := $(lib-objs) ffb_drv.o ffb_context.o
mga-objs := $(lib-objs) mga_drv.o mga_dma.o mga_context.o mga_bufs.o \
mga_state.o
i810-objs := $(lib-objs) i810_drv.o i810_dma.o i810_context.o i810_bufs.o
gamma-objs := $(lib-objs) gamma_drv.o gamma_dma.o
tdfx-objs := $(lib-objs) tdfx_drv.o tdfx_context.o
r128-objs := $(lib-objs) r128_drv.o r128_cce.o r128_context.o \
r128_bufs.o r128_state.o
radeon-objs := $(lib-objs) radeon_drv.o radeon_cp.o radeon_context.o \
radeon_bufs.o radeon_state.o
ffb-objs := $(lib-objs) ffb_drv.o ffb_context.o
mga-objs := $(lib-objs) mga_drv.o mga_dma.o mga_context.o \
mga_bufs.o mga_state.o
i810-objs := $(lib-objs) i810_drv.o i810_dma.o i810_context.o \
i810_bufs.o
obj-$(CONFIG_DRM_GAMMA) += gamma.o $(gamma-objs)
obj-$(CONFIG_DRM_TDFX) += tdfx.o $(tdfx-objs)
obj-$(CONFIG_DRM_R128) += r128.o $(r128-objs)
obj-$(CONFIG_DRM_FFB) += ffb.o $(ffb-objs)
obj-$(CONFIG_DRM_GAMMA) += gamma.o $(gamma-objs)
obj-$(CONFIG_DRM_TDFX) += tdfx.o $(tdfx-objs)
obj-$(CONFIG_DRM_R128) += r128.o $(r128-objs)
obj-$(CONFIG_DRM_RADEON) += radeon.o $(radeon-objs)
obj-$(CONFIG_DRM_FFB) += ffb.o $(ffb-objs)
ifneq ($(CONFIG_AGP),)
obj-$(CONFIG_DRM_MGA) += mga.o $(mga-objs)
@@ -84,5 +89,8 @@ i810.o: $(i810-objs)
r128.o: $(r128-objs)
$(LD) -r -o $@ $(r128-objs)
radeon.o: $(radeon-objs)
$(LD) -r -o $@ $(radeon-objs)
ffb.o: $(ffb-objs)
$(LD) -r -o $@ $(ffb-objs)


@@ -24,8 +24,10 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
@@ -33,15 +35,15 @@
#include "drmP.h"
#include "r128_drv.h"
#define R128_NAME "r128"
#define R128_DESC "ATI Rage 128"
#define R128_DATE "20000928"
#define R128_MAJOR 1
#define R128_MINOR 0
#define R128_PATCHLEVEL 0
#define R128_NAME "r128"
#define R128_DESC "ATI Rage 128"
#define R128_DATE "20001106"
#define R128_MAJOR 2
#define R128_MINOR 0
#define R128_PATCHLEVEL 0
static drm_device_t r128_device;
drm_ctx_t r128_res_ctx;
static drm_device_t r128_device;
drm_ctx_t r128_res_ctx;
static struct file_operations r128_fops = {
#if LINUX_VERSION_CODE >= 0x020400
@@ -65,52 +67,59 @@ static struct miscdevice r128_misc = {
};
static drm_ioctl_desc_t r128_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
#endif
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_IDLE)] = { r128_cce_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_cce_packet, 1, 0 },
};
#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls)
@@ -425,13 +434,13 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = R128_MAJOR;
@@ -507,7 +516,6 @@ int r128_release(struct inode *inode, struct file *filp)
}
/* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -542,6 +550,12 @@ int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
}
}
#if 0
if ( retcode ) {
DRM_INFO( "%s 0x%x ret = %d\n", __FUNCTION__, nr, retcode );
}
#endif
atomic_dec(&dev->ioctl_count);
return retcode;
}
@@ -573,33 +587,10 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
#if 0
/* dev->queue_count == 0 right now for
r128. FIXME? */
if (lock.context < 0 || lock.context >= dev->queue_count)
if (lock.context < 0)
return -EINVAL;
#endif
if (!ret) {
#if 0
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = jiffies - dev->lock.lock_time;
if (lock.context == r128_res_ctx.handle &&
j >= 0 && j < DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
lock.context, current->pid, j,
dev->lock.lock_time, jiffies);
current->state = TASK_INTERRUPTIBLE;
current->policy |= SCHED_YIELD;
schedule_timeout(DRM_LOCK_SLICE-j);
DRM_DEBUG("jiffies=%d\n", jiffies);
}
}
#endif
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
@@ -618,9 +609,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
/* Contention */
atomic_inc(&dev->total_sleeps);
#if 1
current->policy |= SCHED_YIELD;
#endif
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -631,32 +619,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
#if 0
if (!ret && dev->last_context != lock.context &&
lock.context != r128_res_ctx.handle &&
dev->last_context != r128_res_ctx.handle) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != lock.context */
r128_context_switch(dev, dev->last_context, lock.context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == lock.context
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
current->policy |= SCHED_YIELD;
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
ret = -EINTR;
} else if (dev->last_context != lock.context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context, lock.context);
}
}
#endif
if (!ret) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
@@ -671,6 +633,7 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
/* Make hardware quiescent */
DRM_DEBUG( "not quiescent!\n" );
#if 0
r128_quiescent(dev);
#endif

linux-core/radeon_drv.c (new file, 719 lines)

@@ -0,0 +1,719 @@
/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#include <linux/config.h>
#include "drmP.h"
#include "radeon_drv.h"
#define RADEON_NAME "radeon"
#define RADEON_DESC "ATI Radeon"
#define RADEON_DATE "20001108"
#define RADEON_MAJOR 1
#define RADEON_MINOR 0
#define RADEON_PATCHLEVEL 0
static drm_device_t radeon_device;
drm_ctx_t radeon_res_ctx;
static struct file_operations radeon_fops = {
#if LINUX_VERSION_CODE >= 0x020400
/* This started being used during 2.4.0-test */
owner: THIS_MODULE,
#endif
open: radeon_open,
flush: drm_flush,
release: radeon_release,
ioctl: radeon_ioctl,
mmap: drm_mmap,
read: drm_read,
fasync: drm_fasync,
poll: drm_poll,
};
static struct miscdevice radeon_misc = {
minor: MISC_DYNAMIC_MINOR,
name: RADEON_NAME,
fops: &radeon_fops,
};
static drm_ioctl_desc_t radeon_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { radeon_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { radeon_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { radeon_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { radeon_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { radeon_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { radeon_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { radeon_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { radeon_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { radeon_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { radeon_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { radeon_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { radeon_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
#endif
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_SWAP)] = { radeon_cp_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_CLEAR)] = { radeon_cp_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_VERTEX)] = { radeon_cp_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INDICES)] = { radeon_cp_indices, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_BLIT)] = { radeon_cp_blit, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_PACKET)] = { radeon_cp_packet, 1, 0 },
};
#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls)
#ifdef MODULE
static char *radeon = NULL;
#endif
MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_DESCRIPTION("radeon");
MODULE_PARM(radeon, "s");
#ifndef MODULE
/* radeon_options is called by the kernel to parse command-line options
* passed via the boot-loader (e.g., LILO). It calls the insmod option
* routine, drm_parse_options.
*/
static int __init radeon_options(char *str)
{
drm_parse_options(str);
return 1;
}
__setup("radeon=", radeon_options);
#endif
static int radeon_setup(drm_device_t *dev)
{
int i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
init_timer(&dev->timer);
init_waitqueue_head(&dev->context_wait);
dev->ctx_start = 0;
dev->lck_start = 0;
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
dev->buf_async = NULL;
init_waitqueue_head(&dev->buf_readers);
init_waitqueue_head(&dev->buf_writers);
radeon_res_ctx.handle = -1;
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int radeon_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
down(&dev->struct_sem);
del_timer(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp
intact until radeon_cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if (entry->bound) drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
if (dev->agp->acquired && drm_agp.release)
(*drm_agp.release)();
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
#endif
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
case _DRM_AGP:
/* Do nothing here, because this is all
handled in the AGP/GART driver. */
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wake_up_interruptible(&dev->lock.lock_queue);
}
up(&dev->struct_sem);
return 0;
}
/* radeon_init is called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported). */
static int radeon_init(void)
{
int retcode;
drm_device_t *dev = &radeon_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
dev->count_lock = SPIN_LOCK_UNLOCKED;
sema_init(&dev->struct_sem, 1);
#ifdef MODULE
drm_parse_options(radeon);
#endif
if ((retcode = misc_register(&radeon_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME);
return retcode;
}
dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor);
dev->name = RADEON_NAME;
drm_mem_init();
drm_proc_init(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
dev->agp = drm_agp_init();
if (dev->agp == NULL) {
DRM_ERROR("Cannot initialize agpgart module.\n");
drm_proc_cleanup();
misc_deregister(&radeon_misc);
radeon_takedown(dev);
return -ENOMEM;
}
#ifdef CONFIG_MTRR
dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size*1024*1024,
MTRR_TYPE_WRCOMB,
1);
#endif
#endif
if((retcode = drm_ctxbitmap_init(dev))) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
drm_proc_cleanup();
misc_deregister(&radeon_misc);
radeon_takedown(dev);
return retcode;
}
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
RADEON_NAME,
RADEON_MAJOR,
RADEON_MINOR,
RADEON_PATCHLEVEL,
RADEON_DATE,
radeon_misc.minor);
return 0;
}
/* radeon_cleanup is called via cleanup_module at module unload time. */
static void radeon_cleanup(void)
{
drm_device_t *dev = &radeon_device;
DRM_DEBUG("\n");
drm_proc_cleanup();
if (misc_deregister(&radeon_misc)) {
DRM_ERROR("Cannot unload module\n");
} else {
DRM_INFO("Module unloaded\n");
}
drm_ctxbitmap_cleanup(dev);
radeon_takedown(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
if (dev->agp) {
drm_agp_uninit();
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
#endif
}
module_init(radeon_init);
module_exit(radeon_cleanup);
int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_version_t version;
int len;
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = RADEON_MAJOR;
version.version_minor = RADEON_MINOR;
version.version_patchlevel = RADEON_PATCHLEVEL;
DRM_COPY(version.name, RADEON_NAME);
DRM_COPY(version.date, RADEON_DATE);
DRM_COPY(version.desc, RADEON_DESC);
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version)))
return -EFAULT;
return 0;
}
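
For reference, a minimal user-space sketch of the client side of radeon_version() -- not part of this commit; the device node path, buffer sizes and error handling are illustrative assumptions:

/* Illustrative user-space sketch -- not part of this commit.  Assumes
 * the drm.h of this era (drm_version_t, DRM_IOCTL_VERSION) is on the
 * include path and that the DRI setup has created a device node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm.h"

int print_radeon_version(const char *node)   /* e.g. "/dev/dri/card0" */
{
        drm_version_t v;
        char name[64], date[64], desc[64];
        int fd = open(node, O_RDWR);

        if (fd < 0) return -1;

        memset(&v, 0, sizeof(v));
        v.name_len = sizeof(name);  v.name = name;
        v.date_len = sizeof(date);  v.date = date;
        v.desc_len = sizeof(desc);  v.desc = desc;

        if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) {
                close(fd);
                return -1;
        }

        /* With this commit the radeon module reports 1.0.0 (20001108). */
        printf("%.*s %d.%d.%d (%.*s)\n",
               (int)v.name_len, name,
               v.version_major, v.version_minor, v.version_patchlevel,
               (int)v.date_len, date);
        close(fd);
        return 0;
}
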
int radeon_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = &radeon_device;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_inc(&dev->total_open);
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return radeon_setup(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
int radeon_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev;
int retcode = 0;
lock_kernel();
dev = priv->dev;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_release(inode, filp))) {
#if LINUX_VERSION_CODE < 0x020333
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_inc(&dev->total_close);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
spin_unlock(&dev->count_lock);
unlock_kernel();
return -EBUSY;
}
spin_unlock(&dev->count_lock);
unlock_kernel();
return radeon_takedown(dev);
}
spin_unlock(&dev->count_lock);
}
unlock_kernel();
return retcode;
}
/* radeon_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= RADEON_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &radeon_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_lock_t lock;
#if DRM_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
if (lock.context < 0 /* || lock.context >= dev->queue_count */)
return -EINVAL;
if (!ret) {
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = current->pid;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
#if 1
current->policy |= SCHED_YIELD;
#endif
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
#if 0
if (!ret && dev->last_context != lock.context &&
lock.context != radeon_res_ctx.handle &&
dev->last_context != radeon_res_ctx.handle) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != lock.context */
radeon_context_switch(dev, dev->last_context, lock.context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == lock.context
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
current->policy |= SCHED_YIELD;
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
ret = -EINTR;
} else if (dev->last_context != lock.context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context, lock.context);
}
}
#endif
if (!ret) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
sigaddset(&dev->sigmask, SIGTTIN);
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock.context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
/* Make hardware quiescent */
DRM_DEBUG("not quiescent!\n");
#if 0
radeon_quiescent(dev);
#endif
}
}
#if LINUX_VERSION_CODE < 0x020400
if (lock.context != radeon_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY/4;
}
#endif
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif
return ret;
}
int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_lock_t lock;
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
/* FIXME: Try to send data to card here */
if (!dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
#if LINUX_VERSION_CODE < 0x020400
if (lock.context != radeon_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY;
}
#endif
unblock_all_signals();
return 0;
}
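
The lock ioctls above are the heavyweight path: DRI clients of this era normally spin on the hardware lock word in the SAREA (libdrm's DRM_LOCK macro) and only fall back to DRM_IOCTL_LOCK on contention. A hedged sketch of driving the kernel entry points directly, assuming an authenticated fd and a context handle from the usual DRI setup -- not part of this commit:

/* Illustrative user-space sketch -- not part of this commit.  Assumes
 * an authenticated DRM fd and a context created via DRM_IOCTL_ADD_CTX
 * (drmCreateContext). */
#include <sys/ioctl.h>
#include "drm.h"

int with_radeon_lock(int fd, int context)
{
        drm_lock_t lock;

        lock.context = context;   /* radeon_lock() rejects DRM_KERNEL_CONTEXT */
        lock.flags   = 0;         /* _DRM_LOCK_READY/_DRM_LOCK_QUIESCENT are
                                     accepted but are still stubs above */
        if (ioctl(fd, DRM_IOCTL_LOCK, &lock) < 0)
                return -1;

        /* ... program the chip or emit CP packets while holding the lock ... */

        return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
}
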


@@ -6,7 +6,7 @@
# drm.o is a fake target -- it is never built
# The real targets are in the module-list
O_TARGET := drm.o
module-list := gamma.o tdfx.o r128.o ffb.o mga.o i810.o
module-list := gamma.o tdfx.o r128.o radeon.o ffb.o mga.o i810.o
export-objs := $(patsubst %.o,%_drv.o,$(module-list))
# libs-objs are included in every module so that radical changes to the
@@ -36,18 +36,23 @@ else
endif
endif
gamma-objs := $(lib-objs) gamma_drv.o gamma_dma.o
tdfx-objs := $(lib-objs) tdfx_drv.o tdfx_context.o
r128-objs := $(lib-objs) r128_drv.o r128_dma.o r128_context.o r128_bufs.o
ffb-objs := $(lib-objs) ffb_drv.o ffb_context.o
mga-objs := $(lib-objs) mga_drv.o mga_dma.o mga_context.o mga_bufs.o \
mga_state.o
i810-objs := $(lib-objs) i810_drv.o i810_dma.o i810_context.o i810_bufs.o
gamma-objs := $(lib-objs) gamma_drv.o gamma_dma.o
tdfx-objs := $(lib-objs) tdfx_drv.o tdfx_context.o
r128-objs := $(lib-objs) r128_drv.o r128_cce.o r128_context.o \
r128_bufs.o r128_state.o
radeon-objs := $(lib-objs) radeon_drv.o radeon_cp.o radeon_context.o \
radeon_bufs.o radeon_state.o
ffb-objs := $(lib-objs) ffb_drv.o ffb_context.o
mga-objs := $(lib-objs) mga_drv.o mga_dma.o mga_context.o \
mga_bufs.o mga_state.o
i810-objs := $(lib-objs) i810_drv.o i810_dma.o i810_context.o \
i810_bufs.o
obj-$(CONFIG_DRM_GAMMA) += gamma.o $(gamma-objs)
obj-$(CONFIG_DRM_TDFX) += tdfx.o $(tdfx-objs)
obj-$(CONFIG_DRM_R128) += r128.o $(r128-objs)
obj-$(CONFIG_DRM_FFB) += ffb.o $(ffb-objs)
obj-$(CONFIG_DRM_GAMMA) += gamma.o $(gamma-objs)
obj-$(CONFIG_DRM_TDFX) += tdfx.o $(tdfx-objs)
obj-$(CONFIG_DRM_R128) += r128.o $(r128-objs)
obj-$(CONFIG_DRM_RADEON) += radeon.o $(radeon-objs)
obj-$(CONFIG_DRM_FFB) += ffb.o $(ffb-objs)
ifneq ($(CONFIG_AGP),)
obj-$(CONFIG_DRM_MGA) += mga.o $(mga-objs)
@@ -84,5 +89,8 @@ i810.o: $(i810-objs)
r128.o: $(r128-objs)
$(LD) -r -o $@ $(r128-objs)
radeon.o: $(radeon-objs)
$(LD) -r -o $@ $(radeon-objs)
ffb.o: $(ffb-objs)
$(LD) -r -o $@ $(ffb-objs)


@@ -47,7 +47,7 @@
# **** End of SMP/MODVERSIONS detection
MODS= gamma.o tdfx.o r128.o
MODS= gamma.o tdfx.o r128.o radeon.o
LIBS= libdrm.a
DRMOBJS= init.o memory.o proc.o auth.o context.o drawable.o bufs.o \
@@ -60,9 +60,13 @@ GAMMAHEADERS= gamma_drv.h $(DRMHEADERS)
TDFXOBJS= tdfx_drv.o tdfx_context.o
TDFXHEADERS= tdfx_drv.h $(DRMHEADERS)
R128OBJS= r128_drv.o r128_dma.o r128_bufs.o r128_context.o
R128OBJS= r128_drv.o r128_cce.o r128_bufs.o r128_context.o r128_state.o
R128HEADERS= r128_drv.h r128_drm.h $(DRMHEADERS)
RADEONOBJS= radeon_drv.o radeon_cp.o radeon_bufs.o radeon_context.o \
radeon_state.o
RADEONHEADERS= radeon_drv.h radeon_drm.h $(DRMHEADERS)
INC= /usr/include
CFLAGS= -O2 $(WARNINGS)
@@ -207,6 +211,11 @@ r128_drv.o: r128_drv.c
r128.o: $(R128OBJS) $(LIBS)
$(LD) -r $^ -o $@
radeon_drv.o: radeon_drv.c
$(CC) $(MODCFLAGS) -DEXPORT_SYMTAB -I$(TREE) -c $< -o $@
radeon.o: $(RADEONOBJS) $(LIBS)
$(LD) -r $^ -o $@
sis.o: $(SISOBJS) $(LIBS)
$(LD) -r $^ -o $@
@@ -239,6 +248,7 @@ $(DRMOBJS): $(DRMHEADERS)
$(GAMMAOBJS): $(GAMMAHEADERS)
$(TDFXOBJS): $(TDFXHEADERS)
$(R128OBJS): $(R128HEADERS)
$(RADEONOBJS): $(RADEONHEADERS)
ifeq ($(AGP),1)
$(MGAOBJS): $(MGAHEADERS)
$(I810OBJS): $(I810HEADERS)


@@ -82,6 +82,7 @@ typedef struct drm_clip_rect {
#include "mga_drm.h"
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
#include "sis_drm.h"
typedef struct drm_version {
@@ -363,12 +364,32 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_I810_DOCOPY DRM_IO ( 0x48)
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
#define DRM_IOCTL_R128_IDLE DRM_IO( 0x43)
#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
#define DRM_IOCTL_R128_PACKET DRM_IOWR(0x4c, drm_r128_packet_t)
/* Radeon specific ioctls */
#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x40)
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x41, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x42)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x43, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x44)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x45)
#define DRM_IOCTL_RADEON_CP_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_RADEON_CP_CLEAR DRM_IOW( 0x47, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_CP_VERTEX DRM_IOW( 0x48, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_CP_INDICES DRM_IOW( 0x49, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_CP_BLIT DRM_IOW( 0x4a, drm_radeon_blit_t)
#define DRM_IOCTL_RADEON_CP_PACKET DRM_IOWR(0x4b, drm_radeon_packet_t)
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR( 0x44, drm_sis_mem_t)
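
The Radeon commands above are encoded with the DRM_IO()/DRM_IOW()/DRM_IOWR() wrappers around the standard _IO() family, so direction and payload size live in the command number itself; most of them are marked auth-only or root-only in radeon_ioctls[], and in practice are issued by the X server and the DRI client library. A minimal sketch of invoking two of the argument-less commands, assuming a suitably privileged, authenticated fd -- not part of this commit:

/* Illustrative user-space sketch -- not part of this commit. */
#include <sys/ioctl.h>
#include "drm.h"        /* DRM_IOCTL_RADEON_CP_IDLE, DRM_IOCTL_RADEON_RESET */

/* Wait for the command processor to drain, then reset the engine. */
static int radeon_idle_then_reset(int fd)
{
        if (ioctl(fd, DRM_IOCTL_RADEON_CP_IDLE, 0) < 0)
                return -1;
        return ioctl(fd, DRM_IOCTL_RADEON_RESET, 0);
}
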


@@ -11,11 +11,11 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -23,11 +23,11 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*
* Authors: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*
*/
#define __NO_VERSION__
@@ -60,9 +60,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dma) return -EINVAL;
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request)))
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
count = request.count;
@@ -94,7 +92,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
@@ -102,7 +100,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
@@ -111,7 +109,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
@@ -173,9 +171,7 @@ int r128_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = entry->buf_count;
request.size = size;
if (copy_to_user((drm_buf_desc_t *)arg,
&request,
sizeof(request)))
if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
@@ -195,9 +191,7 @@ int r128_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
if (!dev_priv || dev_priv->is_pci) return -EINVAL;
if (copy_from_user(&request,
(drm_buf_desc_t *)arg,
sizeof(request)))
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
@@ -234,25 +228,23 @@ int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
if (copy_from_user(&request,
(drm_buf_map_t *)arg,
sizeof(request)))
if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request)))
return -EFAULT;
if (request.count >= dma->buf_count) {
if (dma->flags & _DRM_DMA_USE_AGP) {
drm_map_t *map;
map = dev_priv->agp_vertbufs;
map = dev_priv->buffers;
if (!map) {
retcode = -EINVAL;
goto done;
}
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, map->size,
virtual = do_mmap(filp, 0, map->size,
PROT_READ|PROT_WRITE,
MAP_SHARED,
MAP_SHARED,
(unsigned long)map->offset);
up(&current->mm->mmap_sem);
} else {
@@ -300,9 +292,7 @@ int r128_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
if (copy_to_user((drm_buf_map_t *)arg,
&request,
sizeof(request)))
if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return retcode;

linux/r128_cce.c (new file, 1237 lines; diff too large to display)


@@ -11,11 +11,11 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@@ -23,7 +23,7 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
@@ -53,21 +53,21 @@ int r128_context_switch(drm_device_t *dev, int old, int new)
#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
if (drm_flags & DRM_FLAG_NOCTX) {
r128_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
return 0;
}
@@ -75,7 +75,7 @@ int r128_context_switch_complete(drm_device_t *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
@@ -86,11 +86,11 @@ int r128_context_switch_complete(drm_device_t *dev, int new)
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
- dev->ctx_start)]);
#endif
clear_bit(0, &dev->context_flag);
wake_up(&dev->context_wait);
return 0;
}
@@ -109,9 +109,7 @@ int r128_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i)))
if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
return -EFAULT;
}
}


@@ -1,909 +0,0 @@
/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <martin@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "r128_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#define DO_REMAP(_m) (_m)->handle = drm_ioremap((_m)->offset, (_m)->size)
#define DO_REMAPFREE(_m) \
do { \
if ((_m)->handle && (_m)->size) \
drm_ioremapfree((_m)->handle, (_m)->size); \
} while (0)
#define DO_FIND_MAP(_m, _o) \
do { \
int _i; \
for (_i = 0; _i < dev->map_count; _i++) { \
if (dev->maplist[_i]->offset == _o) { \
_m = dev->maplist[_i]; \
break; \
} \
} \
} while (0)
#define R128_MAX_VBUF_AGE 0x10000000
#define R128_VB_AGE_REG R128_GUI_SCRATCH_REG0
int R128_READ_PLL(drm_device_t *dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
return R128_READ(R128_CLOCK_CNTL_DATA);
}
#define r128_flush_write_combine() mb()
static void r128_status(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
printk("GUI_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_GUI_STAT));
printk("PM4_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_STAT));
printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
}
static int r128_do_cleanup_cce(drm_device_t *dev)
{
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
if (!dev_priv->is_pci) {
DO_REMAPFREE(dev_priv->agp_ring);
DO_REMAPFREE(dev_priv->agp_read_ptr);
DO_REMAPFREE(dev_priv->agp_vertbufs);
DO_REMAPFREE(dev_priv->agp_indbufs);
DO_REMAPFREE(dev_priv->agp_textures);
}
drm_free(dev->dev_private, sizeof(drm_r128_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
}
return 0;
}
static int r128_do_init_cce(drm_device_t *dev, drm_r128_init_t *init)
{
drm_r128_private_t *dev_priv;
int i;
dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL) return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset(dev_priv, 0, sizeof(drm_r128_private_t));
dev_priv->is_pci = init->is_pci;
dev_priv->usec_timeout = init->usec_timeout;
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL;
return -EINVAL;
}
dev_priv->cce_mode = init->cce_mode;
dev_priv->cce_fifo_size = init->cce_fifo_size;
dev_priv->cce_is_bm_mode =
((init->cce_mode == R128_PM4_192BM) ||
(init->cce_mode == R128_PM4_128BM_64INDBM) ||
(init->cce_mode == R128_PM4_64BM_128INDBM) ||
(init->cce_mode == R128_PM4_64BM_64VCBM_64INDBM));
dev_priv->cce_secure = init->cce_secure;
if (dev_priv->cce_is_bm_mode && dev_priv->is_pci) {
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL;
return -EINVAL;
}
for (i = 0; i < dev->map_count; i++) {
if (dev->maplist[i]->type == _DRM_SHM) {
dev_priv->sarea = dev->maplist[i];
break;
}
}
DO_FIND_MAP(dev_priv->fb, init->fb_offset);
if (!dev_priv->is_pci) {
DO_FIND_MAP(dev_priv->agp_ring, init->agp_ring_offset);
DO_FIND_MAP(dev_priv->agp_read_ptr, init->agp_read_ptr_offset);
DO_FIND_MAP(dev_priv->agp_vertbufs, init->agp_vertbufs_offset);
DO_FIND_MAP(dev_priv->agp_indbufs, init->agp_indbufs_offset);
DO_FIND_MAP(dev_priv->agp_textures, init->agp_textures_offset);
}
DO_FIND_MAP(dev_priv->mmio, init->mmio_offset);
dev_priv->sarea_priv =
(drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
if (!dev_priv->is_pci) {
DO_REMAP(dev_priv->agp_ring);
DO_REMAP(dev_priv->agp_read_ptr);
DO_REMAP(dev_priv->agp_vertbufs);
#if 0
DO_REMAP(dev_priv->agp_indirectbufs);
DO_REMAP(dev_priv->agp_textures);
#endif
dev_priv->ring_size = init->ring_size;
dev_priv->ring_sizel2qw = drm_order(init->ring_size/8);
dev_priv->ring_entries = init->ring_size/sizeof(u32);
dev_priv->ring_read_ptr = ((__volatile__ u32 *)
dev_priv->agp_read_ptr->handle);
dev_priv->ring_start = (u32 *)dev_priv->agp_ring->handle;
dev_priv->ring_end = ((u32 *)dev_priv->agp_ring->handle
+ dev_priv->ring_entries);
}
dev_priv->submit_age = 0;
R128_WRITE(R128_VB_AGE_REG, dev_priv->submit_age);
return 0;
}
int r128_init_cce(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_r128_init_t init;
if (copy_from_user(&init, (drm_r128_init_t *)arg, sizeof(init)))
return -EFAULT;
switch (init.func) {
case R128_INIT_CCE:
return r128_do_init_cce(dev, &init);
case R128_CLEANUP_CCE:
return r128_do_cleanup_cce(dev);
}
return -EINVAL;
}
static void r128_mark_vertbufs_done(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
int i;
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[i];
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
}
static int r128_do_pixcache_flush(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 tmp;
int i;
tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
return 0;
udelay(1);
}
return -EBUSY;
}
static int r128_do_wait_for_fifo(drm_device_t *dev, int entries)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
if (slots >= entries) return 0;
udelay(1);
}
return -EBUSY;
}
static int r128_do_wait_for_idle(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i, ret;
if (!(ret = r128_do_wait_for_fifo(dev, 64))) return ret;
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
(void)r128_do_pixcache_flush(dev);
return 0;
}
udelay(1);
}
return -EBUSY;
}
int r128_do_engine_reset(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
(void)r128_do_pixcache_flush(dev);
clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
R128_WRITE_PLL(R128_MCLK_CNTL,
mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
(void)R128_READ(R128_GEN_RESET_CNTL);
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
(void)R128_READ(R128_GEN_RESET_CNTL);
R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
/* For CCE ring buffer only */
if (dev_priv->cce_is_bm_mode) {
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
*dev_priv->ring_read_ptr = 0;
dev_priv->sarea_priv->ring_write = 0;
}
/* Reset the CCE mode */
(void)r128_do_wait_for_idle(dev);
R128_WRITE(R128_PM4_BUFFER_CNTL,
dev_priv->cce_mode | dev_priv->ring_sizel2qw);
(void)R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */
R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
r128_mark_vertbufs_done(dev);
return 0;
}
int r128_eng_reset(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_eng_reset called without holding the lock\n");
return -EINVAL;
}
return r128_do_engine_reset(dev);
}
static int r128_do_engine_flush(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 tmp;
tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR);
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp | R128_PM4_BUFFER_DL_DONE);
return 0;
}
int r128_eng_flush(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_eng_flush called without holding the lock\n");
return -EINVAL;
}
return r128_do_engine_flush(dev);
}
static int r128_do_cce_wait_for_fifo(drm_device_t *dev, int entries)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = R128_READ(R128_PM4_STAT) & R128_PM4_FIFOCNT_MASK;
if (slots >= entries) return 0;
udelay(1);
}
return -EBUSY;
}
int r128_do_cce_wait_for_idle(drm_device_t *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
if (dev_priv->cce_is_bm_mode) {
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (*dev_priv->ring_read_ptr == dev_priv->sarea_priv->ring_write) {
int pm4stat = R128_READ(R128_PM4_STAT);
if ((pm4stat & R128_PM4_FIFOCNT_MASK) >= dev_priv->cce_fifo_size &&
!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
return r128_do_pixcache_flush(dev);
}
}
udelay(1);
}
return -EBUSY;
} else {
int ret = r128_do_cce_wait_for_fifo(dev, dev_priv->cce_fifo_size);
if (ret < 0) return ret;
for (i = 0; i < dev_priv->usec_timeout; i++) {
int pm4stat = R128_READ(R128_PM4_STAT);
if (!(pm4stat & (R128_PM4_BUSY | R128_PM4_GUI_ACTIVE))) {
return r128_do_pixcache_flush(dev);
}
udelay(1);
}
return -EBUSY;
}
}
int r128_cce_idle(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_wait_idle called without holding the lock\n");
return -EINVAL;
}
return r128_do_cce_wait_for_idle(dev);
}
static int r128_submit_packets_ring_secure(drm_device_t *dev,
u32 *commands, int *count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int write = dev_priv->sarea_priv->ring_write;
int *write_ptr = dev_priv->ring_start + write;
int c = *count;
u32 tmp = 0;
int psize = 0;
int writing = 1;
int timeout;
while (c > 0) {
tmp = *commands++;
if (!psize) {
writing = 1;
if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) {
if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) {
if ((tmp & R128_CCE_PACKET0_REG_MASK) !=
(R128_PM4_VC_FPU_SETUP >> 2)) {
writing = 0;
}
}
psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
} else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) {
if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) {
if ((tmp & R128_CCE_PACKET1_REG0_MASK) !=
(R128_PM4_VC_FPU_SETUP >> 2)) {
writing = 0;
}
} else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <=
(0x1004 << 9)) {
if ((tmp & R128_CCE_PACKET1_REG1_MASK) !=
(R128_PM4_VC_FPU_SETUP << 9)) {
writing = 0;
}
}
psize = 3;
} else {
psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
}
}
psize--;
if (writing) {
write++;
*write_ptr++ = tmp;
}
if (write >= dev_priv->ring_entries) {
write = 0;
write_ptr = dev_priv->ring_start;
}
timeout = 0;
while (write == *dev_priv->ring_read_ptr) {
(void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
if (timeout++ >= dev_priv->usec_timeout)
return -EBUSY;
udelay(1);
}
c--;
}
if (write < 32)
memcpy(dev_priv->ring_end,
dev_priv->ring_start,
write * sizeof(u32));
/* Make sure WC cache has been flushed */
r128_flush_write_combine();
dev_priv->sarea_priv->ring_write = write;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
*count = 0;
return 0;
}
static int r128_submit_packets_pio_secure(drm_device_t *dev,
u32 *commands, int *count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 tmp = 0;
int psize = 0;
int writing = 1;
int addr = R128_PM4_FIFO_DATA_EVEN;
int ret;
while (*count > 0) {
tmp = *commands++;
if (!psize) {
writing = 1;
if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET0) {
if ((tmp & R128_CCE_PACKET0_REG_MASK) <= (0x1004 >> 2)) {
if ((tmp & R128_CCE_PACKET0_REG_MASK) !=
(R128_PM4_VC_FPU_SETUP >> 2)) {
writing = 0;
}
}
psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
} else if ((tmp & R128_CCE_PACKET_MASK) == R128_CCE_PACKET1) {
if ((tmp & R128_CCE_PACKET1_REG0_MASK) <= (0x1004 >> 2)) {
if ((tmp & R128_CCE_PACKET1_REG0_MASK) !=
(R128_PM4_VC_FPU_SETUP >> 2)) {
writing = 0;
}
} else if ((tmp & R128_CCE_PACKET1_REG1_MASK) <=
(0x1004 << 9)) {
if ((tmp & R128_CCE_PACKET1_REG1_MASK) !=
(R128_PM4_VC_FPU_SETUP << 9)) {
writing = 0;
}
}
psize = 3;
} else {
psize = ((tmp & R128_CCE_PACKET_COUNT_MASK) >> 16) + 2;
}
}
psize--;
if (writing) {
if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0)
return ret;
R128_WRITE(addr, tmp);
addr ^= 0x0004;
}
*count -= 1;
}
if (addr == R128_PM4_FIFO_DATA_ODD) {
if ((ret = r128_do_cce_wait_for_fifo(dev, 1)) < 0) return ret;
R128_WRITE(addr, R128_CCE_PACKET2);
}
return 0;
}
static int r128_submit_packets_ring(drm_device_t *dev,
u32 *commands, int *count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int write = dev_priv->sarea_priv->ring_write;
int *write_ptr = dev_priv->ring_start + write;
int c = *count;
int timeout;
while (c > 0) {
write++;
*write_ptr++ = *commands++;
if (write >= dev_priv->ring_entries) {
write = 0;
write_ptr = dev_priv->ring_start;
}
timeout = 0;
while (write == *dev_priv->ring_read_ptr) {
(void)R128_READ(R128_PM4_BUFFER_DL_RPTR);
if (timeout++ >= dev_priv->usec_timeout)
return -EBUSY;
udelay(1);
}
c--;
}
if (write < 32)
memcpy(dev_priv->ring_end,
dev_priv->ring_start,
write * sizeof(u32));
/* Make sure WC cache has been flushed */
r128_flush_write_combine();
dev_priv->sarea_priv->ring_write = write;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, write);
*count = 0;
return 0;
}
static int r128_submit_packets_pio(drm_device_t *dev,
u32 *commands, int *count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int ret;
while (*count > 1) {
if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
R128_WRITE(R128_PM4_FIFO_DATA_ODD, *commands++);
*count -= 2;
}
if (*count) {
if ((ret = r128_do_cce_wait_for_fifo(dev, 2)) < 0) return ret;
R128_WRITE(R128_PM4_FIFO_DATA_EVEN, *commands++);
R128_WRITE(R128_PM4_FIFO_DATA_ODD, R128_CCE_PACKET2);
*count = 0;
}
return 0;
}
static int r128_do_submit_packets(drm_device_t *dev, u32 *buffer, int count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int c = count;
int ret;
if (dev_priv->cce_is_bm_mode) {
int left = 0;
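/* The ring can hold at most ring_entries - 1 dwords at a time (the
 * write pointer must never catch the read pointer), so clamp the
 * request and hand the number of leftover dwords back to the caller.
 */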
if (c >= dev_priv->ring_entries) {
c = dev_priv->ring_entries-1;
left = count - c;
}
/* Since this is only used by the kernel we can use the
insecure ring buffer submit packet routine */
ret = r128_submit_packets_ring(dev, buffer, &c);
c += left;
} else {
/* Since this is only used by the kernel we can use the
insecure PIO submit packet routine */
ret = r128_submit_packets_pio(dev, buffer, &c);
}
if (ret < 0) return ret;
else return c;
}
int r128_submit_pkt(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_packet_t packet;
u32 *buffer;
int c;
int size;
int ret = 0;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_submit_pkt called without holding the lock\n");
return -EINVAL;
}
if (copy_from_user(&packet, (drm_r128_packet_t *)arg, sizeof(packet)))
return -EFAULT;
c = packet.count;
size = c * sizeof(*buffer);
if (dev_priv->cce_is_bm_mode) {
int left = 0;
if (c >= dev_priv->ring_entries) {
c = dev_priv->ring_entries-1;
size = c * sizeof(*buffer);
left = packet.count - c;
}
if ((buffer = kmalloc(size, GFP_KERNEL)) == NULL) return -ENOMEM;
if (copy_from_user(buffer, packet.buffer, size)) {
kfree(buffer);
return -EFAULT;
}
if (dev_priv->cce_secure)
ret = r128_submit_packets_ring_secure(dev, buffer, &c);
else
ret = r128_submit_packets_ring(dev, buffer, &c);
c += left;
} else {
if ((buffer = kmalloc(size, GFP_KERNEL)) == NULL) return -ENOMEM;
if (copy_from_user(buffer, packet.buffer, size)) {
kfree(buffer);
return -EFAULT;
}
if (dev_priv->cce_secure)
ret = r128_submit_packets_pio_secure(dev, buffer, &c);
else
ret = r128_submit_packets_pio(dev, buffer, &c);
}
kfree(buffer);
packet.count = c;
if (copy_to_user((drm_r128_packet_t *)arg, &packet, sizeof(packet)))
return -EFAULT;
if (ret) return ret;
else if (c > 0) return -EAGAIN;
return 0;
}
static int r128_send_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
{
drm_device_dma_t *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv;
drm_buf_t *buf;
int i, ret;
u32 cce[2];
/* Make sure we have valid data */
for (i = 0; i < v->send_count; i++) {
int idx = v->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
return -EINVAL;
}
buf = dma->buflist[idx];
if (buf->pid != current->pid) {
DRM_ERROR("Process %d using buffer owned by %d\n",
current->pid, buf->pid);
return -EINVAL;
}
if (buf->pending) {
DRM_ERROR("Sending pending buffer:"
" buffer %d, offset %d\n",
v->send_indices[i], i);
return -EINVAL;
}
}
/* Wait for idle, if we've wrapped to make sure that all pending
buffers have been processed */
if (dev_priv->submit_age == R128_MAX_VBUF_AGE) {
if ((ret = r128_do_cce_wait_for_idle(dev)) < 0) return ret;
dev_priv->submit_age = 0;
r128_mark_vertbufs_done(dev);
}
/* Make sure WC cache has been flushed (if in PIO mode) */
if (!dev_priv->cce_is_bm_mode) r128_flush_write_combine();
/* FIXME: Add support for sending vertex buffer to the CCE here
instead of in client code. The v->prim holds the primitive
type that should be drawn. Loop over the list buffers in
send_indices[] and submit a packet for each VB.
This will require us to loop over the clip rects here as
well, which implies that we extend the kernel driver to allow
cliprects to be stored here. Note that the cliprects could
possibly come from the X server instead of the client, but
this will require additional changes to the DRI to allow for
this optimization. */
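/* Buffer completion is tracked with a simple age scheme: the packet
built below has the CCE write submit_age into R128_VB_AGE_REG, and
r128_freelist_get() compares each pending buffer's recorded age
against that register to tell when the hardware is done with it. */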
/* Submit a CCE packet that writes submit_age to R128_VB_AGE_REG */
cce[0] = R128CCE0(R128_CCE_PACKET0, R128_VB_AGE_REG, 0);
cce[1] = dev_priv->submit_age;
if ((ret = r128_do_submit_packets(dev, cce, 2)) < 0) {
/* Until we add support for sending VBs to the CCE in
this routine, we can recover from this error. After
we add that support, we won't be able to easily
recover, so we will probably have to implement
another mechanism for handling timeouts from packets
submitted directly by the kernel. */
return ret;
}
/* Now that the submit packet request has succeeded, we can mark
the buffers as pending */
for (i = 0; i < v->send_count; i++) {
buf = dma->buflist[v->send_indices[i]];
buf->pending = 1;
buf_priv = buf->dev_private;
buf_priv->age = dev_priv->submit_age;
}
dev_priv->submit_age++;
return 0;
}
static drm_buf_t *r128_freelist_get(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv;
drm_buf_t *buf;
int i, t;
/* FIXME: Optimize -- use freelist code */
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if (buf->pid == 0) return buf;
}
for (t = 0; t < dev_priv->usec_timeout; t++) {
u32 done_age = R128_READ(R128_VB_AGE_REG);
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if (buf->pending && buf_priv->age <= done_age) {
/* The buffer has been processed, so it
can now be used */
buf->pending = 0;
return buf;
}
}
udelay(1);
}
r128_status(dev);
return NULL;
}
static int r128_get_vertbufs(drm_device_t *dev, drm_r128_vertex_t *v)
{
drm_buf_t *buf;
int i;
for (i = v->granted_count; i < v->request_count; i++) {
buf = r128_freelist_get(dev);
if (!buf) break;
buf->pid = current->pid;
if (copy_to_user(&v->request_indices[i],
&buf->idx,
sizeof(buf->idx)) ||
copy_to_user(&v->request_sizes[i],
&buf->total,
sizeof(buf->total)))
return -EFAULT;
++v->granted_count;
}
return 0;
}
int r128_vertex_buf(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
drm_r128_vertex_t v;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
dev->lock.pid != current->pid) {
DRM_ERROR("r128_vertex_buf called without holding the lock\n");
return -EINVAL;
}
if (!dev_priv || dev_priv->is_pci) {
DRM_ERROR("r128_vertex_buf called with a PCI card\n");
return -EINVAL;
}
if (copy_from_user(&v, (drm_r128_vertex_t *)arg, sizeof(v)))
return -EFAULT;
DRM_DEBUG("%d: %d send, %d req\n",
current->pid, v.send_count, v.request_count);
if (v.send_count < 0 || v.send_count > dma->buf_count) {
DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
current->pid, v.send_count, dma->buf_count);
return -EINVAL;
}
if (v.request_count < 0 || v.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
current->pid, v.request_count, dma->buf_count);
return -EINVAL;
}
if (v.send_count) {
retcode = r128_send_vertbufs(dev, &v);
}
v.granted_count = 0;
if (!retcode && v.request_count) {
retcode = r128_get_vertbufs(dev, &v);
}
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, v.granted_count);
if (copy_to_user((drm_r128_vertex_t *)arg, &v, sizeof(v)))
return -EFAULT;
return retcode;
}

View file

@ -11,11 +11,11 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@ -24,88 +24,241 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <martin@valinux.com>
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#ifndef _R128_DRM_H_
#define _R128_DRM_H_
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (r128_sarea.h)
*/
#ifndef __R128_SAREA_DEFINES__
#define __R128_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
*/
#define R128_UPLOAD_CONTEXT 0x001
#define R128_UPLOAD_SETUP 0x002
#define R128_UPLOAD_TEX0 0x004
#define R128_UPLOAD_TEX1 0x008
#define R128_UPLOAD_TEX0IMAGES 0x010
#define R128_UPLOAD_TEX1IMAGES 0x020
#define R128_UPLOAD_CORE 0x040
#define R128_UPLOAD_MASKS 0x080
#define R128_UPLOAD_WINDOW 0x100
#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
#define R128_REQUIRE_QUIESCENCE 0x400
#define R128_UPLOAD_ALL 0x7ff
#define R128_FRONT 0x1
#define R128_BACK 0x2
#define R128_DEPTH 0x4
/* Primitive types
*/
#define R128_POINTS 0x1
#define R128_LINES 0x2
#define R128_LINE_STRIP 0x3
#define R128_TRIANGLES 0x4
#define R128_TRIANGLE_FAN 0x5
#define R128_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
*/
#if 1
#define R128_BUFFER_SIZE 16384
#else
#define R128_BUFFER_SIZE (128 * 1024)
#endif
/* Byte offsets for indirect buffer data
*/
#define R128_INDEX_PRIM_OFFSET 20
#define R128_HOSTDATA_BLIT_OFFSET 32
/* 2048x2048 @ 32bpp texture requires this many indirect buffers
*/
#define R128_MAX_BLIT_BUFFERS ((2048 * 2048 * 4) / R128_BUFFER_SIZE)
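/* With the 16KB buffer size above this works out to
 * (2048 * 2048 * 4) / 16384 = 1024 indirect buffers.
 */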
/* Keep these small for testing.
*/
#define R128_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/AGP). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define R128_LOCAL_TEX_HEAP 0
#define R128_AGP_TEX_HEAP 1
#define R128_NR_TEX_HEAPS 2
#define R128_NR_TEX_REGIONS 16
#define R128_LOG_TEX_GRANULARITY 16
#define R128_NR_CONTEXT_REGS 12
#define R128_TEX_MAXLEVELS 11
#endif /* __R128_SAREA_DEFINES__ */
typedef struct {
/* Context state - can be written in one large chunk */
unsigned int dst_pitch_offset_c;
unsigned int dp_gui_master_cntl_c;
unsigned int sc_top_left_c;
unsigned int sc_bottom_right_c;
unsigned int z_offset_c;
unsigned int z_pitch_c;
unsigned int z_sten_cntl_c;
unsigned int tex_cntl_c;
unsigned int misc_3d_state_cntl_reg;
unsigned int texture_clr_cmp_clr_c;
unsigned int texture_clr_cmp_msk_c;
unsigned int fog_color_c;
/* Texture state */
unsigned int tex_size_pitch_c;
unsigned int constant_color_c;
/* Setup state */
unsigned int pm4_vc_fpu_setup;
unsigned int setup_cntl;
/* Mask state */
unsigned int dp_write_mask;
unsigned int sten_ref_mask_c;
unsigned int plane_3d_mask_c;
/* Window state */
unsigned int window_xy_offset;
/* Core state */
unsigned int scale_3d_cntl;
} drm_r128_context_regs_t;
/* Setup registers for each texture unit */
typedef struct {
unsigned int tex_cntl;
unsigned int tex_combine_cntl;
unsigned int tex_size_pitch;
unsigned int tex_offset[R128_TEX_MAXLEVELS];
unsigned int tex_border_color;
} drm_r128_texture_regs_t;
typedef struct drm_r128_tex_region {
unsigned char next, prev;
unsigned char in_use;
int age;
} drm_r128_tex_region_t;
typedef struct drm_r128_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex buffer.
*/
drm_r128_context_regs_t context_state;
drm_r128_texture_regs_t tex_state[R128_NR_TEX_HEAPS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
drm_r128_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
} drm_r128_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmR128.h)
*/
typedef struct drm_r128_init {
enum {
enum {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
int sarea_priv_offset;
int is_pci;
int cce_mode;
int cce_fifo_size;
int cce_secure;
int ring_size;
int usec_timeout;
int fb_offset;
int agp_ring_offset;
int agp_read_ptr_offset;
int agp_vertbufs_offset;
int agp_indbufs_offset;
int agp_textures_offset;
int mmio_offset;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int front_x, front_y;
unsigned int back_offset, back_pitch;
unsigned int back_x, back_y;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned int depth_x, depth_y;
unsigned int fb_offset;
unsigned int mmio_offset;
unsigned int ring_offset;
unsigned int ring_rptr_offset;
unsigned int buffers_offset;
unsigned int agp_textures_offset;
} drm_r128_init_t;
typedef struct drm_r128_packet {
void *buffer;
int count;
int flags;
} drm_r128_packet_t;
typedef struct drm_r128_cce_stop {
int flush;
int idle;
} drm_r128_cce_stop_t;
typedef enum drm_r128_prim {
_DRM_R128_PRIM_NONE = 0x0001,
_DRM_R128_PRIM_POINT = 0x0002,
_DRM_R128_PRIM_LINE = 0x0004,
_DRM_R128_PRIM_POLY_LINE = 0x0008,
_DRM_R128_PRIM_TRI_LIST = 0x0010,
_DRM_R128_PRIM_TRI_FAN = 0x0020,
_DRM_R128_PRIM_TRI_STRIP = 0x0040,
_DRM_R128_PRIM_TRI_TYPE2 = 0x0080
} drm_r128_prim_t;
typedef struct drm_r128_clear {
unsigned int flags;
int x, y, w, h;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask;
} drm_r128_clear_t;
typedef struct drm_r128_vertex {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int send_count; /* Number of buffers to send */
int *send_indices; /* List of handles to buffers */
int *send_sizes; /* Lengths of data to send */
drm_r128_prim_t prim; /* Primitive type */
int request_count; /* Number of buffers requested */
int *request_indices; /* Buffer information */
int *request_sizes;
int granted_count; /* Number of buffers granted */
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_r128_vertex_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (r128_sarea.h)
*/
#define R128_LOCAL_TEX_HEAP 0
#define R128_AGP_TEX_HEAP 1
#define R128_NR_TEX_HEAPS 2
#define R128_NR_TEX_REGIONS 64
#define R128_LOG_TEX_GRANULARITY 16
typedef struct drm_r128_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_r128_indices_t;
typedef struct drm_tex_region {
unsigned char next, prev;
unsigned char in_use;
int age;
} drm_tex_region_t;
typedef struct drm_r128_blit_rect {
int index;
unsigned short x, y;
unsigned short width, height;
int padding;
} drm_r128_blit_rect_t;
typedef struct drm_r128_sarea {
drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS+1];
int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
int ring_write;
} drm_r128_sarea_t;
typedef struct drm_r128_blit {
int pitch;
int offset;
int format;
drm_r128_blit_rect_t *rects;
int count;
} drm_r128_blit_t;
typedef struct drm_r128_packet {
unsigned int *buffer;
int count;
int flags;
} drm_r128_packet_t;
#endif

View file

@ -24,8 +24,10 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
@ -33,15 +35,15 @@
#include "drmP.h"
#include "r128_drv.h"
#define R128_NAME "r128"
#define R128_DESC "ATI Rage 128"
#define R128_DATE "20000928"
#define R128_MAJOR 1
#define R128_MINOR 0
#define R128_PATCHLEVEL 0
#define R128_NAME "r128"
#define R128_DESC "ATI Rage 128"
#define R128_DATE "20001106"
#define R128_MAJOR 2
#define R128_MINOR 0
#define R128_PATCHLEVEL 0
static drm_device_t r128_device;
drm_ctx_t r128_res_ctx;
static drm_device_t r128_device;
drm_ctx_t r128_res_ctx;
static struct file_operations r128_fops = {
#if LINUX_VERSION_CODE >= 0x020400
@ -65,52 +67,59 @@ static struct miscdevice r128_misc = {
};
static drm_ioctl_desc_t r128_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { r128_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { r128_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { r128_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { r128_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { r128_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { r128_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { r128_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { r128_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { r128_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { r128_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { r128_cce_buffers, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { r128_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { r128_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
#endif
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_init_cce, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_eng_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_FLUSH)] = { r128_eng_flush, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_submit_pkt, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_IDLE)] = { r128_cce_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_vertex_buf, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INIT)] = { r128_cce_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_START)] = { r128_cce_start, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_STOP)] = { r128_cce_stop, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_RESET)] = { r128_cce_reset, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CCE_IDLE)] = { r128_cce_idle, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_RESET)] = { r128_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_SWAP)] = { r128_cce_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_CLEAR)] = { r128_cce_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_VERTEX)] = { r128_cce_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_INDICES)] = { r128_cce_indices, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_BLIT)] = { r128_cce_blit, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_R128_PACKET)] = { r128_cce_packet, 1, 0 },
};
#define R128_IOCTL_COUNT DRM_ARRAY_SIZE(r128_ioctls)
@ -425,13 +434,13 @@ int r128_version(struct inode *inode, struct file *filp, unsigned int cmd,
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = R128_MAJOR;
@ -507,7 +516,6 @@ int r128_release(struct inode *inode, struct file *filp)
}
/* r128_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
@ -542,6 +550,12 @@ int r128_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
}
}
#if 0
if ( retcode ) {
DRM_INFO( "%s 0x%x ret = %d\n", __FUNCTION__, nr, retcode );
}
#endif
atomic_dec(&dev->ioctl_count);
return retcode;
}
@ -573,33 +587,10 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
#if 0
/* dev->queue_count == 0 right now for
r128. FIXME? */
if (lock.context < 0 || lock.context >= dev->queue_count)
if (lock.context < 0)
return -EINVAL;
#endif
if (!ret) {
#if 0
if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
!= lock.context) {
long j = jiffies - dev->lock.lock_time;
if (lock.context == r128_res_ctx.handle &&
j >= 0 && j < DRM_LOCK_SLICE) {
/* Can't take lock if we just had it and
there is contention. */
DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
lock.context, current->pid, j,
dev->lock.lock_time, jiffies);
current->state = TASK_INTERRUPTIBLE;
current->policy |= SCHED_YIELD;
schedule_timeout(DRM_LOCK_SLICE-j);
DRM_DEBUG("jiffies=%d\n", jiffies);
}
}
#endif
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
@ -618,9 +609,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
/* Contention */
atomic_inc(&dev->total_sleeps);
#if 1
current->policy |= SCHED_YIELD;
#endif
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
@ -631,32 +619,6 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
#if 0
if (!ret && dev->last_context != lock.context &&
lock.context != r128_res_ctx.handle &&
dev->last_context != r128_res_ctx.handle) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != lock.context */
r128_context_switch(dev, dev->last_context, lock.context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == lock.context
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
current->policy |= SCHED_YIELD;
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
ret = -EINTR;
} else if (dev->last_context != lock.context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context, lock.context);
}
}
#endif
if (!ret) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
@ -671,6 +633,7 @@ int r128_lock(struct inode *inode, struct file *filp, unsigned int cmd,
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
/* Make hardware quiescent */
DRM_DEBUG( "not quiescent!\n" );
#if 0
r128_quiescent(dev);
#endif

View file

@ -24,75 +24,132 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#ifndef _R128_DRV_H_
#define _R128_DRV_H_
#ifndef __R128_DRV_H__
#define __R128_DRV_H__
typedef struct drm_r128_freelist {
unsigned int age;
drm_buf_t *buf;
struct drm_r128_freelist *next;
struct drm_r128_freelist *prev;
} drm_r128_freelist_t;
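/* Ring buffer bookkeeping: start/end bound the ring mapping, head
 * points at the memory word holding the hardware read pointer, tail
 * is the next dword index to be written (wrapped with tail_mask), and
 * space counts the bytes that may still be queued before the write
 * pointer would overtake the read pointer (see the BEGIN_RING,
 * OUT_RING and ADVANCE_RING macros at the end of this file).
 */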
typedef struct drm_r128_ring_buffer {
u32 *start;
u32 *end;
int size;
int size_l2qw;
volatile u32 *head;
u32 tail;
u32 tail_mask;
int space;
} drm_r128_ring_buffer_t;
typedef struct drm_r128_private {
int is_pci;
int cce_mode;
int cce_fifo_size;
int cce_is_bm_mode;
int cce_secure;
drm_r128_ring_buffer_t ring;
drm_r128_sarea_t *sarea_priv;
__volatile__ u32 *ring_read_ptr;
int cce_mode;
int cce_fifo_size;
int cce_secure;
int cce_running;
u32 *ring_start;
u32 *ring_end;
int ring_size;
int ring_sizel2qw;
int ring_entries;
drm_r128_freelist_t *head;
drm_r128_freelist_t *tail;
int submit_age;
int usec_timeout;
int is_pci;
int usec_timeout;
atomic_t idle_count;
drm_map_t *sarea;
drm_map_t *fb;
drm_map_t *agp_ring;
drm_map_t *agp_read_ptr;
drm_map_t *agp_vertbufs;
drm_map_t *agp_indbufs;
drm_map_t *agp_textures;
drm_map_t *mmio;
unsigned int fb_bpp;
unsigned int front_offset;
unsigned int front_pitch;
unsigned int front_x;
unsigned int front_y;
unsigned int back_offset;
unsigned int back_pitch;
unsigned int back_x;
unsigned int back_y;
unsigned int depth_bpp;
unsigned int depth_offset;
unsigned int depth_pitch;
unsigned int depth_x;
unsigned int depth_y;
drm_map_t *sarea;
drm_map_t *fb;
drm_map_t *mmio;
drm_map_t *cce_ring;
drm_map_t *ring_rptr;
drm_map_t *buffers;
drm_map_t *agp_textures;
} drm_r128_private_t;
typedef struct drm_r128_buf_priv {
u32 age;
u32 age;
int prim;
int discard;
int dispatched;
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
/* r128_drv.c */
extern int r128_version(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_open(struct inode *inode, struct file *filp);
extern int r128_release(struct inode *inode, struct file *filp);
extern int r128_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_version( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_open( struct inode *inode, struct file *filp );
extern int r128_release( struct inode *inode, struct file *filp );
extern int r128_ioctl( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_lock( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_unlock( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* r128_dma.c */
extern int r128_init_cce(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_eng_reset(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_eng_flush(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_submit_pkt(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_cce_idle(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int r128_vertex_buf(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* r128_cce.c */
extern int r128_cce_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_start( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_stop( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_reset( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_idle( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_engine_reset( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_packet( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_buffers( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern void r128_freelist_reset( drm_device_t *dev );
extern drm_buf_t *r128_freelist_get( drm_device_t *dev );
extern int r128_wait_ring( drm_r128_private_t *dev_priv, int n );
extern void r128_update_ring_snapshot( drm_r128_private_t *dev_priv );
/* r128_state.c */
extern int r128_cce_clear( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_swap( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_vertex( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_indices( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int r128_cce_blit( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* r128_bufs.c */
extern int r128_addbufs(struct inode *inode, struct file *filp,
@ -124,78 +181,203 @@ extern int r128_context_switch_complete(drm_device_t *dev, int new);
* for Rage 128 kernel driver.
*/
#define R128_PC_NGUI_CTLSTAT 0x0184
# define R128_PC_FLUSH_ALL 0x00ff
# define R128_PC_BUSY (1 << 31)
#define R128_AUX_SC_CNTL 0x1660
# define R128_AUX1_SC_EN (1 << 0)
# define R128_AUX1_SC_MODE_OR (0 << 1)
# define R128_AUX1_SC_MODE_NAND (1 << 1)
# define R128_AUX2_SC_EN (1 << 2)
# define R128_AUX2_SC_MODE_OR (0 << 3)
# define R128_AUX2_SC_MODE_NAND (1 << 3)
# define R128_AUX3_SC_EN (1 << 4)
# define R128_AUX3_SC_MODE_OR (0 << 5)
# define R128_AUX3_SC_MODE_NAND (1 << 5)
#define R128_AUX1_SC_LEFT 0x1664
#define R128_AUX1_SC_RIGHT 0x1668
#define R128_AUX1_SC_TOP 0x166c
#define R128_AUX1_SC_BOTTOM 0x1670
#define R128_AUX2_SC_LEFT 0x1674
#define R128_AUX2_SC_RIGHT 0x1678
#define R128_AUX2_SC_TOP 0x167c
#define R128_AUX2_SC_BOTTOM 0x1680
#define R128_AUX3_SC_LEFT 0x1684
#define R128_AUX3_SC_RIGHT 0x1688
#define R128_AUX3_SC_TOP 0x168c
#define R128_AUX3_SC_BOTTOM 0x1690
#define R128_CLOCK_CNTL_INDEX 0x0008
#define R128_CLOCK_CNTL_DATA 0x000c
# define R128_PLL_WR_EN (1 << 7)
#define R128_BUS_CNTL 0x0030
# define R128_BUS_MASTER_DIS (1 << 6)
#define R128_MCLK_CNTL 0x000f
# define R128_FORCE_GCP (1 << 16)
# define R128_FORCE_PIPE3D_CP (1 << 17)
# define R128_FORCE_RCP (1 << 18)
#define R128_CLOCK_CNTL_INDEX 0x0008
#define R128_CLOCK_CNTL_DATA 0x000c
# define R128_PLL_WR_EN (1 << 7)
#define R128_GEN_RESET_CNTL 0x00f0
# define R128_SOFT_RESET_GUI (1 << 0)
#define R128_CONSTANT_COLOR_C 0x1d34
#define R128_PM4_BUFFER_CNTL 0x0704
# define R128_PM4_NONPM4 (0 << 28)
# define R128_PM4_192PIO (1 << 28)
# define R128_PM4_192BM (2 << 28)
# define R128_PM4_128PIO_64INDBM (3 << 28)
# define R128_PM4_128BM_64INDBM (4 << 28)
# define R128_PM4_64PIO_128INDBM (5 << 28)
# define R128_PM4_64BM_128INDBM (6 << 28)
# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
#define R128_DP_GUI_MASTER_CNTL 0x146c
# define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
# define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
# define R128_GMC_BRUSH_SOLID_COLOR (13 << 4)
# define R128_GMC_BRUSH_NONE (15 << 4)
# define R128_GMC_DST_16BPP (4 << 8)
# define R128_GMC_DST_24BPP (5 << 8)
# define R128_GMC_DST_32BPP (6 << 8)
# define R128_GMC_DST_DATATYPE_SHIFT 8
# define R128_GMC_SRC_DATATYPE_COLOR (3 << 12)
# define R128_DP_SRC_SOURCE_MEMORY (2 << 24)
# define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24)
# define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28)
# define R128_GMC_AUX_CLIP_DIS (1 << 29)
# define R128_GMC_WR_MSK_DIS (1 << 30)
# define R128_ROP3_S 0x00cc0000
# define R128_ROP3_P 0x00f00000
#define R128_DP_WRITE_MASK 0x16cc
#define R128_DST_PITCH_OFFSET_C 0x1c80
#define R128_GEN_RESET_CNTL 0x00f0
# define R128_SOFT_RESET_GUI (1 << 0)
#define R128_GUI_SCRATCH_REG0 0x15e0
#define R128_GUI_SCRATCH_REG1 0x15e4
#define R128_GUI_SCRATCH_REG2 0x15e8
#define R128_GUI_SCRATCH_REG3 0x15ec
#define R128_GUI_SCRATCH_REG4 0x15f0
#define R128_GUI_SCRATCH_REG5 0x15f4
#define R128_GUI_STAT 0x1740
# define R128_GUI_FIFOCNT_MASK 0x0fff
# define R128_GUI_ACTIVE (1 << 31)
#define R128_MCLK_CNTL 0x000f
# define R128_FORCE_GCP (1 << 16)
# define R128_FORCE_PIPE3D_CP (1 << 17)
# define R128_FORCE_RCP (1 << 18)
#define R128_PC_GUI_CTLSTAT 0x1748
#define R128_PC_NGUI_CTLSTAT 0x0184
# define R128_PC_FLUSH_GUI (3 << 0)
# define R128_PC_RI_GUI (1 << 2)
# define R128_PC_FLUSH_ALL 0x00ff
# define R128_PC_BUSY (1 << 31)
#define R128_PRIM_TEX_CNTL_C 0x1cb0
#define R128_SCALE_3D_CNTL 0x1a00
#define R128_SEC_TEX_CNTL_C 0x1d00
#define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c
#define R128_SETUP_CNTL 0x1bc4
#define R128_STEN_REF_MASK_C 0x1d40
#define R128_TEX_CNTL_C 0x1c9c
# define R128_TEX_CACHE_FLUSH (1 << 23)
#define R128_WINDOW_XY_OFFSET 0x1bcc
#define R128_PM4_BUFFER_DL_RPTR 0x0710
#define R128_PM4_BUFFER_DL_WPTR 0x0714
# define R128_PM4_BUFFER_DL_DONE (1 << 31)
/* CCE registers
*/
#define R128_PM4_BUFFER_OFFSET 0x0700
#define R128_PM4_BUFFER_CNTL 0x0704
# define R128_PM4_MASK (15 << 28)
# define R128_PM4_NONPM4 (0 << 28)
# define R128_PM4_192PIO (1 << 28)
# define R128_PM4_192BM (2 << 28)
# define R128_PM4_128PIO_64INDBM (3 << 28)
# define R128_PM4_128BM_64INDBM (4 << 28)
# define R128_PM4_64PIO_128INDBM (5 << 28)
# define R128_PM4_64BM_128INDBM (6 << 28)
# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
#define R128_PM4_VC_FPU_SETUP 0x071c
#define R128_PM4_BUFFER_WM_CNTL 0x0708
# define R128_WMA_SHIFT 0
# define R128_WMB_SHIFT 8
# define R128_WMC_SHIFT 16
# define R128_WB_WM_SHIFT 24
#define R128_PM4_STAT 0x07b8
# define R128_PM4_FIFOCNT_MASK 0x0fff
# define R128_PM4_BUSY (1 << 16)
# define R128_PM4_GUI_ACTIVE (1 << 31)
#define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c
#define R128_PM4_BUFFER_DL_RPTR 0x0710
#define R128_PM4_BUFFER_DL_WPTR 0x0714
# define R128_PM4_BUFFER_DL_DONE (1 << 31)
#define R128_PM4_BUFFER_ADDR 0x07f0
#define R128_PM4_MICRO_CNTL 0x07fc
# define R128_PM4_MICRO_FREERUN (1 << 30)
#define R128_PM4_VC_FPU_SETUP 0x071c
#define R128_PM4_FIFO_DATA_EVEN 0x1000
#define R128_PM4_FIFO_DATA_ODD 0x1004
#define R128_PM4_IW_INDOFF 0x0738
#define R128_PM4_IW_INDSIZE 0x073c
#define R128_GUI_SCRATCH_REG0 0x15e0
#define R128_GUI_SCRATCH_REG1 0x15e4
#define R128_GUI_SCRATCH_REG2 0x15e8
#define R128_GUI_SCRATCH_REG3 0x15ec
#define R128_GUI_SCRATCH_REG4 0x15f0
#define R128_GUI_SCRATCH_REG5 0x15f4
#define R128_PM4_STAT 0x07b8
# define R128_PM4_FIFOCNT_MASK 0x0fff
# define R128_PM4_BUSY (1 << 16)
# define R128_PM4_GUI_ACTIVE (1 << 31)
#define R128_GUI_STAT 0x1740
# define R128_GUI_FIFOCNT_MASK 0x0fff
# define R128_GUI_ACTIVE (1 << 31)
#define R128_PM4_MICROCODE_ADDR 0x07d4
#define R128_PM4_MICROCODE_RADDR 0x07d8
#define R128_PM4_MICROCODE_DATAH 0x07dc
#define R128_PM4_MICROCODE_DATAL 0x07e0
#define R128_PM4_BUFFER_ADDR 0x07f0
#define R128_PM4_MICRO_CNTL 0x07fc
# define R128_PM4_MICRO_FREERUN (1 << 30)
#define R128_PM4_FIFO_DATA_EVEN 0x1000
#define R128_PM4_FIFO_DATA_ODD 0x1004
/* CCE command packets */
#define R128_CCE_PACKET0 0x00000000
#define R128_CCE_PACKET1 0x40000000
#define R128_CCE_PACKET2 0x80000000
# define R128_CCE_PACKET_MASK 0xC0000000
# define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
# define R128_CCE_PACKET0_REG_MASK 0x000007ff
# define R128_CCE_PACKET1_REG0_MASK 0x000007ff
# define R128_CCE_PACKET1_REG1_MASK 0x003ff800
/* CCE command packets
*/
#define R128_CCE_PACKET0 0x00000000
#define R128_CCE_PACKET1 0x40000000
#define R128_CCE_PACKET2 0x80000000
#define R128_CCE_PACKET3 0xC0000000
# define R128_CNTL_HOSTDATA_BLT 0x00009400
# define R128_CNTL_PAINT_MULTI 0x00009A00
# define R128_CNTL_BITBLT_MULTI 0x00009B00
# define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300
#define R128_CCE_PACKET_MASK 0xC0000000
#define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
#define R128_CCE_PACKET0_REG_MASK 0x000007ff
#define R128_CCE_PACKET1_REG0_MASK 0x000007ff
#define R128_CCE_PACKET1_REG1_MASK 0x003ff800
#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000
#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001
#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002
#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007
#define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010
#define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020
#define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030
#define R128_CCE_VC_CNTL_NUM_SHIFT 16
#define R128_DATATYPE_CI8 2
#define R128_DATATYPE_ARGB1555 3
#define R128_DATATYPE_RGB565 4
#define R128_DATATYPE_RGB888 5
#define R128_DATATYPE_ARGB8888 6
#define R128_DATATYPE_RGB332 7
#define R128_DATATYPE_RGB8 9
#define R128_DATATYPE_ARGB4444 15
/* Constants */
#define R128_AGP_OFFSET 0x02000000
#define R128_WATERMARK_L 16
#define R128_WATERMARK_M 8
#define R128_WATERMARK_N 8
#define R128_WATERMARK_K 128
#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0
#define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1
#define R128_MAX_VB_AGE 0xffffffff
#define R128_MAX_VB_VERTS (0xffff)
#define R128_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
#define R128_ADDR(reg) (R128_BASE(reg) + reg)
@ -221,4 +403,58 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr);
#define R128CCE2(p) ((p))
#define R128CCE3(p,n) ((p) | ((n) << 16))
#endif
#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \
((n) << 16) | ((reg) >> 2))
#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \
(((reg1) >> 2) << 11) | ((reg0) >> 2))
#define CCE_PACKET2() (R128_CCE_PACKET2)
#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \
(pkt) | ((n) << 16))
#define r128_flush_write_combine() mb()
#define R128_VERBOSE 0
#define RING_LOCALS int write; unsigned int mask; volatile u32 *ring;
#define BEGIN_RING( n ) do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
n, __FUNCTION__ ); \
} \
if ( dev_priv->ring.space < n * sizeof(u32) ) { \
r128_wait_ring( dev_priv, n * sizeof(u32) ); \
} \
dev_priv->ring.space -= n * sizeof(u32); \
ring = dev_priv->ring.start; \
write = dev_priv->ring.tail; \
mask = dev_priv->ring.tail_mask; \
} while (0)
#define ADVANCE_RING() do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "ADVANCE_RING() tail=0x%06x wr=0x%06x\n", \
write, dev_priv->ring.tail ); \
} \
r128_flush_write_combine(); \
dev_priv->ring.tail = write; \
R128_WRITE( R128_PM4_BUFFER_DL_WPTR, write ); \
} while (0)
#define OUT_RING( x ) do { \
if ( R128_VERBOSE ) { \
DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
(unsigned int)(x), write ); \
} \
ring[write++] = x; \
write &= mask; \
} while (0)
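/* Illustrative use of the ring macros above (a sketch, not code taken
 * from the driver itself): emitting a single register write through
 * the CCE ring, where window_xy is a hypothetical value to be written.
 *
 *	RING_LOCALS;
 *
 *	BEGIN_RING( 2 );
 *	OUT_RING( CCE_PACKET0( R128_WINDOW_XY_OFFSET, 0 ) );
 *	OUT_RING( window_xy );
 *	ADVANCE_RING();
 *
 * BEGIN_RING() reserves space in the ring (waiting for the hardware
 * to drain it if necessary), OUT_RING() stores each dword and wraps
 * the index with tail_mask, and ADVANCE_RING() flushes the write
 * combining buffers and bumps the hardware write pointer.
 */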
#define R128_PERFORMANCE_BOXES 0
#endif /* __R128_DRV_H__ */

linux/r128_state.c (new file, 1295 lines): diff suppressed because it is too large

linux/radeon_bufs.c (new file, 298 lines)
View file

@ -0,0 +1,298 @@
/* radeon_bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#define __NO_VERSION__
#include <linux/config.h>
#include "drmP.h"
#include "radeon_drv.h"
#include "linux/un.h"
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
int radeon_addbufs_agp(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
if (!dma) return -EINVAL;
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size):size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request.agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
if (dev->queue_count) return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
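/* Carve the AGP region into count buffers spaced alignment bytes
apart: buf->offset is the byte offset within the DMA space and
buf->address the corresponding AGP linear address. */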
for (offset = 0;
entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
buf->pid = 0;
buf->dev_priv_size = sizeof(drm_radeon_buf_priv_t);
buf->dev_private = drm_alloc(sizeof(drm_radeon_buf_priv_t),
DRM_MEM_BUFS);
memset(buf->dev_private, 0, buf->dev_priv_size);
#if DRM_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
byte_count += PAGE_SIZE << page_order;
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
DRM_DEBUG("byte_count: %d\n", byte_count);
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
up(&dev->struct_sem);
request.count = entry->buf_count;
request.size = size;
if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request)))
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP;
atomic_dec(&dev->buf_alloc);
return 0;
}
#endif
int radeon_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_buf_desc_t request;
if (!dev_priv || dev_priv->is_pci) return -EINVAL;
if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
return -EFAULT;
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
if (request.flags & _DRM_AGP_BUFFER)
return radeon_addbufs_agp(inode, filp, cmd, arg);
else
#endif
return -EINVAL;
}
int radeon_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
unsigned long address;
drm_buf_map_t request;
int i;
if (!dma || !dev_priv || dev_priv->is_pci) return -EINVAL;
DRM_DEBUG("\n");
spin_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
if (copy_from_user(&request, (drm_buf_map_t *)arg, sizeof(request)))
return -EFAULT;
if (request.count >= dma->buf_count) {
if (dma->flags & _DRM_DMA_USE_AGP) {
drm_map_t *map;
map = dev_priv->buffers;
if (!map) {
retcode = -EINVAL;
goto done;
}
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, map->size,
PROT_READ|PROT_WRITE,
MAP_SHARED,
(unsigned long)map->offset);
up(&current->mm->mmap_sem);
} else {
down(&current->mm->mmap_sem);
virtual = do_mmap(filp, 0, dma->byte_count,
PROT_READ|PROT_WRITE, MAP_SHARED, 0);
up(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
/* Real error */
retcode = (signed long)virtual;
goto done;
}
request.virtual = (void *)virtual;
for (i = 0; i < dma->buf_count; i++) {
if (copy_to_user(&request.list[i].idx,
&dma->buflist[i]->idx,
sizeof(request.list[0].idx))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].total,
&dma->buflist[i]->total,
sizeof(request.list[0].total))) {
retcode = -EFAULT;
goto done;
}
if (copy_to_user(&request.list[i].used,
&zero,
sizeof(zero))) {
retcode = -EFAULT;
goto done;
}
address = virtual + dma->buflist[i]->offset;
if (copy_to_user(&request.list[i].address,
&address,
sizeof(address))) {
retcode = -EFAULT;
goto done;
}
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
if (copy_to_user((drm_buf_map_t *)arg, &request, sizeof(request)))
return -EFAULT;
return retcode;
}

linux/radeon_context.c (new file, 215 lines)
View file

@ -0,0 +1,215 @@
/* radeon_context.c -- IOCTLs for Radeon contexts -*- linux-c -*-
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "radeon_drv.h"
extern drm_ctx_t radeon_res_ctx;
static int radeon_alloc_queue(drm_device_t *dev)
{
return drm_ctxbitmap_next(dev);
}
int radeon_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
#if DRM_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
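/* With hardware context switching disabled (DRM_FLAG_NOCTX) the
switch completes immediately; otherwise a "C <old> <new>" request is
written out for the server, and radeon_context_switch_complete()
(reached via radeon_newctx() below) finishes the switch once it has
been performed. */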
if (drm_flags & DRM_FLAG_NOCTX) {
radeon_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
}
return 0;
}
int radeon_context_switch_complete(drm_device_t *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
- dev->ctx_start)]);
#endif
clear_bit(0, &dev->context_flag);
wake_up(&dev->context_wait);
return 0;
}
int radeon_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i], &i, sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
return -EFAULT;
return 0;
}
int radeon_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = radeon_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = radeon_alloc_queue(dev);
}
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int radeon_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
if (ctx.flags==_DRM_CONTEXT_PRESERVED)
radeon_res_ctx.handle=ctx.handle;
return 0;
}
int radeon_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t*)arg, sizeof(ctx)))
return -EFAULT;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
if (copy_to_user((drm_ctx_t*)arg, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int radeon_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return radeon_context_switch(dev, dev->last_context, ctx.handle);
}
int radeon_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
radeon_context_switch_complete(dev, ctx.handle);
return 0;
}
int radeon_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
drm_ctxbitmap_free(dev, ctx.handle);
return 0;
}

linux/radeon_cp.c (new file, 1444 lines): diff suppressed because it is too large

linux/radeon_drm.h (new file, 264 lines)
View file

@ -0,0 +1,264 @@
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#ifndef _RADEON_DRM_H_
#define _RADEON_DRM_H_
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
*/
#ifndef __RADEON_SAREA_DEFINES__
#define __RADEON_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
*/
#define RADEON_UPLOAD_CONTEXT 0x001
#define RADEON_UPLOAD_SETUP 0x002
#define RADEON_UPLOAD_TEX0 0x004
#define RADEON_UPLOAD_TEX1 0x008
#define RADEON_UPLOAD_TEX0IMAGES 0x010
#define RADEON_UPLOAD_TEX1IMAGES 0x020
#define RADEON_UPLOAD_CORE 0x040
#define RADEON_UPLOAD_MASKS 0x080
#define RADEON_UPLOAD_WINDOW 0x100
#define RADEON_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
#define RADEON_REQUIRE_QUIESCENCE 0x400
#define RADEON_UPLOAD_ALL 0x7ff
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
/* Primitive types
*/
#define RADEON_POINTS 0x1
#define RADEON_LINES 0x2
#define RADEON_LINE_STRIP 0x3
#define RADEON_TRIANGLES 0x4
#define RADEON_TRIANGLE_FAN 0x5
#define RADEON_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
*/
#if 1
#define RADEON_BUFFER_SIZE 16384
#else
#define RADEON_BUFFER_SIZE (128 * 1024)
#endif
/* Byte offsets for indirect buffer data
*/
#define RADEON_INDEX_PRIM_OFFSET 20
#define RADEON_HOSTDATA_BLIT_OFFSET 32
/* 2048x2048 @ 32bpp texture requires this many indirect buffers
*/
#define RADEON_MAX_BLIT_BUFFERS ((2048 * 2048 * 4)/RADEON_BUFFER_SIZE)
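/* With the 16 kB buffer size above: (2048 * 2048 * 4) / 16384 = 1024 buffers. */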
/* Keep these small for testing.
*/
#define RADEON_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/AGP). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define RADEON_LOCAL_TEX_HEAP 0
#define RADEON_AGP_TEX_HEAP 1
#define RADEON_NR_TEX_HEAPS 2
#define RADEON_NR_TEX_REGIONS 16
#define RADEON_LOG_TEX_GRANULARITY 16
#define RADEON_NR_CONTEXT_REGS 12
#define RADEON_TEX_MAXLEVELS 11
#endif /* __RADEON_SAREA_DEFINES__ */
typedef struct {
/* Context state - can be written in one large chunk */
unsigned int dst_pitch_offset_c;
unsigned int dp_gui_master_cntl_c;
unsigned int sc_top_left_c;
unsigned int sc_bottom_right_c;
unsigned int z_offset_c;
unsigned int z_pitch_c;
unsigned int z_sten_cntl_c;
unsigned int tex_cntl_c;
unsigned int misc_3d_state_cntl_reg;
unsigned int texture_clr_cmp_clr_c;
unsigned int texture_clr_cmp_msk_c;
unsigned int fog_color_c;
/* Texture state */
unsigned int tex_size_pitch_c;
unsigned int constant_color_c;
/* Setup state */
unsigned int pm4_vc_fpu_setup;
unsigned int setup_cntl;
/* Mask state */
unsigned int dp_write_mask;
unsigned int sten_ref_mask_c;
unsigned int plane_3d_mask_c;
/* Window state */
unsigned int window_xy_offset;
/* Core state */
unsigned int scale_3d_cntl;
} drm_radeon_context_regs_t;
/* Setup registers for each texture unit */
typedef struct {
unsigned int tex_cntl;
unsigned int tex_combine_cntl;
unsigned int tex_size_pitch;
unsigned int tex_offset[RADEON_TEX_MAXLEVELS];
unsigned int tex_border_color;
} drm_radeon_texture_regs_t;
typedef struct drm_radeon_tex_region {
unsigned char next, prev;
unsigned char in_use;
int age;
} drm_radeon_tex_region_t;
typedef struct drm_radeon_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex buffer.
*/
drm_radeon_context_regs_t context_state;
drm_radeon_texture_regs_t tex_state[RADEON_NR_TEX_HEAPS];
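/* Bitmask of the RADEON_UPLOAD_* flags above, indicating which state
 * blocks need to be uploaded before the next buffer is fired.
 */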
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
drm_radeon_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS+1];
int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (xf86drmRADEON.h)
*/
typedef struct drm_radeon_init {
enum {
RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02
} func;
int sarea_priv_offset;
int is_pci;
int cp_mode;
int cp_secure;
int agp_size;
int ring_size;
int usec_timeout;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int front_x, front_y;
unsigned int back_offset, back_pitch;
unsigned int back_x, back_y;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned int depth_x, depth_y;
unsigned int fb_offset;
unsigned int mmio_offset;
unsigned int ring_offset;
unsigned int ring_rptr_offset;
unsigned int buffers_offset;
unsigned int agp_textures_offset;
} drm_radeon_init_t;
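/* Illustrative only (not part of this header): the server-side DRI code
 * is expected to fill in the structure above and hand it to the kernel
 * through the CP init ioctl defined elsewhere in this commit, roughly:
 *
 *	drm_radeon_init_t init;
 *
 *	memset( &init, 0, sizeof(init) );
 *	init.func = RADEON_INIT_CP;
 *	... fill in offsets, pitches and drmAddMap handles ...
 *	ioctl( fd, DRM_IOCTL_RADEON_CP_INIT, &init );
 */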
typedef struct drm_radeon_cp_stop {
int flush;
int idle;
} drm_radeon_cp_stop_t;
typedef struct drm_radeon_clear {
unsigned int flags;
int x, y, w, h;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask;
} drm_radeon_clear_t;
typedef struct drm_radeon_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;
typedef struct drm_radeon_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
typedef struct drm_radeon_blit_rect {
int index;
unsigned short x, y;
unsigned short width, height;
int padding;
} drm_radeon_blit_rect_t;
typedef struct drm_radeon_blit {
int pitch;
int offset;
int format;
drm_radeon_blit_rect_t *rects;
int count;
} drm_radeon_blit_t;
typedef struct drm_radeon_packet {
unsigned int *buffer;
int count;
int flags;
} drm_radeon_packet_t;
#endif
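As a final hedged sketch of how the request structures in this header are meant to be used from the client side (not part of this commit; all field values, the function name and the width/height parameters are placeholders), clearing the front, back and depth buffers would look roughly like this:

        static int radeon_clear_example(int fd, int width, int height)
        {
                drm_radeon_clear_t clear;

                clear.flags       = RADEON_FRONT | RADEON_BACK | RADEON_DEPTH;
                clear.x = 0;  clear.y = 0;  clear.w = width;  clear.h = height;
                clear.clear_color = 0x00000000;
                clear.clear_depth = 0x0000ffff;  /* placeholder for a 16-bit depth buffer */
                clear.color_mask  = 0xffffffff;
                clear.depth_mask  = 0xffffffff;
                return ioctl(fd, DRM_IOCTL_RADEON_CP_CLEAR, &clear);
        }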

719
linux/radeon_drv.c Normal file
View file

@ -0,0 +1,719 @@
/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Kevin E. Martin <martin@valinux.com>
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#include <linux/config.h>
#include "drmP.h"
#include "radeon_drv.h"
#define RADEON_NAME "radeon"
#define RADEON_DESC "ATI Radeon"
#define RADEON_DATE "20001108"
#define RADEON_MAJOR 1
#define RADEON_MINOR 0
#define RADEON_PATCHLEVEL 0
static drm_device_t radeon_device;
drm_ctx_t radeon_res_ctx;
static struct file_operations radeon_fops = {
#if LINUX_VERSION_CODE >= 0x020400
/* This started being used during 2.4.0-test */
owner: THIS_MODULE,
#endif
open: radeon_open,
flush: drm_flush,
release: radeon_release,
ioctl: radeon_ioctl,
mmap: drm_mmap,
read: drm_read,
fasync: drm_fasync,
poll: drm_poll,
};
static struct miscdevice radeon_misc = {
minor: MISC_DYNAMIC_MINOR,
name: RADEON_NAME,
fops: &radeon_fops,
};
static drm_ioctl_desc_t radeon_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { radeon_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { radeon_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { radeon_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { radeon_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { radeon_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { radeon_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { radeon_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { radeon_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { radeon_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { radeon_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { radeon_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { radeon_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
#endif
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)] = { radeon_cp_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)] = { radeon_cp_stop, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)] = { radeon_cp_idle, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_SWAP)] = { radeon_cp_swap, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_CLEAR)] = { radeon_cp_clear, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_VERTEX)] = { radeon_cp_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INDICES)] = { radeon_cp_indices, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_BLIT)] = { radeon_cp_blit, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_PACKET)] = { radeon_cp_packet, 1, 0 },
};
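/* The two integer flags in each entry above are the auth_needed and
 * root_only fields of drm_ioctl_desc_t; radeon_ioctl() below returns
 * -EACCES if the caller is not authenticated or lacks CAP_SYS_ADMIN,
 * respectively.
 */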
#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls)
#ifdef MODULE
static char *radeon = NULL;
#endif
MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_DESCRIPTION("radeon");
MODULE_PARM(radeon, "s");
#ifndef MODULE
/* radeon_options is called by the kernel to parse command-line options
* passed via the boot-loader (e.g., LILO). It calls the insmod option
* routine, drm_parse_options.
*/
static int __init radeon_options(char *str)
{
drm_parse_options(str);
return 1;
}
__setup("radeon=", radeon_options);
#endif
static int radeon_setup(drm_device_t *dev)
{
int i;
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
init_timer(&dev->timer);
init_waitqueue_head(&dev->context_wait);
dev->ctx_start = 0;
dev->lck_start = 0;
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
dev->buf_async = NULL;
init_waitqueue_head(&dev->buf_readers);
init_waitqueue_head(&dev->buf_writers);
radeon_res_ctx.handle = -1;
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int radeon_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
down(&dev->struct_sem);
del_timer(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp
intact until radeon_cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if (entry->bound) drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
if (dev->agp->acquired && drm_agp.release)
(*drm_agp.release)();
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
#endif
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
case _DRM_AGP:
/* Do nothing here, because this is all
handled in the AGP/GART driver. */
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wake_up_interruptible(&dev->lock.lock_queue);
}
up(&dev->struct_sem);
return 0;
}
/* radeon_init is called via init_module at module load time, or via
* linux/init/main.c (this is not currently supported). */
static int radeon_init(void)
{
int retcode;
drm_device_t *dev = &radeon_device;
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
dev->count_lock = SPIN_LOCK_UNLOCKED;
sema_init(&dev->struct_sem, 1);
#ifdef MODULE
drm_parse_options(radeon);
#endif
if ((retcode = misc_register(&radeon_misc))) {
DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME);
return retcode;
}
dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor);
dev->name = RADEON_NAME;
drm_mem_init();
drm_proc_init(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
dev->agp = drm_agp_init();
if (dev->agp == NULL) {
DRM_ERROR("Cannot initialize agpgart module.\n");
drm_proc_cleanup();
misc_deregister(&radeon_misc);
radeon_takedown(dev);
return -ENOMEM;
}
#ifdef CONFIG_MTRR
dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size*1024*1024,
MTRR_TYPE_WRCOMB,
1);
#endif
#endif
if((retcode = drm_ctxbitmap_init(dev))) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
drm_proc_cleanup();
misc_deregister(&radeon_misc);
radeon_takedown(dev);
return retcode;
}
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
RADEON_NAME,
RADEON_MAJOR,
RADEON_MINOR,
RADEON_PATCHLEVEL,
RADEON_DATE,
radeon_misc.minor);
return 0;
}
/* radeon_cleanup is called via cleanup_module at module unload time. */
static void radeon_cleanup(void)
{
drm_device_t *dev = &radeon_device;
DRM_DEBUG("\n");
drm_proc_cleanup();
if (misc_deregister(&radeon_misc)) {
DRM_ERROR("Cannot unload module\n");
} else {
DRM_INFO("Module unloaded\n");
}
drm_ctxbitmap_cleanup(dev);
radeon_takedown(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
if (dev->agp) {
drm_agp_uninit();
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
#endif
}
module_init(radeon_init);
module_exit(radeon_cleanup);
int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_version_t version;
int len;
if (copy_from_user(&version,
(drm_version_t *)arg,
sizeof(version)))
return -EFAULT;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
if (copy_to_user(name, value, len)) \
return -EFAULT; \
}
version.version_major = RADEON_MAJOR;
version.version_minor = RADEON_MINOR;
version.version_patchlevel = RADEON_PATCHLEVEL;
DRM_COPY(version.name, RADEON_NAME);
DRM_COPY(version.date, RADEON_DATE);
DRM_COPY(version.desc, RADEON_DESC);
if (copy_to_user((drm_version_t *)arg,
&version,
sizeof(version)))
return -EFAULT;
return 0;
}
int radeon_open(struct inode *inode, struct file *filp)
{
drm_device_t *dev = &radeon_device;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_inc(&dev->total_open);
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return radeon_setup(dev);
}
spin_unlock(&dev->count_lock);
}
return retcode;
}
int radeon_release(struct inode *inode, struct file *filp)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev;
int retcode = 0;
lock_kernel();
dev = priv->dev;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (!(retcode = drm_release(inode, filp))) {
#if LINUX_VERSION_CODE < 0x020333
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
atomic_inc(&dev->total_close);
spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
spin_unlock(&dev->count_lock);
unlock_kernel();
return -EBUSY;
}
spin_unlock(&dev->count_lock);
unlock_kernel();
return radeon_takedown(dev);
}
spin_unlock(&dev->count_lock);
}
unlock_kernel();
return retcode;
}
/* radeon_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
int nr = DRM_IOCTL_NR(cmd);
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
drm_ioctl_t *func;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
current->pid, cmd, nr, dev->device, priv->authenticated);
if (nr >= RADEON_IOCTL_COUNT) {
retcode = -EINVAL;
} else {
ioctl = &radeon_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = -EACCES;
} else {
retcode = (func)(inode, filp, cmd, arg);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_lock_t lock;
#if DRM_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, current->pid, dev->lock.hw_lock->lock,
lock.flags);
if (lock.context < 0 /* || lock.context >= dev->queue_count */)
return -EINVAL;
if (!ret) {
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
lock.context)) {
dev->lock.pid = current->pid;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
#if 1
current->policy |= SCHED_YIELD;
#endif
schedule();
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
#if 0
if (!ret && dev->last_context != lock.context &&
lock.context != radeon_res_ctx.handle &&
dev->last_context != radeon_res_ctx.handle) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != lock.context */
radeon_context_switch(dev, dev->last_context, lock.context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == lock.context
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
current->policy |= SCHED_YIELD;
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
ret = -EINTR;
} else if (dev->last_context != lock.context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context, lock.context);
}
}
#endif
if (!ret) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
sigaddset(&dev->sigmask, SIGTTIN);
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock.context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
if (lock.flags & _DRM_LOCK_READY) {
/* Wait for space in DMA/FIFO */
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
/* Make hardware quiescent */
DRM_DEBUG("not quiescent!\n");
#if 0
radeon_quiescent(dev);
#endif
}
}
#if LINUX_VERSION_CODE < 0x020400
if (lock.context != radeon_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY/4;
}
#endif
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
#if DRM_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif
return ret;
}
int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_lock_t lock;
if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
return -EFAULT;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
current->pid, lock.context);
return -EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
/* FIXME: Try to send data to card here */
if (!dev->context_flag) {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
#if LINUX_VERSION_CODE < 0x020400
if (lock.context != radeon_res_ctx.handle) {
current->counter = 5;
current->priority = DEF_PRIORITY;
}
#endif
unblock_all_signals();
return 0;
}
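Illustratively (again, not part of the commit), a direct-rendering client holding a real context handle ctx (radeon_lock() rejects the kernel context) would bracket hardware access with the lock ioctls these handlers service; in practice the shared-memory fast path is tried first and the ioctl is only issued on contention. The function name below is a placeholder:

        static void render_with_lock(int fd, int ctx)
        {
                drm_lock_t lock;

                lock.context = ctx;
                lock.flags   = 0;
                ioctl(fd, DRM_IOCTL_LOCK, &lock);       /* serviced by radeon_lock()   */
                /* ... program the hardware ... */
                ioctl(fd, DRM_IOCTL_UNLOCK, &lock);     /* serviced by radeon_unlock() */
        }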

461
linux/radeon_drv.h Normal file
View file

@ -0,0 +1,461 @@
/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#ifndef __RADEON_DRV_H__
#define __RADEON_DRV_H__
typedef struct drm_radeon_freelist {
unsigned int age;
drm_buf_t *buf;
struct drm_radeon_freelist *next;
struct drm_radeon_freelist *prev;
} drm_radeon_freelist_t;
typedef struct drm_radeon_ring_buffer {
u32 *start;
u32 *end;
int size;
int size_l2qw;
volatile u32 *head;
u32 tail;
u32 tail_mask;
int space;
} drm_radeon_ring_buffer_t;
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
drm_radeon_sarea_t *sarea_priv;
int agp_size;
int cp_mode;
int cp_secure;
int cp_running;
drm_radeon_freelist_t *head;
drm_radeon_freelist_t *tail;
int usec_timeout;
int is_pci;
atomic_t idle_count;
unsigned int fb_bpp;
unsigned int front_offset;
unsigned int front_pitch;
unsigned int front_x;
unsigned int front_y;
unsigned int back_offset;
unsigned int back_pitch;
unsigned int back_x;
unsigned int back_y;
unsigned int depth_bpp;
unsigned int depth_offset;
unsigned int depth_pitch;
unsigned int depth_x;
unsigned int depth_y;
drm_map_t *sarea;
drm_map_t *fb;
drm_map_t *mmio;
drm_map_t *cp_ring;
drm_map_t *ring_rptr;
drm_map_t *buffers;
drm_map_t *agp_textures;
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
u32 age;
int prim;
int discard;
int dispatched;
drm_radeon_freelist_t *list_entry;
} drm_radeon_buf_priv_t;
/* radeon_drv.c */
extern int radeon_version( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_open( struct inode *inode, struct file *filp );
extern int radeon_release( struct inode *inode, struct file *filp );
extern int radeon_ioctl( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_lock( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_unlock( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* radeon_cp.c */
extern int radeon_cp_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_start( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_stop( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_reset( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_idle( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_engine_reset( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_packet( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_buffers( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern void radeon_freelist_reset( drm_device_t *dev );
extern drm_buf_t *radeon_freelist_get( drm_device_t *dev );
extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
extern void radeon_update_ring_snapshot( drm_radeon_private_t *dev_priv );
/* radeon_state.c */
extern int radeon_cp_clear( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_swap( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_vertex( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_indices( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int radeon_cp_blit( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
/* radeon_bufs.c */
extern int radeon_addbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_mapbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
/* radeon_context.c */
extern int radeon_resctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_addctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_modctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_getctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_switchctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_newctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_rmctx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int radeon_context_switch(drm_device_t *dev, int old, int new);
extern int radeon_context_switch_complete(drm_device_t *dev, int new);
/* Register definitions, register access macros and drmAddMap constants
* for Radeon kernel driver.
*/
#if 0
#define RADEON_AUX_SC_CNTL 0x1660
# define RADEON_AUX1_SC_EN (1 << 0)
# define RADEON_AUX1_SC_MODE_OR (0 << 1)
# define RADEON_AUX1_SC_MODE_NAND (1 << 1)
# define RADEON_AUX2_SC_EN (1 << 2)
# define RADEON_AUX2_SC_MODE_OR (0 << 3)
# define RADEON_AUX2_SC_MODE_NAND (1 << 3)
# define RADEON_AUX3_SC_EN (1 << 4)
# define RADEON_AUX3_SC_MODE_OR (0 << 5)
# define RADEON_AUX3_SC_MODE_NAND (1 << 5)
#define RADEON_AUX1_SC_LEFT 0x1664
#define RADEON_AUX1_SC_RIGHT 0x1668
#define RADEON_AUX1_SC_TOP 0x166c
#define RADEON_AUX1_SC_BOTTOM 0x1670
#define RADEON_AUX2_SC_LEFT 0x1674
#define RADEON_AUX2_SC_RIGHT 0x1678
#define RADEON_AUX2_SC_TOP 0x167c
#define RADEON_AUX2_SC_BOTTOM 0x1680
#define RADEON_AUX3_SC_LEFT 0x1684
#define RADEON_AUX3_SC_RIGHT 0x1688
#define RADEON_AUX3_SC_TOP 0x168c
#define RADEON_AUX3_SC_BOTTOM 0x1690
#endif
#define RADEON_BUS_CNTL 0x0030
# define RADEON_BUS_MASTER_DIS (1 << 6)
#define RADEON_CLOCK_CNTL_DATA 0x000c
# define RADEON_PLL_WR_EN (1 << 7)
#define RADEON_CLOCK_CNTL_INDEX 0x0008
#define RADEON_CONFIG_APER_SIZE 0x0108
#if 0
#define RADEON_CONSTANT_COLOR_C 0x1d34
#endif
#if 0
#define RADEON_DP_GUI_MASTER_CNTL 0x146c
# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
# define RADEON_GMC_BRUSH_NONE (15 << 4)
# define RADEON_GMC_DST_16BPP (4 << 8)
# define RADEON_GMC_DST_24BPP (5 << 8)
# define RADEON_GMC_DST_32BPP (6 << 8)
# define RADEON_GMC_DST_DATATYPE_SHIFT 8
# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
# define RADEON_GMC_AUX_CLIP_DIS (1 << 29)
# define RADEON_GMC_WR_MSK_DIS (1 << 30)
# define RADEON_ROP3_S 0x00cc0000
# define RADEON_ROP3_P 0x00f00000
#define RADEON_DP_WRITE_MASK 0x16cc
#define RADEON_DST_PITCH_OFFSET_C 0x1c80
#endif
#if 0
#define RADEON_GEN_RESET_CNTL 0x00f0
# define RADEON_SOFT_RESET_GUI (1 << 0)
#endif
#define RADEON_GUI_SCRATCH_REG0 0x15e0
#define RADEON_GUI_SCRATCH_REG1 0x15e4
#define RADEON_GUI_SCRATCH_REG2 0x15e8
#define RADEON_GUI_SCRATCH_REG3 0x15ec
#define RADEON_GUI_SCRATCH_REG4 0x15f0
#define RADEON_GUI_SCRATCH_REG5 0x15f4
#if 0
#define RADEON_GUI_STAT 0x1740
# define RADEON_GUI_FIFOCNT_MASK 0x0fff
# define RADEON_GUI_ACTIVE (1 << 31)
#endif
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_FB_LOCATION 0x0148
#define RADEON_MCLK_CNTL 0x0012
#if 0
#define RADEON_PC_GUI_CTLSTAT 0x1748
#define RADEON_PC_NGUI_CTLSTAT 0x0184
# define RADEON_PC_FLUSH_GUI (3 << 0)
# define RADEON_PC_RI_GUI (1 << 2)
# define RADEON_PC_FLUSH_ALL 0x00ff
# define RADEON_PC_BUSY (1 << 31)
#define RADEON_PRIM_TEX_CNTL_C 0x1cb0
#endif
#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
# define RADEON_RB2D_DC_FLUSH_ALL 0xf
# define RADEON_RB2D_DC_BUSY (1 << 31)
#define RADEON_RBBM_SOFT_RESET 0x00f0
# define RADEON_SOFT_RESET_CP (1 << 0)
# define RADEON_SOFT_RESET_HI (1 << 1)
# define RADEON_SOFT_RESET_SE (1 << 2)
# define RADEON_SOFT_RESET_RE (1 << 3)
# define RADEON_SOFT_RESET_PP (1 << 4)
# define RADEON_SOFT_RESET_E2 (1 << 5)
# define RADEON_SOFT_RESET_RB (1 << 6)
# define RADEON_SOFT_RESET_HDP (1 << 7)
#define RADEON_RBBM_STATUS 0x0e40
# define RADEON_RBBM_FIFOCNT_MASK 0x007f
# define RADEON_RBBM_ACTIVE (1 << 31)
#if 0
#define RADEON_SCALE_3D_CNTL 0x1a00
#define RADEON_SEC_TEX_CNTL_C 0x1d00
#define RADEON_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c
#define RADEON_SETUP_CNTL 0x1bc4
#define RADEON_STEN_REF_MASK_C 0x1d40
#endif
#if 0
#define RADEON_TEX_CNTL_C 0x1c9c
# define RADEON_TEX_CACHE_FLUSH (1 << 23)
#endif
#if 0
#define RADEON_WINDOW_XY_OFFSET 0x1bcc
#endif
/* CP registers */
#define RADEON_CP_ME_RAM_ADDR 0x07d4
#define RADEON_CP_ME_RAM_RADDR 0x07d8
#define RADEON_CP_ME_RAM_DATAH 0x07dc
#define RADEON_CP_ME_RAM_DATAL 0x07e0
#define RADEON_CP_RB_BASE 0x0700
#define RADEON_CP_RB_CNTL 0x0704
#define RADEON_CP_RB_RPTR_ADDR 0x070c
#define RADEON_CP_RB_RPTR 0x0710
#define RADEON_CP_RB_WPTR 0x0714
#define RADEON_CP_RB_WPTR_DELAY 0x0718
# define RADEON_PRE_WRITE_TIMER_SHIFT 0
# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
#define RADEON_CP_CSQ_CNTL 0x0740
# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
#define RADEON_AIC_CNTL 0x01d0
# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
#if 0
/* CP command packets */
#define RADEON_CP_PACKET0 0x00000000
#define RADEON_CP_PACKET1 0x40000000
#define RADEON_CP_PACKET2 0x80000000
#define RADEON_CP_PACKET3 0xC0000000
# define RADEON_CNTL_HOSTDATA_BLT 0x00009400
# define RADEON_CNTL_PAINT_MULTI 0x00009A00
# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
#define RADEON_CP_PACKET_MASK 0xC0000000
#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
#define RADEON_CP_PACKET0_REG_MASK 0x000007ff
#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000
#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001
#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002
#define RADEON_CP_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003
#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007
#define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010
#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020
#define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030
#define RADEON_CP_VC_CNTL_NUM_SHIFT 16
#define RADEON_DATATYPE_CI8 2
#define RADEON_DATATYPE_ARGB1555 3
#define RADEON_DATATYPE_RGB565 4
#define RADEON_DATATYPE_RGB888 5
#define RADEON_DATATYPE_ARGB8888 6
#define RADEON_DATATYPE_RGB332 7
#define RADEON_DATATYPE_RGB8 9
#define RADEON_DATATYPE_ARGB4444 15
#endif
/* Constants */
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0
#define RADEON_LAST_DISPATCH_REG RADEON_GUI_SCRATCH_REG1
#define RADEON_MAX_VB_AGE 0xffffffff
#define RADEON_MAX_VB_VERTS (0xffff)
#define RADEON_BASE(reg) ((u32)(dev_priv->mmio->handle))
#define RADEON_ADDR(reg) (RADEON_BASE(reg) + reg)
#define RADEON_DEREF(reg) *(__volatile__ u32 *)RADEON_ADDR(reg)
#define RADEON_READ(reg) RADEON_DEREF(reg)
#define RADEON_WRITE(reg,val) do { RADEON_DEREF(reg) = val; } while (0)
#define RADEON_DEREF8(reg) *(__volatile__ u8 *)RADEON_ADDR(reg)
#define RADEON_READ8(reg) RADEON_DEREF8(reg)
#define RADEON_WRITE8(reg,val) do { RADEON_DEREF8(reg) = val; } while (0)
#define RADEON_WRITE_PLL(addr,val) \
do { \
RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \
((addr) & 0x1f) | RADEON_PLL_WR_EN); \
RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \
} while (0)
extern int RADEON_READ_PLL(drm_device_t *dev, int addr);
#define CP_PACKET0( reg, n ) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET1( reg0, reg1 ) \
(RADEON_CP_PACKET1 | (((reg1) >> 2) << 11) | ((reg0) >> 2))
#define CP_PACKET2() \
(RADEON_CP_PACKET2)
#define CP_PACKET3( pkt, n ) \
(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
#define radeon_flush_write_combine() mb()
#define RADEON_VERBOSE 0
#define RING_LOCALS int write; unsigned int mask; volatile u32 *ring;
#define BEGIN_RING( n ) do { \
if ( RADEON_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
n, __FUNCTION__ ); \
} \
if ( dev_priv->ring.space < n * sizeof(u32) ) { \
radeon_wait_ring( dev_priv, n * sizeof(u32) ); \
} \
dev_priv->ring.space -= n * sizeof(u32); \
ring = dev_priv->ring.start; \
write = dev_priv->ring.tail; \
mask = dev_priv->ring.tail_mask; \
} while (0)
#define ADVANCE_RING() do { \
if ( RADEON_VERBOSE ) { \
DRM_INFO( "ADVANCE_RING() tail=0x%06x wr=0x%06x\n", \
write, dev_priv->ring.tail ); \
} \
radeon_flush_write_combine(); \
dev_priv->ring.tail = write; \
RADEON_WRITE( RADEON_CP_RB_WPTR, write ); \
} while (0)
#define OUT_RING( x ) do { \
if ( RADEON_VERBOSE ) { \
DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
(unsigned int)(x), write ); \
} \
ring[write++] = x; \
write &= mask; \
} while (0)
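/* Example use of the ring macros above (illustrative, not part of this
 * header): a routine with dev_priv in scope could emit a scratch-register
 * write as a type-0 packet like so, where `frame' is a placeholder value:
 *
 *	RING_LOCALS;
 *
 *	BEGIN_RING( 2 );
 *	OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) );
 *	OUT_RING( frame );
 *	ADVANCE_RING();
 */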
#define RADEON_PERFORMANCE_BOXES 0
#endif /* __RADEON_DRV_H__ */

1334
linux/radeon_state.c Normal file

File diff suppressed because it is too large

View file

@ -1,11 +1,12 @@
#ifndef _sis_drm_h_
#define _sis_drm_h_
#ifndef _sis_drm_public_h_
#define _sis_drm_public_h_
typedef struct {
int context;
unsigned int offset;
unsigned int size;
unsigned long free;
unsigned int free;
} drm_sis_mem_t;
typedef struct {
@ -16,4 +17,15 @@ typedef struct {
unsigned int left, right;
} drm_sis_flip_t;
#define SIS_IOCTL_FB_ALLOC DRM_IOWR( 0x44, drm_sis_mem_t)
#define SIS_IOCTL_FB_FREE DRM_IOW( 0x45, drm_sis_mem_t)
#define SIS_IOCTL_AGP_INIT DRM_IOWR( 0x53, drm_sis_agp_t)
#define SIS_IOCTL_AGP_ALLOC DRM_IOWR( 0x54, drm_sis_mem_t)
#define SIS_IOCTL_AGP_FREE DRM_IOW( 0x55, drm_sis_mem_t)
#define SIS_IOCTL_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define SIS_IOCTL_FLIP_INIT DRM_IO( 0x49)
#define SIS_IOCTL_FLIP_FINAL DRM_IO( 0x50)
#endif

View file

@ -82,6 +82,7 @@ typedef struct drm_clip_rect {
#include "mga_drm.h"
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
#include "sis_drm.h"
typedef struct drm_version {
@ -363,12 +364,32 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_I810_DOCOPY DRM_IO ( 0x48)
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
#define DRM_IOCTL_R128_IDLE DRM_IO( 0x43)
#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
#define DRM_IOCTL_R128_PACKET DRM_IOWR(0x4c, drm_r128_packet_t)
/* Radeon specific ioctls */
#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x40)
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x41, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x42)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x43, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x44)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x45)
#define DRM_IOCTL_RADEON_CP_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_RADEON_CP_CLEAR DRM_IOW( 0x47, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_CP_VERTEX DRM_IOW( 0x48, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_CP_INDICES DRM_IOW( 0x49, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_CP_BLIT DRM_IOW( 0x4a, drm_radeon_blit_t)
#define DRM_IOCTL_RADEON_CP_PACKET DRM_IOWR(0x4b, drm_radeon_packet_t)
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR( 0x44, drm_sis_mem_t)

View file

@ -82,6 +82,7 @@ typedef struct drm_clip_rect {
#include "mga_drm.h"
#include "i810_drm.h"
#include "r128_drm.h"
#include "radeon_drm.h"
#include "sis_drm.h"
typedef struct drm_version {
@ -363,12 +364,32 @@ typedef struct drm_agp_info {
#define DRM_IOCTL_I810_DOCOPY DRM_IO ( 0x48)
/* Rage 128 specific ioctls */
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x41)
#define DRM_IOCTL_R128_FLUSH DRM_IO( 0x42)
#define DRM_IOCTL_R128_IDLE DRM_IO( 0x43)
#define DRM_IOCTL_R128_PACKET DRM_IOW( 0x44, drm_r128_packet_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x45, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INIT DRM_IOW( 0x40, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( 0x41)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( 0x42, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( 0x43)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( 0x44)
#define DRM_IOCTL_R128_RESET DRM_IO( 0x46)
#define DRM_IOCTL_R128_SWAP DRM_IO( 0x47)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( 0x48, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( 0x49, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( 0x4a, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( 0x4b, drm_r128_blit_t)
#define DRM_IOCTL_R128_PACKET DRM_IOWR(0x4c, drm_r128_packet_t)
/* Radeon specific ioctls */
#define DRM_IOCTL_RADEON_RESET DRM_IO( 0x40)
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( 0x41, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( 0x42)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( 0x43, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( 0x44)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( 0x45)
#define DRM_IOCTL_RADEON_CP_SWAP DRM_IO( 0x46)
#define DRM_IOCTL_RADEON_CP_CLEAR DRM_IOW( 0x47, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_CP_VERTEX DRM_IOW( 0x48, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_CP_INDICES DRM_IOW( 0x49, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_CP_BLIT DRM_IOW( 0x4a, drm_radeon_blit_t)
#define DRM_IOCTL_RADEON_CP_PACKET DRM_IOWR(0x4b, drm_radeon_packet_t)
/* SiS specific ioctls */
#define SIS_IOCTL_FB_ALLOC DRM_IOWR( 0x44, drm_sis_mem_t)