mirror of https://gitlab.freedesktop.org/mesa/drm.git
synced 2026-05-05 17:08:03 +02:00
major realignment of DRM CVS with kernel code, makes integration much easier
parent 585f34c3e5
commit 4791dc8856
23 changed files with 658 additions and 606 deletions
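The bulk of the hunks below are mechanical realignments with current kernel style (prototype re-wrapping, pointer spacing, updated initializer macros), with a few small functional tweaks mixed in. One representative pattern, taken from the drm_getmagic() hunk in drm_auth.c further down, is the switch from the deprecated static spinlock initializer to DEFINE_SPINLOCK(). A minimal C sketch of that pattern follows; the helper function is hypothetical and only illustrates usage, it is not part of the commit:

#include <linux/spinlock.h>

/* before (deprecated):  static spinlock_t lock = SPIN_LOCK_UNLOCKED;
 * after, as in the drm_getmagic() hunk below:
 */
static DEFINE_SPINLOCK(lock);

static void example_critical_section(void)	/* hypothetical helper, not in the diff */
{
	spin_lock(&lock);
	/* ... guarded work ... */
	spin_unlock(&lock);
}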
@@ -44,10 +44,17 @@ LINUXDIR := $(shell if [ -e /lib/modules/$(RUNNING_REL)/source ]; then \
endif

ifndef O
O := $(LINUXDIR)
O := $(shell if [ -e /lib/modules/$(RUNNING_REL)/build ]; then \
	echo /lib/modules/$(RUNNING_REL)/build; \
	else echo ""; fi)
#O := $(LINUXDIR)
endif

ifdef ARCH
MACHINE := $(ARCH)
else
MACHINE := $(shell uname -m)
endif

# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
@@ -61,7 +68,7 @@ endif

ifneq (,$(findstring sparc64,$(MACHINE)))
ARCHSPARC64 := 1
MODULE_LIST += ffb.o
#MODULE_LIST += ffb.o
endif

DRM_MODULES ?= $(MODULE_LIST)
@@ -370,7 +377,6 @@ endif

# These require AGP support

ifdef CONFIG_AGP
ifneq (,$(findstring i810,$(DRM_MODULES)))
CONFIG_DRM_I810 := m
endif
@@ -380,7 +386,6 @@ endif
ifneq (,$(findstring i915,$(DRM_MODULES)))
CONFIG_DRM_I915 := m
endif
endif

include $(DRMSRCDIR)/Makefile.kernel

@@ -91,7 +91,7 @@ static void drm_ati_free_pcigart_table(void *address)
	free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
}

int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info)
int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
	drm_sg_mem_t *entry = dev->sg;
	unsigned long pages;
@@ -125,7 +125,8 @@ int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info)
	}


	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN && gart_info->addr) {
	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
	    && gart_info->addr) {
		drm_ati_free_pcigart_table(gart_info->addr);
		gart_info->addr=0;
	}
@@ -147,8 +148,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info)
		goto done;
	}

	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
	{
	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");

		address = drm_ati_alloc_pcigart_table();
@@ -224,5 +224,3 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info)
	return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);

@@ -555,7 +555,7 @@ struct drm_driver {
	int (*context_dtor) (struct drm_device * dev, int context);
	int (*kernel_context_switch) (struct drm_device * dev, int old,
				      int new);
	int (*kernel_context_switch_unlock) (struct drm_device * dev);
	void (*kernel_context_switch_unlock) (struct drm_device * dev);
	int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
	int (*dri_library_name) (struct drm_device * dev, char * buf);
@@ -859,8 +859,6 @@ extern int drm_newctx(struct inode *inode, struct file *filp,
extern int drm_rmctx(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg);

extern int drm_context_switch_complete(drm_device_t * dev, int new);

extern int drm_ctxbitmap_init(drm_device_t * dev);
extern void drm_ctxbitmap_cleanup(drm_device_t * dev);
extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle);

@@ -88,7 +88,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
}

/**
 * Acquire the AGP device
 * Acquire the AGP device.
 *
 * \param dev DRM device that is to acquire AGP.
 * \return zero on success or a negative number on failure.
@@ -144,7 +144,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
}

/**
 * Release the AGP device
 * Release the AGP device.
 *
 * \param dev DRM device that is to release AGP.
 * \return zero on success or a negative number on failure.
@@ -519,7 +519,8 @@ DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type)
	return agp_allocate_memory(pages, type);
}
#else
DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type)
DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
				     size_t pages, u32 type)
{
	return agp_allocate_memory(bridge, pages, type);
}

@@ -87,7 +87,8 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
 * associated the magic number hash key in drm_device::magiclist, while holding
 * the drm_device::struct_sem lock.
 */
static int drm_add_magic(drm_device_t * dev, drm_file_t * priv, drm_magic_t magic)
static int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
			 drm_magic_t magic)
{
	int hash;
	drm_magic_entry_t *entry;
@@ -174,7 +175,7 @@ int drm_getmagic(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	static drm_magic_t sequence = 0;
	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
	static DEFINE_SPINLOCK(lock);
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_auth_t auth;

@ -65,7 +65,6 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Used to allocate 32-bit handles for mappings.
|
||||
*/
|
||||
|
|
@ -73,7 +72,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
|
|||
#define END_RANGE 0x40000000
|
||||
|
||||
#ifdef _LP64
|
||||
static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
|
||||
static __inline__ unsigned int HandleID(unsigned long lhandle,
|
||||
drm_device_t *dev)
|
||||
{
|
||||
static unsigned int map32_handle = START_RANGE;
|
||||
unsigned int hash;
|
||||
|
|
@ -83,12 +83,12 @@ static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev
|
|||
map32_handle += PAGE_SIZE;
|
||||
if (map32_handle > END_RANGE)
|
||||
map32_handle = START_RANGE;
|
||||
} else
|
||||
} else
|
||||
hash = lhandle;
|
||||
|
||||
while (1) {
|
||||
drm_map_list_t *_entry;
|
||||
list_for_each_entry(_entry, &dev->maplist->head,head) {
|
||||
list_for_each_entry(_entry, &dev->maplist->head, head) {
|
||||
if (_entry->user_token == hash)
|
||||
break;
|
||||
}
|
||||
|
|
@ -116,7 +116,7 @@ static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev
|
|||
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
|
||||
* applicable and if supported by the kernel.
|
||||
*/
|
||||
int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
||||
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
||||
unsigned int size, drm_map_type_t type,
|
||||
drm_map_flags_t flags, drm_map_list_t ** maplist)
|
||||
{
|
||||
|
|
@ -269,8 +269,8 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
}
|
||||
map->offset += (unsigned long)dev->sg->virtual;
|
||||
break;
|
||||
case _DRM_CONSISTENT: {
|
||||
/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
|
||||
case _DRM_CONSISTENT:
|
||||
/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
|
||||
* As we're limiting the address to 2^32-1 (or less),
|
||||
* casting it down to 32 bits is no problem, but we
|
||||
* need to point to a 64bit variable first. */
|
||||
|
|
@ -283,7 +283,6 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
map->offset = (unsigned long)dmah->busaddr;
|
||||
kfree(dmah);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
return -EINVAL;
|
||||
|
|
@ -301,7 +300,7 @@ int drm_addmap_core(drm_device_t * dev, unsigned int offset,
|
|||
list_add(&list->head, &dev->maplist->head);
|
||||
/* Assign a 32-bit handle */
|
||||
/* We do it here so that dev->struct_sem protects the increment */
|
||||
list->user_token = HandleID(map->type==_DRM_SHM
|
||||
list->user_token = HandleID(map->type == _DRM_SHM
|
||||
? (unsigned long)map->handle
|
||||
: map->offset, dev);
|
||||
up(&dev->struct_sem);
|
||||
|
|
@ -338,22 +337,24 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
|
|||
if (!(filp->f_mode & 3))
|
||||
return -EACCES; /* Require read/write */
|
||||
|
||||
if (copy_from_user(& map, argp, sizeof(map))) {
|
||||
if (copy_from_user(&map, argp, sizeof(map))) {
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
|
||||
return -EPERM;
|
||||
|
||||
err = drm_addmap_core( dev, map.offset, map.size, map.type, map.flags,
|
||||
&maplist);
|
||||
err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
|
||||
&maplist);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
|
||||
return -EFAULT;
|
||||
if (put_user((void *)maplist->user_token, &argp->handle))
|
||||
|
||||
/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
|
||||
if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -405,9 +406,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
|
|||
case _DRM_FRAME_BUFFER:
|
||||
if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
|
||||
int retcode;
|
||||
retcode = mtrr_del(map->mtrr, map->offset,
|
||||
map->size);
|
||||
DRM_DEBUG ("mtrr_del=%d\n", retcode);
|
||||
retcode = mtrr_del(map->mtrr, map->offset, map->size);
|
||||
DRM_DEBUG("mtrr_del=%d\n", retcode);
|
||||
}
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
|
|
@ -469,7 +469,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
|
|||
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
|
||||
|
||||
if (r_list->map &&
|
||||
r_list->user_token == (unsigned long) request.handle &&
|
||||
r_list->user_token == (unsigned long)request.handle &&
|
||||
r_list->map->flags & _DRM_REMOVABLE) {
|
||||
map = r_list->map;
|
||||
break;
|
||||
|
|
@ -874,7 +874,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = dev->driver->dev_priv_size;
|
||||
buf->dev_private = drm_alloc(dev->driver->dev_priv_size,
|
||||
buf->dev_private = drm_alloc(buf->dev_priv_size,
|
||||
DRM_MEM_BUFS);
|
||||
if (!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
|
|
@ -1040,7 +1040,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
|
||||
buf->offset = (dma->byte_count + offset);
|
||||
buf->bus_address = agp_offset + offset;
|
||||
buf->address = (void *)(agp_offset + offset
|
||||
buf->address = (void *)(agp_offset + offset
|
||||
+ (unsigned long)dev->sg->virtual);
|
||||
buf->next = NULL;
|
||||
buf->waiting = 0;
|
||||
|
|
@ -1049,8 +1049,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
buf->filp = NULL;
|
||||
|
||||
buf->dev_priv_size = dev->driver->dev_priv_size;
|
||||
buf->dev_private = drm_alloc(dev->driver->dev_priv_size,
|
||||
DRM_MEM_BUFS);
|
||||
buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
|
||||
if (!buf->dev_private) {
|
||||
/* Set count correctly so we free the proper amount. */
|
||||
entry->buf_count = count;
|
||||
|
|
@ -1107,7 +1106,6 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
|
||||
{
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
|
|
@ -1127,7 +1125,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
|
|||
|
||||
if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
if (!dma)
|
||||
return -EINVAL;
|
||||
|
||||
|
|
@ -1292,7 +1290,6 @@ int drm_addbufs(struct inode *inode, struct file *filp,
|
|||
drm_device_t *dev = priv->head->dev;
|
||||
int ret;
|
||||
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
|
||||
return -EINVAL;
|
||||
|
||||
|
|
@ -1301,25 +1298,23 @@ int drm_addbufs(struct inode *inode, struct file *filp,
|
|||
return -EFAULT;
|
||||
|
||||
#if __OS_HAS_AGP
|
||||
if (request.flags & _DRM_AGP_BUFFER) {
|
||||
ret = drm_addbufs_agp(dev, & request);
|
||||
}
|
||||
if (request.flags & _DRM_AGP_BUFFER)
|
||||
ret = drm_addbufs_agp(dev, &request);
|
||||
else
|
||||
#endif
|
||||
if (request.flags & _DRM_SG_BUFFER)
|
||||
ret = drm_addbufs_sg(dev, & request);
|
||||
else if (request.flags & _DRM_FB_BUFFER)
|
||||
ret = drm_addbufs_fb(dev, & request);
|
||||
else
|
||||
ret = drm_addbufs_pci(dev, & request);
|
||||
ret = drm_addbufs_sg(dev, &request);
|
||||
else if (request.flags & _DRM_FB_BUFFER)
|
||||
ret = drm_addbufs_fb(dev, &request);
|
||||
else
|
||||
ret = drm_addbufs_pci(dev, &request);
|
||||
|
||||
if (ret == 0) {
|
||||
if (copy_to_user( (void __user *) arg, &request,
|
||||
if (copy_to_user((void __user *) arg, &request,
|
||||
sizeof(request))) {
|
||||
ret = -EFAULT;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -1566,10 +1561,10 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
|
|||
|
||||
if (request.count >= dma->buf_count) {
|
||||
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
|
||||
|| (drm_core_check_feature(dev, DRIVER_SG)
|
||||
&& (dma->flags & _DRM_DMA_USE_SG))
|
||||
|| (drm_core_check_feature(dev, DRIVER_FB_DMA)
|
||||
&& (dma->flags & _DRM_DMA_USE_FB))) {
|
||||
|| (drm_core_check_feature(dev, DRIVER_SG)
|
||||
&& (dma->flags & _DRM_DMA_USE_SG))
|
||||
|| (drm_core_check_feature(dev, DRIVER_FB_DMA)
|
||||
&& (dma->flags & _DRM_DMA_USE_FB))) {
|
||||
drm_map_t *map = dev->agp_buffer_map;
|
||||
unsigned long token = dev->agp_buffer_token;
|
||||
|
||||
|
|
@ -1658,13 +1653,12 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
|
|||
*
|
||||
* \todo Can be made faster.
|
||||
*/
|
||||
int drm_order( unsigned long size )
|
||||
int drm_order(unsigned long size)
|
||||
{
|
||||
int order;
|
||||
unsigned long tmp;
|
||||
|
||||
for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
|
||||
;
|
||||
for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
|
||||
|
||||
if (size & (size - 1))
|
||||
++order;
|
||||
|
|
|
|||
|
|
@ -235,7 +235,8 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
|
|||
request.handle = NULL;
|
||||
list_for_each_entry(_entry, &dev->maplist->head,head) {
|
||||
if (_entry->map == map) {
|
||||
request.handle = (void *)(unsigned long)_entry->user_token;
|
||||
request.handle =
|
||||
(void *)(unsigned long)_entry->user_token;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
@ -341,7 +342,7 @@ static int drm_context_switch(drm_device_t * dev, int old, int new)
|
|||
* hardware lock is held, clears the drm_device::context_flag and wakes up
|
||||
* drm_device::context_wait.
|
||||
*/
|
||||
int drm_context_switch_complete(drm_device_t * dev, int new)
|
||||
static int drm_context_switch_complete(drm_device_t * dev, int new)
|
||||
{
|
||||
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
|
||||
dev->last_switch = jiffies;
|
||||
|
|
|
|||
|
|
@ -57,7 +57,7 @@ static int drm_version(struct inode *inode, struct file *filp,
|
|||
unsigned int cmd, unsigned long arg);
|
||||
|
||||
/** Ioctl table */
|
||||
drm_ioctl_desc_t drm_ioctls[] = {
|
||||
static drm_ioctl_desc_t drm_ioctls[] = {
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
|
||||
|
|
|
|||
|
|
@ -34,20 +34,24 @@
|
|||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/poll.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm_sarea.h"
|
||||
#include <linux/poll.h>
|
||||
|
||||
static int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t * dev);
|
||||
static int drm_open_helper(struct inode *inode, struct file *filp,
|
||||
drm_device_t * dev);
|
||||
|
||||
static int drm_setup(drm_device_t * dev)
|
||||
{
|
||||
drm_local_map_t *map;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
if (dev->driver->firstopen)
|
||||
dev->driver->firstopen(dev);
|
||||
if (dev->driver->firstopen) {
|
||||
ret = dev->driver->firstopen(dev);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* prebuild the SAREA */
|
||||
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
|
||||
|
|
@ -223,7 +227,8 @@ static int drm_cpu_valid(void)
|
|||
* Creates and initializes a drm_file structure for the file private data in \p
|
||||
* filp and add it into the double linked list in \p dev.
|
||||
*/
|
||||
static int drm_open_helper(struct inode *inode, struct file *filp, drm_device_t * dev)
|
||||
static int drm_open_helper(struct inode *inode, struct file *filp,
|
||||
drm_device_t * dev)
|
||||
{
|
||||
int minor = iminor(inode);
|
||||
drm_file_t *priv;
|
||||
|
|
@ -325,7 +330,7 @@ EXPORT_SYMBOL(drm_fasync);
|
|||
* If the hardware lock is held then free it, and take it again for the kernel
|
||||
* context since it's necessary to reclaim buffers. Unlink the file private
|
||||
* data from its list and free it. Decreases the open count and if it reaches
|
||||
* zero calls takedown().
|
||||
* zero calls drm_lastclose().
|
||||
*/
|
||||
int drm_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -36,7 +36,8 @@
|
|||
#include "drmP.h"
|
||||
|
||||
static int drm_lock_transfer(drm_device_t * dev,
|
||||
__volatile__ unsigned int *lock, unsigned int context);
|
||||
__volatile__ unsigned int *lock,
|
||||
unsigned int context);
|
||||
static int drm_notifier(void *priv);
|
||||
|
||||
/**
|
||||
|
|
@ -125,8 +126,8 @@ int drm_lock(struct inode *inode, struct file *filp,
|
|||
}
|
||||
}
|
||||
|
||||
if (dev->driver->kernel_context_switch
|
||||
&& dev->last_context != lock.context) {
|
||||
if (dev->driver->kernel_context_switch &&
|
||||
dev->last_context != lock.context) {
|
||||
dev->driver->kernel_context_switch(dev, dev->last_context,
|
||||
lock.context);
|
||||
}
|
||||
|
|
@ -163,8 +164,11 @@ int drm_unlock(struct inode *inode, struct file *filp,
|
|||
|
||||
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
|
||||
|
||||
/* kernel_context_switch isn't used by any of the x86 drm
|
||||
* modules but is required by the Sparc driver.
|
||||
*/
|
||||
if (dev->driver->kernel_context_switch_unlock)
|
||||
dev->driver->kernel_context_switch_unlock(dev);
|
||||
dev->driver->kernel_context_switch_unlock(dev, &lock);
|
||||
else {
|
||||
drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT);
|
||||
|
|
@ -229,7 +233,8 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
|
|||
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
|
||||
*/
|
||||
static int drm_lock_transfer(drm_device_t * dev,
|
||||
__volatile__ unsigned int *lock, unsigned int context)
|
||||
__volatile__ unsigned int *lock,
|
||||
unsigned int context)
|
||||
{
|
||||
unsigned int old, new, prev;
|
||||
|
||||
|
|
|
|||
|
|
@ -16,18 +16,21 @@
|
|||
#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
|
||||
#define DRM_UDELAY(d) udelay(d)
|
||||
#if LINUX_VERSION_CODE <= 0x020608 /* KERNEL_VERSION(2,6,8) */
|
||||
#ifndef __iomem
|
||||
#define __iomem
|
||||
#endif
|
||||
/** Read a byte from a MMIO region */
|
||||
#define DRM_READ8(map, offset) readb(((unsigned long)(map)->handle) + (offset))
|
||||
#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
|
||||
/** Read a word from a MMIO region */
|
||||
#define DRM_READ16(map, offset) readw(((unsigned long)(map)->handle) + (offset))
|
||||
#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
|
||||
/** Read a dword from a MMIO region */
|
||||
#define DRM_READ32(map, offset) readl(((unsigned long)(map)->handle) + (offset))
|
||||
#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
|
||||
/** Write a byte into a MMIO region */
|
||||
#define DRM_WRITE8(map, offset, val) writeb(val, ((unsigned long)(map)->handle) + (offset))
|
||||
#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
|
||||
/** Write a word into a MMIO region */
|
||||
#define DRM_WRITE16(map, offset, val) writew(val, ((unsigned long)(map)->handle) + (offset))
|
||||
#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
|
||||
/** Write a dword into a MMIO region */
|
||||
#define DRM_WRITE32(map, offset, val) writel(val, ((unsigned long)(map)->handle) + (offset))
|
||||
#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
|
||||
#else
|
||||
/** Read a byte from a MMIO region */
|
||||
#define DRM_READ8(map, offset) readb((map)->handle + (offset))
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
|
|||
unsigned long addr;
|
||||
size_t sz;
|
||||
#endif
|
||||
#if DRM_DEBUG_MEMORY
|
||||
#ifdef DRM_DEBUG_MEMORY
|
||||
int area = DRM_MEM_DMA;
|
||||
|
||||
spin_lock(&drm_mem_lock);
|
||||
|
|
@ -85,7 +85,7 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
|
|||
dmah->size = size;
|
||||
dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
|
||||
|
||||
#if DRM_DEBUG_MEMORY
|
||||
#ifdef DRM_DEBUG_MEMORY
|
||||
if (dmah->vaddr == NULL) {
|
||||
spin_lock(&drm_mem_lock);
|
||||
++drm_mem_stats[area].fail_count;
|
||||
|
|
@ -126,21 +126,20 @@ EXPORT_SYMBOL(drm_pci_alloc);
|
|||
*
|
||||
* This function is for internal use in the Linux-specific DRM core code.
|
||||
*/
|
||||
void
|
||||
__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
|
||||
void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
|
||||
{
|
||||
#if 0
|
||||
unsigned long addr;
|
||||
size_t sz;
|
||||
#endif
|
||||
#if DRM_DEBUG_MEMORY
|
||||
#ifdef DRM_DEBUG_MEMORY
|
||||
int area = DRM_MEM_DMA;
|
||||
int alloc_count;
|
||||
int free_count;
|
||||
#endif
|
||||
|
||||
if (!dmah->vaddr) {
|
||||
#if DRM_DEBUG_MEMORY
|
||||
#ifdef DRM_DEBUG_MEMORY
|
||||
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
|
||||
#endif
|
||||
} else {
|
||||
|
|
@ -156,7 +155,7 @@ __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
|
|||
dmah->busaddr);
|
||||
}
|
||||
|
||||
#if DRM_DEBUG_MEMORY
|
||||
#ifdef DRM_DEBUG_MEMORY
|
||||
spin_lock(&drm_mem_lock);
|
||||
free_count = ++drm_mem_stats[area].free_count;
|
||||
alloc_count = drm_mem_stats[area].succeed_count;
|
||||
|
|
@ -175,8 +174,7 @@ __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
|
|||
/**
|
||||
* \brief Free a PCI consistent memory block.
|
||||
*/
|
||||
void
|
||||
drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
|
||||
void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
|
||||
{
|
||||
__drm_pci_free(dev, dmah);
|
||||
kfree(dmah);
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ drm_head_t **drm_heads;
|
|||
struct drm_sysfs_class *drm_class;
|
||||
struct proc_dir_entry *drm_proc_root;
|
||||
|
||||
static int fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
|
||||
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
|
||||
const struct pci_device_id *ent,
|
||||
struct drm_driver *driver)
|
||||
{
|
||||
|
|
@ -216,7 +216,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
|
|||
pci_request_regions(pdev, driver->pci_driver.name);
|
||||
pci_enable_device(pdev);
|
||||
}
|
||||
if ((ret = fill_in_dev(dev, pdev, ent, driver))) {
|
||||
if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
|
||||
printk(KERN_ERR "DRM: fill_in_dev failed\n");
|
||||
goto err_g1;
|
||||
}
|
||||
if ((ret = drm_get_head(dev, &dev->primary)))
|
||||
|
|
|
|||
|
|
@ -144,8 +144,8 @@ static struct class_device_attribute class_device_attrs[] = {
|
|||
* Note: the struct drm_sysfs_class passed to this function must have previously been
|
||||
* created with a call to drm_sysfs_create().
|
||||
*/
|
||||
struct class_device *drm_sysfs_device_add(
|
||||
struct drm_sysfs_class *cs, drm_head_t * head)
|
||||
struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
|
||||
drm_head_t * head)
|
||||
{
|
||||
struct simple_dev *s_dev = NULL;
|
||||
int i, retval;
|
||||
|
|
|
|||
|
|
@ -38,8 +38,8 @@
|
|||
#include <linux/efi.h>
|
||||
#endif
|
||||
|
||||
static void drm_vm_close(struct vm_area_struct *vma);
|
||||
static void drm_vm_open(struct vm_area_struct *vma);
|
||||
static void drm_vm_close(struct vm_area_struct *vma);
|
||||
|
||||
/**
|
||||
* \c nopage method for AGP virtual memory.
|
||||
|
|
|
|||
|
|
@ -130,10 +130,9 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
|
|||
buf_priv->currently_mapped = I810_BUF_MAPPED;
|
||||
unlock_kernel();
|
||||
|
||||
if (remap_pfn_range(vma, vma->vm_start,
|
||||
if (io_remap_pfn_range(vma, vma->vm_start,
|
||||
VM_OFFSET(vma) >> PAGE_SHIFT,
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot))
|
||||
vma->vm_end - vma->vm_start, vma->vm_page_prot))
|
||||
return -EAGAIN;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1144,8 +1143,8 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
|
|||
return retcode;
|
||||
}
|
||||
|
||||
static int i810_copybuf(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
static int i810_copybuf(struct inode *inode,
|
||||
struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
/* Never copy - 2.4.x doesn't need it */
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
|
||||
*/
|
||||
/**************************************************************************
|
||||
/*
|
||||
*
|
||||
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
* All Rights Reserved.
|
||||
|
|
@ -25,7 +25,7 @@
|
|||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
|
|
|
|||
|
|
@ -51,7 +51,8 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
|
|||
drm_i915_batchbuffer32_t batchbuffer32;
|
||||
drm_i915_batchbuffer_t __user *batchbuffer;
|
||||
|
||||
if (copy_from_user(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
|
||||
if (copy_from_user
|
||||
(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
|
||||
return -EFAULT;
|
||||
|
||||
batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
|
||||
|
|
@ -60,13 +61,15 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
|
|||
|| __put_user(batchbuffer32.used, &batchbuffer->used)
|
||||
|| __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
|
||||
|| __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
|
||||
|| __put_user(batchbuffer32.num_cliprects, &batchbuffer->num_cliprects)
|
||||
|| __put_user(batchbuffer32.num_cliprects,
|
||||
&batchbuffer->num_cliprects)
|
||||
|| __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
|
||||
&batchbuffer->cliprects))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_dentry->d_inode, file,
|
||||
DRM_IOCTL_I915_BATCHBUFFER, (unsigned long) batchbuffer);
|
||||
DRM_IOCTL_I915_BATCHBUFFER,
|
||||
(unsigned long) batchbuffer);
|
||||
}
|
||||
|
||||
typedef struct _drm_i915_cmdbuffer32 {
|
||||
|
|
@ -84,7 +87,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
|
|||
drm_i915_cmdbuffer32_t cmdbuffer32;
|
||||
drm_i915_cmdbuffer_t __user *cmdbuffer;
|
||||
|
||||
if (copy_from_user(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
|
||||
if (copy_from_user
|
||||
(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
|
||||
return -EFAULT;
|
||||
|
||||
cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
|
||||
|
|
@ -197,8 +201,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
|
|||
* \param arg user argument.
|
||||
* \return zero on success or negative number on failure.
|
||||
*/
|
||||
long i915_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
unsigned int nr = DRM_IOCTL_NR(cmd);
|
||||
drm_ioctl_compat_t *fn = NULL;
|
||||
|
|
|
|||
|
|
@ -39,6 +39,8 @@
|
|||
#include "via_drv.h"
|
||||
#include "via_dmablit.h"
|
||||
|
||||
#include <linux/pagemap.h>
|
||||
|
||||
#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
|
||||
#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
|
||||
#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
|
||||
|
|
@ -106,7 +108,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
|
|||
int num_desc = 0;
|
||||
int cur_line;
|
||||
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
|
||||
drm_via_descriptor_t *desc_ptr = 0;
|
||||
drm_via_descriptor_t *desc_ptr = NULL;
|
||||
|
||||
if (mode == 1)
|
||||
desc_ptr = vsg->desc_pages[cur_descriptor_page];
|
||||
|
|
@ -585,7 +587,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
|
|||
int ret = 0;
|
||||
|
||||
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
|
||||
vsg->bounce_buffer = 0;
|
||||
vsg->bounce_buffer = NULL;
|
||||
|
||||
vsg->state = dr_via_sg_init;
|
||||
|
||||
|
|
|
|||
|
|
@ -37,7 +37,6 @@
|
|||
#include "radeon_drv.h"
|
||||
#include "r300_reg.h"
|
||||
|
||||
|
||||
#define R300_SIMULTANEOUS_CLIPRECTS 4
|
||||
|
||||
/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
|
||||
|
|
@ -49,14 +48,12 @@ static const int r300_cliprect_cntl[4] = {
|
|||
0xFFFE
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
|
||||
* buffer, starting with index n.
|
||||
*/
|
||||
static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
|
||||
drm_radeon_kcmd_buffer_t* cmdbuf,
|
||||
int n)
|
||||
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf, int n)
|
||||
{
|
||||
drm_clip_rect_t box;
|
||||
int nr;
|
||||
|
|
@ -70,38 +67,47 @@ static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
|
|||
DRM_DEBUG("%i cliprects\n", nr);
|
||||
|
||||
if (nr) {
|
||||
BEGIN_RING(6 + nr*2);
|
||||
OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
|
||||
BEGIN_RING(6 + nr * 2);
|
||||
OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
|
||||
|
||||
for(i = 0; i < nr; ++i) {
|
||||
if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
|
||||
for (i = 0; i < nr; ++i) {
|
||||
if (DRM_COPY_FROM_USER_UNCHECKED
|
||||
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
|
||||
DRM_ERROR("copy cliprect faulted\n");
|
||||
return DRM_ERR(EFAULT);
|
||||
}
|
||||
|
||||
box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
box.x1 =
|
||||
(box.x1 +
|
||||
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
box.y1 =
|
||||
(box.y1 +
|
||||
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
box.x2 =
|
||||
(box.x2 +
|
||||
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
box.y2 =
|
||||
(box.y2 +
|
||||
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
|
||||
|
||||
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
|
||||
(box.y1 << R300_CLIPRECT_Y_SHIFT));
|
||||
(box.y1 << R300_CLIPRECT_Y_SHIFT));
|
||||
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
|
||||
(box.y2 << R300_CLIPRECT_Y_SHIFT));
|
||||
(box.y2 << R300_CLIPRECT_Y_SHIFT));
|
||||
}
|
||||
|
||||
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
|
||||
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
|
||||
|
||||
/* TODO/SECURITY: Force scissors to a safe value, otherwise the
|
||||
* client might be able to trample over memory.
|
||||
* The impact should be very limited, but I'd rather be safe than
|
||||
* sorry.
|
||||
*/
|
||||
OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
|
||||
OUT_RING( 0 );
|
||||
OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
|
||||
* client might be able to trample over memory.
|
||||
* The impact should be very limited, but I'd rather be safe than
|
||||
* sorry.
|
||||
*/
|
||||
OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
|
||||
OUT_RING(0);
|
||||
OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
|
||||
ADVANCE_RING();
|
||||
} else {
|
||||
} else {
|
||||
/* Why we allow zero cliprect rendering:
|
||||
* There are some commands in a command buffer that must be submitted
|
||||
* even when there are no cliprects, e.g. DMA buffer discard
|
||||
|
|
@ -118,28 +124,27 @@ static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
|
|||
* can't produce any fragments.
|
||||
*/
|
||||
BEGIN_RING(2);
|
||||
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
|
||||
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
|
||||
ADVANCE_RING();
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u8 r300_reg_flags[0x10000>>2];
|
||||
|
||||
static u8 r300_reg_flags[0x10000 >> 2];
|
||||
|
||||
void r300_init_reg_flags(void)
|
||||
{
|
||||
int i;
|
||||
memset(r300_reg_flags, 0, 0x10000>>2);
|
||||
#define ADD_RANGE_MARK(reg, count,mark) \
|
||||
memset(r300_reg_flags, 0, 0x10000 >> 2);
|
||||
#define ADD_RANGE_MARK(reg, count,mark) \
|
||||
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
|
||||
r300_reg_flags[i]|=(mark);
|
||||
|
||||
#define MARK_SAFE 1
|
||||
#define MARK_CHECK_OFFSET 2
|
||||
|
||||
#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
|
||||
|
||||
#define MARK_SAFE 1
|
||||
#define MARK_CHECK_OFFSET 2
|
||||
|
||||
#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
|
||||
|
||||
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
|
||||
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
|
||||
|
|
@ -194,15 +199,15 @@ void r300_init_reg_flags(void)
|
|||
ADD_RANGE(R300_RB3D_CBLEND, 2);
|
||||
ADD_RANGE(R300_RB3D_COLORMASK, 1);
|
||||
ADD_RANGE(0x4E10, 3);
|
||||
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
|
||||
ADD_RANGE(0x4E50, 9);
|
||||
ADD_RANGE(0x4E88, 1);
|
||||
ADD_RANGE(0x4EA0, 2);
|
||||
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
|
||||
ADD_RANGE(0x4F10, 4);
|
||||
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
|
||||
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
|
||||
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
|
||||
ADD_RANGE(0x4F28, 1);
|
||||
ADD_RANGE(0x4F30, 2);
|
||||
ADD_RANGE(0x4F44, 1);
|
||||
|
|
@ -213,7 +218,7 @@ void r300_init_reg_flags(void)
|
|||
ADD_RANGE(R300_TX_SIZE_0, 16);
|
||||
ADD_RANGE(R300_TX_FORMAT_0, 16);
|
||||
ADD_RANGE(R300_TX_PITCH_0, 16);
|
||||
/* Texture offset is dangerous and needs more checking */
|
||||
/* Texture offset is dangerous and needs more checking */
|
||||
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
|
||||
ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
|
||||
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
|
||||
|
|
@ -226,33 +231,41 @@ void r300_init_reg_flags(void)
|
|||
|
||||
}
|
||||
|
||||
static __inline__ int r300_check_range(unsigned reg, int count)
|
||||
static __inline__ int r300_check_range(unsigned reg, int count)
|
||||
{
|
||||
int i;
|
||||
if(reg & ~0xffff)return -1;
|
||||
for(i=(reg>>2);i<(reg>>2)+count;i++)
|
||||
if(r300_reg_flags[i]!=MARK_SAFE)return 1;
|
||||
if (reg & ~0xffff)
|
||||
return -1;
|
||||
for (i = (reg >> 2); i < (reg >> 2) + count; i++)
|
||||
if (r300_reg_flags[i] != MARK_SAFE)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* we expect offsets passed to the framebuffer to be either within video memory or
|
||||
within AGP space */
|
||||
static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
|
||||
within AGP space */
|
||||
static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
|
||||
u32 offset)
|
||||
{
|
||||
/* we realy want to check against end of video aperture
|
||||
but this value is not being kept.
|
||||
This code is correct for now (does the same thing as the
|
||||
code that sets MC_FB_LOCATION) in radeon_cp.c */
|
||||
if((offset>=dev_priv->fb_location) &&
|
||||
(offset<dev_priv->gart_vm_start))return 0;
|
||||
if((offset>=dev_priv->gart_vm_start) &&
|
||||
(offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
|
||||
but this value is not being kept.
|
||||
This code is correct for now (does the same thing as the
|
||||
code that sets MC_FB_LOCATION) in radeon_cp.c */
|
||||
if ((offset >= dev_priv->fb_location) &&
|
||||
(offset < dev_priv->gart_vm_start))
|
||||
return 0;
|
||||
if ((offset >= dev_priv->gart_vm_start) &&
|
||||
(offset < dev_priv->gart_vm_start + dev_priv->gart_size))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
drm_r300_cmd_header_t header)
|
||||
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
|
||||
dev_priv,
|
||||
drm_radeon_kcmd_buffer_t
|
||||
* cmdbuf,
|
||||
drm_r300_cmd_header_t
|
||||
header)
|
||||
{
|
||||
int reg;
|
||||
int sz;
|
||||
|
|
@ -262,35 +275,40 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
|
|||
|
||||
sz = header.packet0.count;
|
||||
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
|
||||
|
||||
if((sz>64)||(sz<0)){
|
||||
DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
|
||||
|
||||
if ((sz > 64) || (sz < 0)) {
|
||||
DRM_ERROR
|
||||
("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
|
||||
reg, sz);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
for(i=0;i<sz;i++){
|
||||
values[i]=((int *)cmdbuf->buf)[i];
|
||||
switch(r300_reg_flags[(reg>>2)+i]){
|
||||
}
|
||||
for (i = 0; i < sz; i++) {
|
||||
values[i] = ((int *)cmdbuf->buf)[i];
|
||||
switch (r300_reg_flags[(reg >> 2) + i]) {
|
||||
case MARK_SAFE:
|
||||
break;
|
||||
case MARK_CHECK_OFFSET:
|
||||
if(r300_check_offset(dev_priv, (u32)values[i])){
|
||||
DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
|
||||
if (r300_check_offset(dev_priv, (u32) values[i])) {
|
||||
DRM_ERROR
|
||||
("Offset failed range check (reg=%04x sz=%d)\n",
|
||||
reg, sz);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
|
||||
DRM_ERROR("Register %04x failed check as flag=%02x\n",
|
||||
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
BEGIN_RING(1+sz);
|
||||
OUT_RING( CP_PACKET0( reg, sz-1 ) );
|
||||
OUT_RING_TABLE( values, sz );
|
||||
}
|
||||
|
||||
BEGIN_RING(1 + sz);
|
||||
OUT_RING(CP_PACKET0(reg, sz - 1));
|
||||
OUT_RING_TABLE(values, sz);
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += sz*4;
|
||||
cmdbuf->bufsz -= sz*4;
|
||||
cmdbuf->buf += sz * 4;
|
||||
cmdbuf->bufsz -= sz * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -315,32 +333,33 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
|
|||
if (!sz)
|
||||
return 0;
|
||||
|
||||
if (sz*4 > cmdbuf->bufsz)
|
||||
if (sz * 4 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
if (reg+sz*4 >= 0x10000){
|
||||
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
if(r300_check_range(reg, sz)){
|
||||
if (reg + sz * 4 >= 0x10000) {
|
||||
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
|
||||
sz);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
|
||||
if (r300_check_range(reg, sz)) {
|
||||
/* go and check everything */
|
||||
return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
|
||||
}
|
||||
return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
|
||||
header);
|
||||
}
|
||||
/* the rest of the data is safe to emit, whatever the values the user passed */
|
||||
|
||||
BEGIN_RING(1+sz);
|
||||
OUT_RING( CP_PACKET0( reg, sz-1 ) );
|
||||
OUT_RING_TABLE( (int *)cmdbuf->buf, sz );
|
||||
BEGIN_RING(1 + sz);
|
||||
OUT_RING(CP_PACKET0(reg, sz - 1));
|
||||
OUT_RING_TABLE((int *)cmdbuf->buf, sz);
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += sz*4;
|
||||
cmdbuf->bufsz -= sz*4;
|
||||
cmdbuf->buf += sz * 4;
|
||||
cmdbuf->bufsz -= sz * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Uploads user-supplied vertex program instructions or parameters onto
|
||||
* the graphics card.
|
||||
|
|
@ -359,27 +378,26 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
|
|||
|
||||
if (!sz)
|
||||
return 0;
|
||||
if (sz*16 > cmdbuf->bufsz)
|
||||
if (sz * 16 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
BEGIN_RING(5+sz*4);
|
||||
BEGIN_RING(5 + sz * 4);
|
||||
/* Wait for VAP to come to senses.. */
|
||||
/* there is no need to emit it multiple times, (only once before VAP is programmed,
|
||||
but this optimization is for later */
|
||||
OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
|
||||
OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
|
||||
OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
|
||||
OUT_RING_TABLE((int *)cmdbuf->buf, sz*4);
|
||||
OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
|
||||
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
|
||||
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
|
||||
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
|
||||
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += sz*16;
|
||||
cmdbuf->bufsz -= sz*16;
|
||||
cmdbuf->buf += sz * 16;
|
||||
cmdbuf->bufsz -= sz * 16;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Emit a clear packet from userspace.
|
||||
* Called by r300_emit_packet3.
|
||||
|
|
@ -389,18 +407,18 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
|
|||
{
|
||||
RING_LOCALS;
|
||||
|
||||
if (8*4 > cmdbuf->bufsz)
|
||||
if (8 * 4 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
BEGIN_RING(10);
|
||||
OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
|
||||
OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
|
||||
(1<<R300_PRIM_NUM_VERTICES_SHIFT) );
|
||||
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
|
||||
OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
|
||||
(1 << R300_PRIM_NUM_VERTICES_SHIFT));
|
||||
OUT_RING_TABLE((int *)cmdbuf->buf, 8);
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += 8*4;
|
||||
cmdbuf->bufsz -= 8*4;
|
||||
cmdbuf->buf += 8 * 4;
|
||||
cmdbuf->bufsz -= 8 * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -409,64 +427,72 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
|
|||
drm_radeon_kcmd_buffer_t *cmdbuf,
|
||||
u32 header)
|
||||
{
|
||||
int count, i,k;
|
||||
#define MAX_ARRAY_PACKET 64
|
||||
int count, i, k;
|
||||
#define MAX_ARRAY_PACKET 64
|
||||
u32 payload[MAX_ARRAY_PACKET];
|
||||
u32 narrays;
|
||||
RING_LOCALS;
|
||||
|
||||
count=(header>>16) & 0x3fff;
|
||||
|
||||
if((count+1)>MAX_ARRAY_PACKET){
|
||||
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
|
||||
count = (header >> 16) & 0x3fff;
|
||||
|
||||
if ((count + 1) > MAX_ARRAY_PACKET) {
|
||||
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
|
||||
count);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
memset(payload, 0, MAX_ARRAY_PACKET*4);
|
||||
memcpy(payload, cmdbuf->buf+4, (count+1)*4);
|
||||
|
||||
}
|
||||
memset(payload, 0, MAX_ARRAY_PACKET * 4);
|
||||
memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
|
||||
|
||||
/* carefully check packet contents */
|
||||
|
||||
narrays=payload[0];
|
||||
k=0;
|
||||
i=1;
|
||||
while((k<narrays) && (i<(count+1))){
|
||||
i++; /* skip attribute field */
|
||||
if(r300_check_offset(dev_priv, payload[i])){
|
||||
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
|
||||
|
||||
narrays = payload[0];
|
||||
k = 0;
|
||||
i = 1;
|
||||
while ((k < narrays) && (i < (count + 1))) {
|
||||
i++; /* skip attribute field */
|
||||
if (r300_check_offset(dev_priv, payload[i])) {
|
||||
DRM_ERROR
|
||||
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
|
||||
k, i);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
k++;
|
||||
i++;
|
||||
if(k==narrays)break;
|
||||
if (k == narrays)
|
||||
break;
|
||||
/* have one more to process, they come in pairs */
|
||||
if(r300_check_offset(dev_priv, payload[i])){
|
||||
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
|
||||
if (r300_check_offset(dev_priv, payload[i])) {
|
||||
DRM_ERROR
|
||||
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
|
||||
k, i);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
k++;
|
||||
i++;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
/* do the counts match what we expect ? */
|
||||
if((k!=narrays) || (i!=(count+1))){
|
||||
DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
|
||||
if ((k != narrays) || (i != (count + 1))) {
|
||||
DRM_ERROR
|
||||
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
|
||||
k, i, narrays, count + 1);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
/* all clear, output packet */
|
||||
|
||||
BEGIN_RING(count+2);
|
||||
BEGIN_RING(count + 2);
|
||||
OUT_RING(header);
|
||||
OUT_RING_TABLE(payload, count+1);
|
||||
OUT_RING_TABLE(payload, count + 1);
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += (count+2)*4;
|
||||
cmdbuf->bufsz -= (count+2)*4;
|
||||
cmdbuf->buf += (count + 2) * 4;
|
||||
cmdbuf->bufsz -= (count + 2) * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
u32 header;
|
||||
int count;
|
||||
|
|
@ -475,36 +501,37 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
|||
if (4 > cmdbuf->bufsz)
|
||||
return DRM_ERR(EINVAL);
|
||||
|
||||
/* Fixme !! This simply emits a packet without much checking.
|
||||
/* Fixme !! This simply emits a packet without much checking.
|
||||
We need to be smarter. */
|
||||
|
||||
/* obtain first word - actual packet3 header */
|
||||
header = *(u32 *)cmdbuf->buf;
|
||||
header = *(u32 *) cmdbuf->buf;
|
||||
|
||||
/* Is it packet 3 ? */
|
||||
if( (header>>30)!=0x3 ) {
|
||||
if ((header >> 30) != 0x3) {
|
||||
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
count=(header>>16) & 0x3fff;
|
||||
count = (header >> 16) & 0x3fff;
|
||||
|
||||
/* Check again now that we know how much data to expect */
|
||||
if ((count+2)*4 > cmdbuf->bufsz){
|
||||
DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
|
||||
(count+2)*4, cmdbuf->bufsz);
|
||||
if ((count + 2) * 4 > cmdbuf->bufsz) {
|
||||
DRM_ERROR
|
||||
("Expected packet3 of length %d but have only %d bytes left\n",
|
||||
(count + 2) * 4, cmdbuf->bufsz);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
/* Is it a packet type we know about ? */
|
||||
switch(header & 0xff00){
|
||||
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
|
||||
switch (header & 0xff00) {
|
||||
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
|
||||
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
|
||||
|
||||
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
|
||||
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
|
||||
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
|
||||
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
|
||||
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
|
||||
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
|
||||
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
|
||||
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
|
||||
case RADEON_WAIT_FOR_IDLE:
|
||||
case RADEON_CP_NOP:
|
||||
/* these packets are safe */
|
||||
|
|
@ -512,21 +539,19 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
|
|||
default:
|
||||
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
BEGIN_RING(count+2);
|
||||
BEGIN_RING(count + 2);
|
||||
OUT_RING(header);
|
||||
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
|
||||
ADVANCE_RING();
|
||||
|
||||
cmdbuf->buf += (count+2)*4;
|
||||
cmdbuf->bufsz -= (count+2)*4;
|
||||
cmdbuf->buf += (count + 2) * 4;
|
||||
cmdbuf->bufsz -= (count + 2) * 4;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Emit a rendering packet3 from userspace.
|
||||
* Called by r300_do_cp_cmdbuf.
|
||||
|
|
@ -552,16 +577,16 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
|
|||
|
||||
cmdbuf->buf = orig_buf;
|
||||
cmdbuf->bufsz = orig_bufsz;
|
||||
}
|
||||
}
|
||||
|
||||
switch(header.packet3.packet) {
|
||||
switch (header.packet3.packet) {
|
||||
case R300_CMD_PACKET3_CLEAR:
|
||||
DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
|
||||
ret = r300_emit_clear(dev_priv, cmdbuf);
|
||||
if (ret) {
|
||||
DRM_ERROR("r300_emit_clear failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case R300_CMD_PACKET3_RAW:
|
||||
|
|
@ -570,18 +595,18 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
|
|||
if (ret) {
|
||||
DRM_ERROR("r300_emit_raw_packet3 failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
DRM_ERROR("bad packet3 type %i at %p\n",
|
||||
header.packet3.packet,
|
||||
cmdbuf->buf - sizeof(header));
|
||||
header.packet3.packet,
|
||||
cmdbuf->buf - sizeof(header));
|
||||
return DRM_ERR(EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
n += R300_SIMULTANEOUS_CLIPRECTS;
|
||||
} while(n < cmdbuf->nbox);
|
||||
} while (n < cmdbuf->nbox);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -600,21 +625,20 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
|
|||
/**
|
||||
* Emit the sequence to pacify R300.
|
||||
*/
|
||||
static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
|
||||
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
|
||||
{
|
||||
RING_LOCALS;
|
||||
|
||||
BEGIN_RING(6);
|
||||
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
|
||||
OUT_RING( 0xa );
|
||||
OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
|
||||
OUT_RING( 0x3 );
|
||||
OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
|
||||
OUT_RING( 0x0 );
|
||||
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
|
||||
OUT_RING(0xa);
|
||||
OUT_RING(CP_PACKET0(0x4f18, 0));
|
||||
OUT_RING(0x3);
|
||||
OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
|
||||
OUT_RING(0x0);
|
||||
ADVANCE_RING();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
|
||||
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
|
||||
|
|
@ -630,20 +654,19 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
|
|||
buf->used = 0;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Parses and validates a user-supplied command buffer and emits appropriate
|
||||
* commands on the DMA ring buffer.
|
||||
* Called by the ioctl handler function radeon_cp_cmdbuf.
|
||||
*/
|
||||
int r300_do_cp_cmdbuf(drm_device_t* dev,
|
||||
int r300_do_cp_cmdbuf(drm_device_t *dev,
|
||||
DRMFILE filp,
|
||||
drm_file_t* filp_priv,
|
||||
drm_radeon_kcmd_buffer_t* cmdbuf)
|
||||
drm_file_t *filp_priv,
|
||||
drm_radeon_kcmd_buffer_t *cmdbuf)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;

@@ -657,9 +680,9 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
if (ret)
goto cleanup;
}
}

while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
int idx;
drm_r300_cmd_header_t header;

@@ -668,14 +691,14 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);

switch(header.header.cmd_type) {
case R300_CMD_PACKET0:
switch (header.header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
ret = r300_emit_packet0(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
}
}
break;

case R300_CMD_VPU:

@@ -684,7 +707,7 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
}
}
break;

case R300_CMD_PACKET3:

@@ -693,26 +716,26 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
}
}
break;

case R300_CMD_END3D:
DRM_DEBUG("R300_CMD_END3D\n");
/* TODO:
Ideally userspace driver should not need to issue this call,
i.e. the drm driver should issue it automatically and prevent
lockups.

In practice, we do not understand why this call is needed and what
it does (except for some vague guesses that it has to do with cache
coherence) and so the user space driver does it.

Once we are sure which uses prevent lockups the code could be moved
into the kernel and the userspace driver will not
need to use this command.
/* TODO:
Ideally userspace driver should not need to issue this call,
i.e. the drm driver should issue it automatically and prevent
lockups.

Note that issuing this command does not hurt anything
except, possibly, performance */
In practice, we do not understand why this call is needed and what
it does (except for some vague guesses that it has to do with cache
coherence) and so the user space driver does it.

Once we are sure which uses prevent lockups the code could be moved
into the kernel and the userspace driver will not
need to use this command.

Note that issuing this command does not hurt anything
except, possibly, performance */
r300_pacify(dev_priv);
break;

@@ -724,7 +747,7 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,
RING_LOCALS;

BEGIN_RING(header.delay.count);
for(i=0;i<header.delay.count;i++)
for (i = 0; i < header.delay.count; i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}

@@ -732,53 +755,54 @@ int r300_do_cp_cmdbuf(drm_device_t* dev,

case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
idx = header.dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
idx = header.dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
ret = DRM_ERR(EINVAL);
goto cleanup;
}

buf = dma->buflist[idx];
if (buf->filp != filp || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
}

buf = dma->buflist[idx];
if (buf->filp != filp || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
ret = DRM_ERR(EINVAL);
goto cleanup;
}

emit_dispatch_age = 1;
r300_discard_buffer(dev, buf);
break;
break;

case R300_CMD_WAIT:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_WAIT\n");
if(header.wait.flags==0)break; /* nothing to do */
if (header.wait.flags == 0)
break; /* nothing to do */

{
RING_LOCALS;

BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
OUT_RING( (header.wait.flags & 0xf)<<14 );
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING((header.wait.flags & 0xf) << 14);
ADVANCE_RING();
}
break;

default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
ret = DRM_ERR(EINVAL);
goto cleanup;
}
}
}

DRM_DEBUG("END\n");

cleanup:
cleanup:
r300_pacify(dev_priv);

/* We emit the vertex buffer age here, outside the pacifier "brackets"

@@ -794,7 +818,7 @@ cleanup:
BEGIN_RING(2);
RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
ADVANCE_RING();
}
}

COMMIT_RING();
@@ -944,7 +944,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);

if (dev_priv->microcode_version==UCODE_R200) {
if (dev_priv->microcode_version == UCODE_R200) {
DRM_INFO("Loading R200 Microcode\n");
for (i = 0; i < 256; i++) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,

@@ -952,13 +952,13 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
R200_cp_microcode[i][0]);
}
} else if (dev_priv->microcode_version==UCODE_R300) {
} else if (dev_priv->microcode_version == UCODE_R300) {
DRM_INFO("Loading R300 Microcode\n");
for ( i = 0 ; i < 256 ; i++ ) {
RADEON_WRITE( RADEON_CP_ME_RAM_DATAH,
R300_cp_microcode[i][1] );
RADEON_WRITE( RADEON_CP_ME_RAM_DATAL,
R300_cp_microcode[i][0] );
for (i = 0; i < 256; i++) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
R300_cp_microcode[i][1]);
RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
R300_cp_microcode[i][0]);
}
} else {
for (i = 0; i < 256; i++) {

@@ -1132,12 +1132,13 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
(dev_priv->gart_vm_start >> 16)));

ring_start = (dev_priv->cp_ring->offset
- dev->agp->base + dev_priv->gart_vm_start);
- dev->agp->base
+ dev_priv->gart_vm_start);
} else
#endif
ring_start = (dev_priv->cp_ring->offset
- (unsigned long)dev->sg->virtual + dev_priv->gart_vm_start);

- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);

RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);

@@ -1163,7 +1164,8 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
drm_sg_mem_t *entry = dev->sg;
unsigned long tmp_ofs, page_ofs;

tmp_ofs = dev_priv->ring_rptr->offset - (unsigned long)dev->sg->virtual;
tmp_ofs = dev_priv->ring_rptr->offset -
(unsigned long)dev->sg->virtual;
page_ofs = tmp_ofs >> PAGE_SHIFT;

RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);

@@ -1250,19 +1252,26 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
if (on) {

DRM_DEBUG("programming pcie %08X %08lX %08X\n",
dev_priv->gart_vm_start, (long)dev_priv->gart_info.bus_addr,
dev_priv->gart_vm_start,
(long)dev_priv->gart_info.bus_addr,
dev_priv->gart_size);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, dev_priv->gart_info.bus_addr);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, dev_priv->gart_vm_start
+ dev_priv->gart_size - 1);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
dev_priv->gart_info.bus_addr);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
dev_priv->gart_vm_start);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
dev_priv->gart_vm_start +
dev_priv->gart_size - 1);

RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */

RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, RADEON_PCIE_TX_GART_EN);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
RADEON_PCIE_TX_GART_EN);
} else {
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, (tmp & ~RADEON_PCIE_TX_GART_EN) | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
tmp & ~RADEON_PCIE_TX_GART_EN);
}
}

@@ -1271,8 +1280,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
u32 tmp;

if (dev_priv->flags & CHIP_IS_PCIE)
{
if (dev_priv->flags & CHIP_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
}

@@ -1331,14 +1339,13 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)

switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version=UCODE_R200;
dev_priv->microcode_version = UCODE_R200;
break;
case RADEON_INIT_R300_CP:
dev_priv->microcode_version=UCODE_R300;
dev_priv->microcode_version = UCODE_R300;
break;
default:
dev_priv->microcode_version=UCODE_R100;
break;
dev_priv->microcode_version = UCODE_R100;
}

dev_priv->do_boxes = 0;
@@ -1388,8 +1395,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
*/
dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
(dev_priv->color_fmt << 10) |
(dev_priv->microcode_version == UCODE_R100 ?
RADEON_ZBLOCK16 : 0));
(dev_priv->microcode_version ==
UCODE_R100 ? RADEON_ZBLOCK16 : 0));

dev_priv->depth_clear.rb3d_zstencilcntl =
(dev_priv->depth_fmt |

@@ -1503,8 +1510,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+ dev_priv->fb_location) >> 10));

dev_priv->gart_size = init->gart_size;

dev_priv->gart_vm_start = dev_priv->fb_location + RADEON_READ(RADEON_CONFIG_APER_SIZE);
dev_priv->gart_vm_start = dev_priv->fb_location
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);

#if __OS_HAS_AGP
if (dev_priv->flags & CHIP_IS_AGP)

@@ -1514,8 +1521,8 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
else
#endif
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);
- (unsigned long)dev->sg->virtual
+ dev_priv->gart_vm_start);

DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);

@@ -1541,24 +1548,33 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
{
/* if we have an offset set from userspace */
if (dev_priv->pcigart_offset) {
dev_priv->gart_info.bus_addr = dev_priv->pcigart_offset + dev_priv->fb_location;
dev_priv->gart_info.mapping.offset = dev_priv->gart_info.bus_addr;
dev_priv->gart_info.mapping.size = RADEON_PCIGART_TABLE_SIZE;
drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr = dev_priv->gart_info.mapping.handle;
dev_priv->gart_info.bus_addr =
dev_priv->pcigart_offset + dev_priv->fb_location;
dev_priv->gart_info.mapping.offset =
dev_priv->gart_info.bus_addr;
dev_priv->gart_info.mapping.size =
RADEON_PCIGART_TABLE_SIZE;

dev_priv->gart_info.is_pcie = !!(dev_priv->flags & CHIP_IS_PCIE);
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB;

DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", dev_priv->gart_info.addr, dev_priv->pcigart_offset);
}
else {
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
dev_priv->gart_info.mapping.handle;

dev_priv->gart_info.is_pcie =
!!(dev_priv->flags & CHIP_IS_PCIE);
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_FB;

DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
dev_priv->gart_info.addr,
dev_priv->pcigart_offset);
} else {
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_MAIN;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
if (dev_priv->flags & CHIP_IS_PCIE)
{
DRM_ERROR("Cannot use PCI Express without GART in FB memory\n");
if (dev_priv->flags & CHIP_IS_PCIE) {
DRM_ERROR
("Cannot use PCI Express without GART in FB memory\n");
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@@ -1682,9 +1698,9 @@ int radeon_cp_init(DRM_IOCTL_ARGS)
DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data,
sizeof(init));

if(init.func == RADEON_INIT_R300_CP)
if (init.func == RADEON_INIT_R300_CP)
r300_init_reg_flags();

switch (init.func) {
case RADEON_INIT_CP:
case RADEON_INIT_R200_CP:

@@ -1773,7 +1789,6 @@ void radeon_do_release(drm_device_t * dev)
int i, ret;

if (dev_priv) {

if (dev_priv->cp_running) {
/* Stop the cp */
while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {

@@ -1797,11 +1812,13 @@ void radeon_do_release(drm_device_t * dev)
if (dev_priv->mmio) /* remove this after permanent addmaps */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

if (dev_priv->mmio) {/* remove all surfaces */
if (dev_priv->mmio) { /* remove all surfaces */
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
RADEON_WRITE(RADEON_SURFACE0_INFO + 16*i, 0);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*i, 0);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*i, 0);
RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
16 * i, 0);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
16 * i, 0);
}
}

@@ -2109,7 +2126,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->flags |= CHIP_HAS_HIERZ;
break;
default:
/* all other chips have no hierarchical z buffer */
/* all other chips have no hierarchical z buffer */
break;
}

@@ -2121,7 +2138,6 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)

DRM_DEBUG("%s card detected\n",
((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));

return ret;
}
@@ -211,10 +211,10 @@ typedef union {
* The interface has not been stabilized, so some of these may be removed
* and eventually reordered before stabilization.
*/
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7

@@ -239,20 +239,20 @@ typedef union {
} packet3;
struct {
unsigned char cmd_type, packet;
unsigned short count; /* amount of packet2 to emit */
unsigned short count; /* amount of packet2 to emit */
} delay;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
unsigned char cmd_type, flags, pad0, pad1;
} wait;
} drm_r300_cmd_header_t;

#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
#define RADEON_STENCIL 0x8
#define RADEON_STENCIL 0x8
#define RADEON_CLEAR_FASTZ 0x80000000
#define RADEON_USE_HIERZ 0x40000000
#define RADEON_USE_COMP_ZBUF 0x20000000
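An R300 command buffer submitted through the cmdbuf ioctl is simply a stream of these headers, each followed by its payload; the loop in r300_do_cp_cmdbuf above switches on cmd_type. As a rough, hedged illustration of how a userspace driver might fill two of the simpler headers (the helper name and the flag/count values below are invented for the example and are not part of this change):

static void fill_r300_sync_cmds(drm_r300_cmd_header_t *cmds)
{
	drm_r300_cmd_header_t wait, delay;

	/* R300_CMD_WAIT: the kernel emits (flags & 0xf) << 14 after
	   CP_PACKET0(RADEON_WAIT_UNTIL, 0), as in the hunk above. */
	wait.wait.cmd_type = R300_CMD_WAIT;
	wait.wait.flags = 0x3;		/* assumed value, for illustration */
	wait.wait.pad0 = wait.wait.pad1 = 0;

	/* R300_CMD_CP_DELAY: the kernel emits count RADEON_CP_PACKET2 dwords. */
	delay.delay.cmd_type = R300_CMD_CP_DELAY;
	delay.delay.packet = 0;
	delay.delay.count = 8;		/* assumed value, for illustration */

	cmds[0] = wait;
	cmds[1] = delay;
}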
@@ -696,7 +696,7 @@ typedef struct drm_radeon_setparam {

#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */

/* 1.14: Clients can allocate/free a surface
*/
@@ -40,7 +40,7 @@
static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
dev_priv,
drm_file_t * filp_priv,
u32 * offset)
u32 *offset)
{
u32 off = *offset;
struct drm_radeon_driver_file_fields *radeon_priv;

@@ -50,7 +50,6 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
return 0;

radeon_priv = filp_priv->driver_priv;
off += radeon_priv->radeon_fb_delta;

DRM_DEBUG("offset fixed up to 0x%x\n", off);

@@ -520,7 +519,7 @@ static struct {
{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},

@@ -562,7 +561,7 @@ static struct {
{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
@@ -810,68 +809,73 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,

/* hyper z clear */
/* no docs available, based on reverse engeneering by Stephane Marchesin */
if ((flags & (RADEON_DEPTH | RADEON_STENCIL)) && (flags & RADEON_CLEAR_FASTZ)) {
if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
&& (flags & RADEON_CLEAR_FASTZ)) {

int i;
int depthpixperline = dev_priv->depth_fmt==RADEON_DEPTH_FORMAT_16BIT_INT_Z?
(dev_priv->depth_pitch / 2): (dev_priv->depth_pitch / 4);

int depthpixperline =
dev_priv->depth_fmt ==
RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch /
2) : (dev_priv->
depth_pitch / 4);

u32 clearmask;

u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
((clear->depth_mask & 0xff) << 24);

((clear->depth_mask & 0xff) << 24);

/* Make sure we restore the 3D state next time.
* we haven't touched any "normal" state - still need this?
*/
dev_priv->sarea_priv->ctx_owner = 0;

if ((dev_priv->flags & CHIP_HAS_HIERZ) && (flags & RADEON_USE_HIERZ)) {
/* FIXME : reverse engineer that for Rx00 cards */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
/* pattern seems to work for r100, though get slight
rendering errors with glxgears. If hierz is not enabled for r100,
only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
other ones are ignored, and the same clear mask can be used. That's
very different behaviour than R200 which needs different clear mask
and different number of tiles to clear if hierz is enabled or not !?!
*/
clearmask = (0xff<<22)|(0xff<<6)| 0x003f003f;
}
else {
/* clear mask : chooses the clearing pattern.
rv250: could be used to clear only parts of macrotiles
(but that would get really complicated...)?
bit 0 and 1 (either or both of them ?!?!) are used to
not clear tile (or maybe one of the bits indicates if the tile is
compressed or not), bit 2 and 3 to not clear tile 1,...,.
Pattern is as follows:
| 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
bits -------------------------------------------------
| 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
rv100: clearmask covers 2x8 4x1 tiles, but one clear still
covers 256 pixels ?!?
*/
if ((dev_priv->flags & CHIP_HAS_HIERZ)
&& (flags & RADEON_USE_HIERZ)) {
/* FIXME : reverse engineer that for Rx00 cards */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
/* pattern seems to work for r100, though get slight
rendering errors with glxgears. If hierz is not enabled for r100,
only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
other ones are ignored, and the same clear mask can be used. That's
very different behaviour than R200 which needs different clear mask
and different number of tiles to clear if hierz is enabled or not !?!
*/
clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
} else {
/* clear mask : chooses the clearing pattern.
rv250: could be used to clear only parts of macrotiles
(but that would get really complicated...)?
bit 0 and 1 (either or both of them ?!?!) are used to
not clear tile (or maybe one of the bits indicates if the tile is
compressed or not), bit 2 and 3 to not clear tile 1,...,.
Pattern is as follows:
| 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
bits -------------------------------------------------
| 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
rv100: clearmask covers 2x8 4x1 tiles, but one clear still
covers 256 pixels ?!?
*/
clearmask = 0x0;
}

BEGIN_RING( 8 );
BEGIN_RING(8);
RADEON_WAIT_UNTIL_2D_IDLE();
OUT_RING_REG( RADEON_RB3D_DEPTHCLEARVALUE,
tempRB3D_DEPTHCLEARVALUE);
OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
tempRB3D_DEPTHCLEARVALUE);
/* what offset is this exactly ? */
OUT_RING_REG( RADEON_RB3D_ZMASKOFFSET, 0 );
OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
/* need ctlstat, otherwise get some strange black flickering */
OUT_RING_REG( RADEON_RB3D_ZCACHE_CTLSTAT, RADEON_RB3D_ZC_FLUSH_ALL );
OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
RADEON_RB3D_ZC_FLUSH_ALL);
ADVANCE_RING();

for (i = 0; i < nbox; i++) {
int tileoffset, nrtilesx, nrtilesy, j;
/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
if ((dev_priv->flags&CHIP_HAS_HIERZ) && !(dev_priv->microcode_version==UCODE_R200)) {
if ((dev_priv->flags & CHIP_HAS_HIERZ)
&& !(dev_priv->microcode_version == UCODE_R200)) {
/* FIXME : figure this out for r200 (when hierz is enabled). Or
maybe r200 actually doesn't need to put the low-res z value into
the tile cache like r100, but just needs to clear the hi-level z-buffer?
@@ -879,59 +883,74 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
R100 seems to operate on 2x1 8x8 tiles, but...
odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
problematic with resolutions which are not 64 pix aligned? */
tileoffset = ((pbox[i].y1 >> 3) * depthpixperline + pbox[i].x1) >> 6;
nrtilesx = ((pbox[i].x2 & ~63) - (pbox[i].x1 & ~63)) >> 4;
nrtilesy = (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
tileoffset =
((pbox[i].y1 >> 3) * depthpixperline +
pbox[i].x1) >> 6;
nrtilesx =
((pbox[i].x2 & ~63) -
(pbox[i].x1 & ~63)) >> 4;
nrtilesy =
(pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
for (j = 0; j <= nrtilesy; j++) {
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
BEGIN_RING(4);
OUT_RING(CP_PACKET3
(RADEON_3D_CLEAR_ZMASK, 2));
/* first tile */
OUT_RING( tileoffset * 8 );
OUT_RING(tileoffset * 8);
/* the number of tiles to clear */
OUT_RING( nrtilesx + 4 );
OUT_RING(nrtilesx + 4);
/* clear mask : chooses the clearing pattern. */
OUT_RING( clearmask );
OUT_RING(clearmask);
ADVANCE_RING();
tileoffset += depthpixperline >> 6;
}
}
else if (dev_priv->microcode_version==UCODE_R200) {
} else if (dev_priv->microcode_version == UCODE_R200) {
/* works for rv250. */
/* find first macro tile (8x2 4x4 z-pixels on rv250) */
tileoffset = ((pbox[i].y1 >> 3) * depthpixperline + pbox[i].x1) >> 5;
nrtilesx = (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
nrtilesy = (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
tileoffset =
((pbox[i].y1 >> 3) * depthpixperline +
pbox[i].x1) >> 5;
nrtilesx =
(pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
nrtilesy =
(pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
for (j = 0; j <= nrtilesy; j++) {
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
BEGIN_RING(4);
OUT_RING(CP_PACKET3
(RADEON_3D_CLEAR_ZMASK, 2));
/* first tile */
/* judging by the first tile offset needed, could possibly
directly address/clear 4x4 tiles instead of 8x2 * 4x4
macro tiles, though would still need clear mask for
right/bottom if truely 4x4 granularity is desired ? */
OUT_RING( tileoffset * 16 );
OUT_RING(tileoffset * 16);
/* the number of tiles to clear */
OUT_RING( nrtilesx + 1 );
OUT_RING(nrtilesx + 1);
/* clear mask : chooses the clearing pattern. */
OUT_RING( clearmask );
OUT_RING(clearmask);
ADVANCE_RING();
tileoffset += depthpixperline >> 5;
}
}
else { /* rv 100 */
} else { /* rv 100 */
/* rv100 might not need 64 pix alignment, who knows */
/* offsets are, hmm, weird */
tileoffset = ((pbox[i].y1 >> 4) * depthpixperline + pbox[i].x1) >> 6;
nrtilesx = ((pbox[i].x2 & ~63) - (pbox[i].x1 & ~63)) >> 4;
nrtilesy = (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
tileoffset =
((pbox[i].y1 >> 4) * depthpixperline +
pbox[i].x1) >> 6;
nrtilesx =
((pbox[i].x2 & ~63) -
(pbox[i].x1 & ~63)) >> 4;
nrtilesy =
(pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
for (j = 0; j <= nrtilesy; j++) {
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_ZMASK, 2 ) );
OUT_RING( tileoffset * 128 );
BEGIN_RING(4);
OUT_RING(CP_PACKET3
(RADEON_3D_CLEAR_ZMASK, 2));
OUT_RING(tileoffset * 128);
/* the number of tiles to clear */
OUT_RING( nrtilesx + 4 );
OUT_RING(nrtilesx + 4);
/* clear mask : chooses the clearing pattern. */
OUT_RING( clearmask );
OUT_RING(clearmask);
ADVANCE_RING();
tileoffset += depthpixperline >> 6;
}
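To put concrete numbers on the non-R200 branch above (the buffer geometry and cliprect are assumed for illustration and are not taken from the patch): a 1024-pixel-wide 16-bit depth buffer has depth_pitch = 2048 bytes, so depthpixperline = 1024, and a cliprect from (0,0) to (640,480) works out as:

/* illustrative values only, assuming the geometry described above */
tileoffset = ((0 >> 3) * 1024 + 0) >> 6;	/* = 0 */
nrtilesx = ((640 & ~63) - (0 & ~63)) >> 4;	/* = 40 */
nrtilesy = (480 >> 3) - (0 >> 3);		/* = 60 */
/* the j loop then emits 61 RADEON_3D_CLEAR_ZMASK packets, each clearing
   nrtilesx + 4 tiles and stepping tileoffset by depthpixperline >> 6 = 16 */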
@@ -939,18 +958,19 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
}

/* TODO don't always clear all hi-level z tiles */
if ((dev_priv->flags & CHIP_HAS_HIERZ) && (dev_priv->microcode_version==UCODE_R200)
&& (flags & RADEON_USE_HIERZ))
/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
if ((dev_priv->flags & CHIP_HAS_HIERZ)
&& (dev_priv->microcode_version == UCODE_R200)
&& (flags & RADEON_USE_HIERZ))
/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
/* FIXME : the mask supposedly contains low-res z values. So can't set
just to the max (0xff? or actually 0x3fff?), need to take z clear
value into account? */
{
BEGIN_RING( 4 );
OUT_RING( CP_PACKET3( RADEON_3D_CLEAR_HIZ, 2 ) );
OUT_RING( 0x0 ); /* First tile */
OUT_RING( 0x3cc0 );
OUT_RING( (0xff<<22)|(0xff<<6)| 0x003f003f);
BEGIN_RING(4);
OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
OUT_RING(0x0); /* First tile */
OUT_RING(0x3cc0);
OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
ADVANCE_RING();
}
}

@@ -1028,7 +1048,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,

if (flags & RADEON_USE_COMP_ZBUF) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
RADEON_Z_DECOMPRESSION_ENABLE;
RADEON_Z_DECOMPRESSION_ENABLE;
}
if (flags & RADEON_USE_HIERZ) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;

@@ -1085,7 +1105,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {

int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;

rb3d_cntl = depth_clear->rb3d_cntl;

if (flags & RADEON_DEPTH) {

@@ -1104,7 +1124,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,

if (flags & RADEON_USE_COMP_ZBUF) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
RADEON_Z_DECOMPRESSION_ENABLE;
RADEON_Z_DECOMPRESSION_ENABLE;
}
if (flags & RADEON_USE_HIERZ) {
tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;

@@ -1566,8 +1586,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
/* we got tiled coordinates, untile them */
image->x *= 2;
}
}
else microtile = 0;
} else
microtile = 0;

DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
@@ -1611,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
dwords = size / 4;

#define RADEON_COPY_MT(_buf, _data, _width) \
do { \
if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
return DRM_ERR(EFAULT); \
} \
} while(0)

if (microtile) {
/* texture micro tiling in use, minimum texture width is thus 16 bytes.
however, we cannot use blitter directly for texture width < 64 bytes,

@@ -1622,101 +1650,58 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
from user space. */
if (tex->height == 1) {
if (tex_width >= 64 || tex_width <= 16) {
if (DRM_COPY_FROM_USER(buffer, data,
tex_width * sizeof(u32))) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
tex_width * sizeof(u32));
} else if (tex_width == 32) {
if (DRM_COPY_FROM_USER(buffer, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
if (DRM_COPY_FROM_USER(buffer + 8, data + 16, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, 16);
RADEON_COPY_MT(buffer + 8,
data + 16, 16);
}
} else if (tex_width >= 64 || tex_width == 16) {
if (DRM_COPY_FROM_USER(buffer, data,
dwords * sizeof(u32))) {
DRM_ERROR("EFAULT on data, %d dwords\n",
dwords);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
dwords * sizeof(u32));
} else if (tex_width < 16) {
for (i = 0; i < tex->height; i++) {
if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, tex_width);
buffer += 4;
data += tex_width;
}
} else if (tex_width == 32) {
/* TODO: make sure this works when not fitting in one buffer
(i.e. 32bytes x 2048...) */
/* TODO: make sure this works when not fitting in one buffer
(i.e. 32bytes x 2048...) */
for (i = 0; i < tex->height; i += 2) {
if (DRM_COPY_FROM_USER(buffer, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, 16);
data += 16;
if (DRM_COPY_FROM_USER(buffer + 8, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 8, data, 16);
data += 16;
if (DRM_COPY_FROM_USER(buffer + 4, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 4, data, 16);
data += 16;
if (DRM_COPY_FROM_USER(buffer + 12, data, 16)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer + 12, data, 16);
data += 16;
buffer += 16;
}
}
}
else {
} else {
if (tex_width >= 32) {
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
if (DRM_COPY_FROM_USER(buffer, data,
dwords * sizeof(u32))) {
DRM_ERROR("EFAULT on data, %d dwords\n",
dwords);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data,
dwords * sizeof(u32));
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for (i = 0; i < tex->height; i++) {
if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
DRM_ERROR("EFAULT on pad, %d bytes\n",
tex_width);
return DRM_ERR(EFAULT);
}
RADEON_COPY_MT(buffer, data, tex_width);
buffer += 8;
data += tex_width;
}
}
}

#undef RADEON_COPY_MT
buf->filp = filp;
buf->used = size;
offset = dev_priv->gart_buffers_offset + buf->offset;
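RADEON_COPY_MT is the usual do { ... } while (0) wrapper, so each call behaves like a single statement and the early return on a failed DRM_COPY_FROM_USER still drops straight out of radeon_cp_dispatch_texture. As a sketch of the expansion only (nothing new is added by the patch here), RADEON_COPY_MT(buffer + 8, data, 16); becomes roughly:

do {
	if (DRM_COPY_FROM_USER(buffer + 8, data, (16))) {
		DRM_ERROR("EFAULT on pad, %d bytes\n", (16));
		return DRM_ERR(EFAULT);
	}
} while(0);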
@@ -1729,8 +1714,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
RADEON_GMC_SRC_DATATYPE_COLOR |
RADEON_ROP3_S |
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS );
RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
OUT_RING((spitch << 22) | (offset >> 10));
OUT_RING((texpitch << 22) | (tex->offset >> 10));
OUT_RING(0);

@@ -1778,33 +1762,35 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
ADVANCE_RING();
}

static void radeon_apply_surface_regs(int surf_index, drm_radeon_private_t *dev_priv)
static void radeon_apply_surface_regs(int surf_index,
drm_radeon_private_t *dev_priv)
{
if (!dev_priv->mmio)
return;

radeon_do_cp_idle(dev_priv);

RADEON_WRITE(RADEON_SURFACE0_INFO + 16*surf_index,
dev_priv->surfaces[surf_index].flags);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*surf_index,
dev_priv->surfaces[surf_index].lower);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*surf_index,
dev_priv->surfaces[surf_index].upper);
RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
dev_priv->surfaces[surf_index].flags);
RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
dev_priv->surfaces[surf_index].lower);
RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
dev_priv->surfaces[surf_index].upper);
}

/* Allocates a virtual surface
* doesn't always allocate a real surface, will stretch an existing
* doesn't always allocate a real surface, will stretch an existing
* surface when possible.
*
* Note that refcount can be at most 2, since during a free refcount=3
* might mean we have to allocate a new surface which might not always
* be available.
* For example : we allocate three contigous surfaces ABC. If B is
* For example : we allocate three contigous surfaces ABC. If B is
* freed, we suddenly need two surfaces to store A and C, which might
* not always be available.
*/
static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *dev_priv, DRMFILE filp)
static int alloc_surface(drm_radeon_surface_alloc_t *new,
drm_radeon_private_t *dev_priv, DRMFILE filp)
{
struct radeon_virt_surface *s;
int i;
@@ -1816,34 +1802,37 @@ static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *

/* sanity check */
if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) != RADEON_SURF_ADDRESS_FIXED_MASK) ||
((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
RADEON_SURF_ADDRESS_FIXED_MASK)
|| ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
return -1;

/* make sure there is no overlap with existing surfaces */
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
if ((dev_priv->surfaces[i].refcount != 0) &&
(( (new_lower >= dev_priv->surfaces[i].lower) &&
(new_lower < dev_priv->surfaces[i].upper) ) ||
( (new_lower < dev_priv->surfaces[i].lower) &&
(new_upper > dev_priv->surfaces[i].lower) )) ){
return -1;}
(((new_lower >= dev_priv->surfaces[i].lower) &&
(new_lower < dev_priv->surfaces[i].upper)) ||
((new_lower < dev_priv->surfaces[i].lower) &&
(new_upper > dev_priv->surfaces[i].lower)))) {
return -1;
}
}

/* find a virtual surface */
for (i = 0; i < 2*RADEON_MAX_SURFACES; i++)
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
if (dev_priv->virt_surfaces[i].filp == 0)
break;
if (i == 2*RADEON_MAX_SURFACES) {
return -1;}
if (i == 2 * RADEON_MAX_SURFACES) {
return -1;
}
virt_surface_index = i;

/* try to reuse an existing surface */
for (i = 0; i < RADEON_MAX_SURFACES; i++) {
/* extend before */
if ((dev_priv->surfaces[i].refcount == 1) &&
(new->flags == dev_priv->surfaces[i].flags) &&
(new_upper + 1 == dev_priv->surfaces[i].lower)) {
(new->flags == dev_priv->surfaces[i].flags) &&
(new_upper + 1 == dev_priv->surfaces[i].lower)) {
s = &(dev_priv->virt_surfaces[virt_surface_index]);
s->surface_index = i;
s->lower = new_lower;

@@ -1858,8 +1847,8 @@ static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *

/* extend after */
if ((dev_priv->surfaces[i].refcount == 1) &&
(new->flags == dev_priv->surfaces[i].flags) &&
(new_lower == dev_priv->surfaces[i].upper + 1)) {
(new->flags == dev_priv->surfaces[i].flags) &&
(new_lower == dev_priv->surfaces[i].upper + 1)) {
s = &(dev_priv->virt_surfaces[virt_surface_index]);
s->surface_index = i;
s->lower = new_lower;

@@ -1895,26 +1884,34 @@ static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *
return -1;
}

static int free_surface(DRMFILE filp, drm_radeon_private_t *dev_priv, int lower)
static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
int lower)
{
struct radeon_virt_surface *s;
int i;
/* find the virtual surface */
for(i = 0; i < 2*RADEON_MAX_SURFACES; i++) {
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
s = &(dev_priv->virt_surfaces[i]);
if (s->filp) {
if ((lower == s->lower) && (filp == s->filp)) {
if (dev_priv->surfaces[s->surface_index].lower == s->lower)
dev_priv->surfaces[s->surface_index].lower = s->upper;
if (dev_priv->surfaces[s->surface_index].
lower == s->lower)
dev_priv->surfaces[s->surface_index].
lower = s->upper;

if (dev_priv->surfaces[s->surface_index].upper == s->upper)
dev_priv->surfaces[s->surface_index].upper = s->lower;
if (dev_priv->surfaces[s->surface_index].
upper == s->upper)
dev_priv->surfaces[s->surface_index].
upper = s->lower;

dev_priv->surfaces[s->surface_index].refcount--;
if (dev_priv->surfaces[s->surface_index].refcount == 0)
dev_priv->surfaces[s->surface_index].flags = 0;
s->filp = 0;
radeon_apply_surface_regs(s->surface_index, dev_priv);
if (dev_priv->surfaces[s->surface_index].
refcount == 0)
dev_priv->surfaces[s->surface_index].
flags = 0;
s->filp = NULL;
radeon_apply_surface_regs(s->surface_index,
dev_priv);
return 0;
}
}
@@ -1922,20 +1919,20 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t *dev_priv, int lower)
return 1;
}

static void radeon_surfaces_release(DRMFILE filp, drm_radeon_private_t *dev_priv)
static void radeon_surfaces_release(DRMFILE filp,
drm_radeon_private_t * dev_priv)
{
int i;
for( i = 0; i < 2*RADEON_MAX_SURFACES; i++)
{
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
if (dev_priv->virt_surfaces[i].filp == filp)
free_surface(filp, dev_priv, dev_priv->virt_surfaces[i].lower);
free_surface(filp, dev_priv,
dev_priv->virt_surfaces[i].lower);
}
}

/* ================================================================
* IOCTL functions
*/

static int radeon_surface_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;

@@ -1943,12 +1940,13 @@ static int radeon_surface_alloc(DRM_IOCTL_ARGS)
drm_radeon_surface_alloc_t alloc;

if (!dev_priv) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}

DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_surface_alloc_t __user *)data,
sizeof(alloc));
DRM_COPY_FROM_USER_IOCTL(alloc,
(drm_radeon_surface_alloc_t __user *) data,
sizeof(alloc));

if (alloc_surface(&alloc, dev_priv, filp) == -1)
return DRM_ERR(EINVAL);

@@ -1963,12 +1961,12 @@ static int radeon_surface_free(DRM_IOCTL_ARGS)
drm_radeon_surface_free_t memfree;

if (!dev_priv) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}

DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *)data,
sizeof(memfree) );
DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data,
sizeof(memfree));

if (free_surface(filp, dev_priv, memfree.address))
return DRM_ERR(EINVAL);
@@ -2512,7 +2510,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
drm_file_t * filp_priv,
drm_radeon_cmd_header_t header,
drm_radeon_kcmd_buffer_t * cmdbuf)
drm_radeon_kcmd_buffer_t *cmdbuf)
{
int id = (int)header.packet.packet_id;
int sz, reg;

@@ -2610,7 +2608,7 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,

static int radeon_emit_packet3(drm_device_t * dev,
drm_file_t * filp_priv,
drm_radeon_kcmd_buffer_t * cmdbuf)
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int cmdsz;

@@ -2776,19 +2774,20 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
}

orig_nbox = cmdbuf.nbox;

if(dev_priv->microcode_version == UCODE_R300) {

if (dev_priv->microcode_version == UCODE_R300) {
int temp;
temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);

temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);

if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

return temp;
}

/* microcode_version != r300 */
while (cmdbuf.bufsz >= sizeof(header)) {

header.i = *(int *)cmdbuf.buf;
cmdbuf.buf += sizeof(header);
cmdbuf.bufsz -= sizeof(header);

@@ -2880,12 +2879,12 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)

if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

DRM_DEBUG("DONE\n");
COMMIT_RING();

return 0;

err:
err:
if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
return DRM_ERR(EINVAL);

@@ -2999,13 +2998,12 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
break;
case RADEON_SETPARAM_SWITCH_TILING:
if (sp.value == 0) {
DRM_DEBUG( "color tiling disabled\n" );
DRM_DEBUG("color tiling disabled\n");
dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->sarea_priv->tiling_enabled = 0;
}
else if (sp.value == 1) {
DRM_DEBUG( "color tiling enabled\n" );
} else if (sp.value == 1) {
DRM_DEBUG("color tiling enabled\n");
dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
dev_priv->sarea_priv->tiling_enabled = 1;