mirror of
https://gitlab.freedesktop.org/mesa/drm.git
synced 2025-12-25 04:50:11 +01:00
Hardware accelerated stencil buffers. Conformant, but broken? Disabled by
default for now.
This commit is contained in:
parent
7918aa6550
commit
dd626209d3
18 changed files with 7869 additions and 67 deletions
829
linux-core/drm_bufs.c
Normal file
829
linux-core/drm_bufs.c
Normal file
|
|
@ -0,0 +1,829 @@
|
|||
/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
|
||||
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
|
||||
*
|
||||
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include <linux/vmalloc.h>
|
||||
#include "drmP.h"
|
||||
|
||||
#ifndef __HAVE_PCI_DMA
|
||||
#define __HAVE_PCI_DMA 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_BUF_PRIV_T
|
||||
#define DRIVER_BUF_PRIV_T u32
|
||||
#endif
|
||||
#ifndef DRIVER_AGP_BUFFERS_MAP
|
||||
#if __HAVE_AGP && __HAVE_DMA
|
||||
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
|
||||
#else
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Compute order. Can be made faster.
|
||||
*/
|
||||
int DRM(order)( unsigned long size )
|
||||
{
|
||||
int order;
|
||||
unsigned long tmp;
|
||||
|
||||
for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
|
||||
|
||||
if ( size & ~(1 << order) )
|
||||
++order;
|
||||
|
||||
return order;
|
||||
}
|
||||
|
||||
int DRM(addmap)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *list;
|
||||
|
||||
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
|
||||
|
||||
map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
|
||||
if ( !map )
|
||||
return -ENOMEM;
|
||||
|
||||
if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* Only allow shared memory to be removable since we only keep enough
|
||||
* book keeping information about shared memory to allow for removal
|
||||
* when processes fork.
|
||||
*/
|
||||
if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
map->offset, map->size, map->type );
|
||||
if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
map->mtrr = -1;
|
||||
map->handle = 0;
|
||||
|
||||
switch ( map->type ) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#ifndef __sparc__
|
||||
if ( map->offset + map->size < map->offset ||
|
||||
map->offset < virt_to_phys(high_memory) ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if ( map->type == _DRM_FRAME_BUFFER ||
|
||||
(map->flags & _DRM_WRITE_COMBINING) ) {
|
||||
map->mtrr = mtrr_add( map->offset, map->size,
|
||||
MTRR_TYPE_WRCOMB, 1 );
|
||||
}
|
||||
#endif
|
||||
map->handle = DRM(ioremap)( map->offset, map->size );
|
||||
break;
|
||||
|
||||
case _DRM_SHM:
|
||||
map->handle = vmalloc_32(map->size);
|
||||
DRM_DEBUG( "%ld %d %p\n",
|
||||
map->size, DRM(order)( map->size ), map->handle );
|
||||
if ( !map->handle ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -ENOMEM;
|
||||
}
|
||||
map->offset = (unsigned long)map->handle;
|
||||
if ( map->flags & _DRM_CONTAINS_LOCK ) {
|
||||
dev->lock.hw_lock = map->handle; /* Pointer to lock */
|
||||
}
|
||||
break;
|
||||
#if __REALLY_HAVE_AGP
|
||||
case _DRM_AGP:
|
||||
map->offset = map->offset + dev->agp->base;
|
||||
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
|
||||
if(!list) {
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
return -EINVAL;
|
||||
}
|
||||
memset(list, 0, sizeof(*list));
|
||||
list->map = map;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
list_add(&list->head, &dev->maplist->head);
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
|
||||
return -EFAULT;
|
||||
if ( map->type != _DRM_SHM ) {
|
||||
if ( copy_to_user( &((drm_map_t *)arg)->handle,
|
||||
&map->offset,
|
||||
sizeof(map->offset) ) )
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Remove a map private from list and deallocate resources if the mapping
|
||||
* isn't in use.
|
||||
*/
|
||||
|
||||
int DRM(rmmap)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
struct list_head *list;
|
||||
drm_map_list_t *r_list;
|
||||
drm_vma_entry_t *pt, *prev;
|
||||
drm_map_t *map;
|
||||
drm_map_t request;
|
||||
int found_maps = 0;
|
||||
|
||||
if (copy_from_user(&request, (drm_map_t *)arg,
|
||||
sizeof(request))) {
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
down(&dev->struct_sem);
|
||||
list = &dev->maplist->head;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *) list;
|
||||
|
||||
if(r_list->map &&
|
||||
r_list->map->handle == request.handle &&
|
||||
r_list->map->flags & _DRM_REMOVABLE) break;
|
||||
}
|
||||
|
||||
/* List has wrapped around to the head pointer, or its empty we didn't
|
||||
* find anything.
|
||||
*/
|
||||
if(list == (&dev->maplist->head)) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
map = r_list->map;
|
||||
list_del(list);
|
||||
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
|
||||
|
||||
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
if (pt->vma->vm_private_data == map) found_maps++;
|
||||
#else
|
||||
if (pt->vma->vm_pte == map) found_maps++;
|
||||
#endif
|
||||
}
|
||||
|
||||
if(!found_maps) {
|
||||
switch (map->type) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if (map->mtrr >= 0) {
|
||||
int retcode;
|
||||
retcode = mtrr_del(map->mtrr,
|
||||
map->offset,
|
||||
map->size);
|
||||
DRM_DEBUG("mtrr_del = %d\n", retcode);
|
||||
}
|
||||
#endif
|
||||
DRM(ioremapfree)(map->handle, map->size);
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vfree(map->handle);
|
||||
break;
|
||||
case _DRM_AGP:
|
||||
break;
|
||||
}
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_desc_t request;
|
||||
drm_buf_entry_t *entry;
|
||||
drm_buf_t *buf;
|
||||
unsigned long offset;
|
||||
unsigned long agp_offset;
|
||||
int count;
|
||||
int order;
|
||||
int size;
|
||||
int alignment;
|
||||
int page_order;
|
||||
int total;
|
||||
int byte_count;
|
||||
int i;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
count = request.count;
|
||||
order = DRM(order)( request.size );
|
||||
size = 1 << order;
|
||||
|
||||
alignment = (request.flags & _DRM_PAGE_ALIGN)
|
||||
? PAGE_ALIGN(size) : size;
|
||||
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
|
||||
total = PAGE_SIZE << page_order;
|
||||
|
||||
byte_count = 0;
|
||||
agp_offset = dev->agp->base + request.agp_start;
|
||||
|
||||
DRM_DEBUG( "count: %d\n", count );
|
||||
DRM_DEBUG( "order: %d\n", order );
|
||||
DRM_DEBUG( "size: %d\n", size );
|
||||
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
|
||||
DRM_DEBUG( "alignment: %d\n", alignment );
|
||||
DRM_DEBUG( "page_order: %d\n", page_order );
|
||||
DRM_DEBUG( "total: %d\n", total );
|
||||
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( dev->buf_use ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
atomic_inc( &dev->buf_alloc );
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
entry = &dma->bufs[order];
|
||||
if ( entry->buf_count ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM; /* May only call once for each order */
|
||||
}
|
||||
|
||||
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
if ( !entry->buflist ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
|
||||
|
||||
entry->buf_size = size;
|
||||
entry->page_order = page_order;
|
||||
|
||||
offset = 0;
|
||||
|
||||
while ( entry->buf_count < count ) {
|
||||
buf = &entry->buflist[entry->buf_count];
|
||||
buf->idx = dma->buf_count + entry->buf_count;
|
||||
buf->total = alignment;
|
||||
buf->order = order;
|
||||
buf->used = 0;
|
||||
|
||||
buf->offset = (dma->byte_count + offset);
|
||||
buf->bus_address = agp_offset + offset;
|
||||
buf->address = (void *)(agp_offset + offset);
|
||||
buf->next = NULL;
|
||||
buf->waiting = 0;
|
||||
buf->pending = 0;
|
||||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->pid = 0;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
DRM_MEM_BUFS );
|
||||
memset( buf->dev_private, 0, buf->dev_priv_size );
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
buf->time_queued = 0;
|
||||
buf->time_dispatched = 0;
|
||||
buf->time_completed = 0;
|
||||
buf->time_freed = 0;
|
||||
#endif
|
||||
DRM_DEBUG( "buffer %d @ %p\n",
|
||||
entry->buf_count, buf->address );
|
||||
|
||||
offset += alignment;
|
||||
entry->buf_count++;
|
||||
byte_count += PAGE_SIZE << page_order;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "byte_count: %d\n", byte_count );
|
||||
|
||||
dma->buflist = DRM(realloc)( dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
(dma->buf_count + entry->buf_count)
|
||||
* sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
|
||||
}
|
||||
|
||||
dma->buf_count += entry->buf_count;
|
||||
dma->byte_count += byte_count;
|
||||
|
||||
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
|
||||
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
#endif
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
request.size = size;
|
||||
|
||||
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
dma->flags = _DRM_DMA_USE_AGP;
|
||||
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return 0;
|
||||
}
|
||||
#endif /* __REALLY_HAVE_AGP */
|
||||
|
||||
#if __HAVE_PCI_DMA
|
||||
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_desc_t request;
|
||||
int count;
|
||||
int order;
|
||||
int size;
|
||||
int total;
|
||||
int page_order;
|
||||
drm_buf_entry_t *entry;
|
||||
unsigned long page;
|
||||
drm_buf_t *buf;
|
||||
int alignment;
|
||||
unsigned long offset;
|
||||
int i;
|
||||
int byte_count;
|
||||
int page_count;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
count = request.count;
|
||||
order = DRM(order)( request.size );
|
||||
size = 1 << order;
|
||||
|
||||
DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
|
||||
request.count, request.size, size,
|
||||
order, dev->queue_count );
|
||||
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
|
||||
|
||||
alignment = (request.flags & _DRM_PAGE_ALIGN)
|
||||
? PAGE_ALIGN(size) : size;
|
||||
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
|
||||
total = PAGE_SIZE << page_order;
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( dev->buf_use ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
atomic_inc( &dev->buf_alloc );
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
entry = &dma->bufs[order];
|
||||
if ( entry->buf_count ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM; /* May only call once for each order */
|
||||
}
|
||||
|
||||
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
if ( !entry->buflist ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
|
||||
|
||||
entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
|
||||
DRM_MEM_SEGS );
|
||||
if ( !entry->seglist ) {
|
||||
DRM(free)( entry->buflist,
|
||||
count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
|
||||
|
||||
dma->pagelist = DRM(realloc)( dma->pagelist,
|
||||
dma->page_count * sizeof(*dma->pagelist),
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
DRM_MEM_PAGES );
|
||||
DRM_DEBUG( "pagelist: %d entries\n",
|
||||
dma->page_count + (count << page_order) );
|
||||
|
||||
entry->buf_size = size;
|
||||
entry->page_order = page_order;
|
||||
byte_count = 0;
|
||||
page_count = 0;
|
||||
|
||||
while ( entry->buf_count < count ) {
|
||||
page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
|
||||
if ( !page ) break;
|
||||
entry->seglist[entry->seg_count++] = page;
|
||||
for ( i = 0 ; i < (1 << page_order) ; i++ ) {
|
||||
DRM_DEBUG( "page %d @ 0x%08lx\n",
|
||||
dma->page_count + page_count,
|
||||
page + PAGE_SIZE * i );
|
||||
dma->pagelist[dma->page_count + page_count++]
|
||||
= page + PAGE_SIZE * i;
|
||||
}
|
||||
for ( offset = 0 ;
|
||||
offset + size <= total && entry->buf_count < count ;
|
||||
offset += alignment, ++entry->buf_count ) {
|
||||
buf = &entry->buflist[entry->buf_count];
|
||||
buf->idx = dma->buf_count + entry->buf_count;
|
||||
buf->total = alignment;
|
||||
buf->order = order;
|
||||
buf->used = 0;
|
||||
buf->offset = (dma->byte_count + byte_count + offset);
|
||||
buf->address = (void *)(page + offset);
|
||||
buf->next = NULL;
|
||||
buf->waiting = 0;
|
||||
buf->pending = 0;
|
||||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->pid = 0;
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
buf->time_queued = 0;
|
||||
buf->time_dispatched = 0;
|
||||
buf->time_completed = 0;
|
||||
buf->time_freed = 0;
|
||||
#endif
|
||||
DRM_DEBUG( "buffer %d @ %p\n",
|
||||
entry->buf_count, buf->address );
|
||||
}
|
||||
byte_count += PAGE_SIZE << page_order;
|
||||
}
|
||||
|
||||
dma->buflist = DRM(realloc)( dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
(dma->buf_count + entry->buf_count)
|
||||
* sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
|
||||
}
|
||||
|
||||
dma->buf_count += entry->buf_count;
|
||||
dma->seg_count += entry->seg_count;
|
||||
dma->page_count += entry->seg_count << page_order;
|
||||
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
#endif
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
request.size = size;
|
||||
|
||||
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return 0;
|
||||
}
|
||||
#endif /* __HAVE_PCI_DMA */
|
||||
|
||||
int DRM(addbufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_buf_desc_t request;
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if ( request.flags & _DRM_AGP_BUFFER )
|
||||
return DRM(addbufs_agp)( inode, filp, cmd, arg );
|
||||
else
|
||||
#endif
|
||||
#if __HAVE_PCI_DMA
|
||||
return DRM(addbufs_pci)( inode, filp, cmd, arg );
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
int DRM(infobufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_info_t request;
|
||||
int i;
|
||||
int count;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( atomic_read( &dev->buf_alloc ) ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
++dev->buf_use; /* Can't allocate more after this call */
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
if ( copy_from_user( &request,
|
||||
(drm_buf_info_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
|
||||
if ( dma->bufs[i].buf_count ) ++count;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "count = %d\n", count );
|
||||
|
||||
if ( request.count >= count ) {
|
||||
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
|
||||
if ( dma->bufs[i].buf_count ) {
|
||||
drm_buf_desc_t *to = &request.list[count];
|
||||
drm_buf_entry_t *from = &dma->bufs[i];
|
||||
drm_freelist_t *list = &dma->bufs[i].freelist;
|
||||
if ( copy_to_user( &to->count,
|
||||
&from->buf_count,
|
||||
sizeof(from->buf_count) ) ||
|
||||
copy_to_user( &to->size,
|
||||
&from->buf_size,
|
||||
sizeof(from->buf_size) ) ||
|
||||
copy_to_user( &to->low_mark,
|
||||
&list->low_mark,
|
||||
sizeof(list->low_mark) ) ||
|
||||
copy_to_user( &to->high_mark,
|
||||
&list->high_mark,
|
||||
sizeof(list->high_mark) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d %d %d %d %d\n",
|
||||
i,
|
||||
dma->bufs[i].buf_count,
|
||||
dma->bufs[i].buf_size,
|
||||
dma->bufs[i].freelist.low_mark,
|
||||
dma->bufs[i].freelist.high_mark );
|
||||
++count;
|
||||
}
|
||||
}
|
||||
}
|
||||
request.count = count;
|
||||
|
||||
if ( copy_to_user( (drm_buf_info_t *)arg,
|
||||
&request,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(markbufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_desc_t request;
|
||||
int order;
|
||||
drm_buf_entry_t *entry;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request,
|
||||
(drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d, %d, %d\n",
|
||||
request.size, request.low_mark, request.high_mark );
|
||||
order = DRM(order)( request.size );
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
entry = &dma->bufs[order];
|
||||
|
||||
if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
|
||||
return -EINVAL;
|
||||
if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
|
||||
return -EINVAL;
|
||||
|
||||
entry->freelist.low_mark = request.low_mark;
|
||||
entry->freelist.high_mark = request.high_mark;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(freebufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_free_t request;
|
||||
int i;
|
||||
int idx;
|
||||
drm_buf_t *buf;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request,
|
||||
(drm_buf_free_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", request.count );
|
||||
for ( i = 0 ; i < request.count ; i++ ) {
|
||||
if ( copy_from_user( &idx,
|
||||
&request.list[i],
|
||||
sizeof(idx) ) )
|
||||
return -EFAULT;
|
||||
if ( idx < 0 || idx >= dma->buf_count ) {
|
||||
DRM_ERROR( "Index %d (of %d max)\n",
|
||||
idx, dma->buf_count - 1 );
|
||||
return -EINVAL;
|
||||
}
|
||||
buf = dma->buflist[idx];
|
||||
if ( buf->pid != current->pid ) {
|
||||
DRM_ERROR( "Process %d freeing buffer owned by %d\n",
|
||||
current->pid, buf->pid );
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM(free_buffer)( dev, buf );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(mapbufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
int retcode = 0;
|
||||
const int zero = 0;
|
||||
unsigned long virtual;
|
||||
unsigned long address;
|
||||
drm_buf_map_t request;
|
||||
int i;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( atomic_read( &dev->buf_alloc ) ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
dev->buf_use++; /* Can't allocate more after this call */
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_map_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( request.count >= dma->buf_count ) {
|
||||
if ( __HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP) ) {
|
||||
drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
|
||||
|
||||
if ( !map ) {
|
||||
retcode = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
down( ¤t->mm->mmap_sem );
|
||||
virtual = do_mmap( filp, 0, map->size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
(unsigned long)map->offset );
|
||||
up( ¤t->mm->mmap_sem );
|
||||
} else {
|
||||
down( ¤t->mm->mmap_sem );
|
||||
virtual = do_mmap( filp, 0, dma->byte_count,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED, 0 );
|
||||
up( ¤t->mm->mmap_sem );
|
||||
}
|
||||
if ( virtual > -1024UL ) {
|
||||
/* Real error */
|
||||
retcode = (signed long)virtual;
|
||||
goto done;
|
||||
}
|
||||
request.virtual = (void *)virtual;
|
||||
|
||||
for ( i = 0 ; i < dma->buf_count ; i++ ) {
|
||||
if ( copy_to_user( &request.list[i].idx,
|
||||
&dma->buflist[i]->idx,
|
||||
sizeof(request.list[0].idx) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
if ( copy_to_user( &request.list[i].total,
|
||||
&dma->buflist[i]->total,
|
||||
sizeof(request.list[0].total) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
if ( copy_to_user( &request.list[i].used,
|
||||
&zero,
|
||||
sizeof(zero) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
address = virtual + dma->buflist[i]->offset; /* *** */
|
||||
if ( copy_to_user( &request.list[i].address,
|
||||
&address,
|
||||
sizeof(address) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
done:
|
||||
request.count = dma->buf_count;
|
||||
DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
|
||||
|
||||
if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
return retcode;
|
||||
}
|
||||
|
||||
#endif /* __HAVE_DMA */
|
||||
760
linux-core/drm_context.c
Normal file
760
linux-core/drm_context.c
Normal file
|
|
@ -0,0 +1,760 @@
|
|||
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
|
||||
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
|
||||
*
|
||||
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
#if __HAVE_CTX_BITMAP
|
||||
|
||||
/* ================================================================
|
||||
* Context bitmap support
|
||||
*/
|
||||
|
||||
void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
|
||||
{
|
||||
if ( ctx_handle < 0 ) goto failed;
|
||||
if ( !dev->ctx_bitmap ) goto failed;
|
||||
|
||||
if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
|
||||
down(&dev->struct_sem);
|
||||
clear_bit( ctx_handle, dev->ctx_bitmap );
|
||||
dev->context_sareas[ctx_handle] = NULL;
|
||||
up(&dev->struct_sem);
|
||||
return;
|
||||
}
|
||||
failed:
|
||||
DRM_ERROR( "Attempt to free invalid context handle: %d\n",
|
||||
ctx_handle );
|
||||
return;
|
||||
}
|
||||
|
||||
int DRM(ctxbitmap_next)( drm_device_t *dev )
|
||||
{
|
||||
int bit;
|
||||
|
||||
if(!dev->ctx_bitmap) return -1;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
|
||||
if ( bit < DRM_MAX_CTXBITMAP ) {
|
||||
set_bit( bit, dev->ctx_bitmap );
|
||||
DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
|
||||
if((bit+1) > dev->max_context) {
|
||||
dev->max_context = (bit+1);
|
||||
if(dev->context_sareas) {
|
||||
dev->context_sareas = DRM(realloc)(
|
||||
dev->context_sareas,
|
||||
(dev->max_context - 1) *
|
||||
sizeof(*dev->context_sareas),
|
||||
dev->max_context *
|
||||
sizeof(*dev->context_sareas),
|
||||
DRM_MEM_MAPS);
|
||||
dev->context_sareas[bit] = NULL;
|
||||
} else {
|
||||
/* max_context == 1 at this point */
|
||||
dev->context_sareas = DRM(alloc)(
|
||||
dev->max_context *
|
||||
sizeof(*dev->context_sareas),
|
||||
DRM_MEM_MAPS);
|
||||
dev->context_sareas[bit] = NULL;
|
||||
}
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
return bit;
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int DRM(ctxbitmap_init)( drm_device_t *dev )
|
||||
{
|
||||
int i;
|
||||
int temp;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
|
||||
DRM_MEM_CTXBITMAP );
|
||||
if ( dev->ctx_bitmap == NULL ) {
|
||||
up(&dev->struct_sem);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
|
||||
dev->context_sareas = NULL;
|
||||
dev->max_context = -1;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
|
||||
temp = DRM(ctxbitmap_next)( dev );
|
||||
DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
|
||||
{
|
||||
down(&dev->struct_sem);
|
||||
if( dev->context_sareas ) DRM(free)( dev->context_sareas,
|
||||
sizeof(*dev->context_sareas) *
|
||||
dev->max_context,
|
||||
DRM_MEM_MAPS );
|
||||
DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
|
||||
up(&dev->struct_sem);
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
* Per Context SAREA Support
|
||||
*/
|
||||
|
||||
int DRM(getsareactx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_priv_map_t request;
|
||||
drm_map_t *map;
|
||||
|
||||
if (copy_from_user(&request,
|
||||
(drm_ctx_priv_map_t *)arg,
|
||||
sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if ((int)request.ctx_id >= dev->max_context) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
map = dev->context_sareas[request.ctx_id];
|
||||
up(&dev->struct_sem);
|
||||
|
||||
request.handle = map->handle;
|
||||
if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(setsareactx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_priv_map_t request;
|
||||
drm_map_t *map = NULL;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
|
||||
if (copy_from_user(&request,
|
||||
(drm_ctx_priv_map_t *)arg,
|
||||
sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
if(r_list->map &&
|
||||
r_list->map->handle == request.handle) break;
|
||||
}
|
||||
if (list == &(dev->maplist->head)) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
map = r_list->map;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (!map) return -EINVAL;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if ((int)request.ctx_id >= dev->max_context) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->context_sareas[request.ctx_id] = map;
|
||||
up(&dev->struct_sem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
* The actual DRM context handling routines
|
||||
*/
|
||||
|
||||
int DRM(context_switch)( drm_device_t *dev, int old, int new )
|
||||
{
|
||||
char buf[64];
|
||||
|
||||
if ( test_and_set_bit( 0, &dev->context_flag ) ) {
|
||||
DRM_ERROR( "Reentering -- FIXME\n" );
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
dev->ctx_start = get_cycles();
|
||||
#endif
|
||||
|
||||
DRM_DEBUG( "Context switch from %d to %d\n", old, new );
|
||||
|
||||
if ( new == dev->last_context ) {
|
||||
clear_bit( 0, &dev->context_flag );
|
||||
return 0;
|
||||
}
|
||||
|
||||
if ( DRM(flags) & DRM_FLAG_NOCTX ) {
|
||||
DRM(context_switch_complete)( dev, new );
|
||||
} else {
|
||||
sprintf( buf, "C %d %d\n", old, new );
|
||||
DRM(write_string)( dev, buf );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(context_switch_complete)( drm_device_t *dev, int new )
|
||||
{
|
||||
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
|
||||
dev->last_switch = jiffies;
|
||||
|
||||
if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
|
||||
DRM_ERROR( "Lock isn't held after context switch\n" );
|
||||
}
|
||||
|
||||
/* If a context switch is ever initiated
|
||||
when the kernel holds the lock, release
|
||||
that lock here. */
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
atomic_inc( &dev->histo.ctx[DRM(histogram_slot)(get_cycles()
|
||||
- dev->ctx_start)] );
|
||||
|
||||
#endif
|
||||
clear_bit( 0, &dev->context_flag );
|
||||
wake_up( &dev->context_wait );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(resctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_ctx_res_t res;
|
||||
drm_ctx_t ctx;
|
||||
int i;
|
||||
|
||||
if ( copy_from_user( &res, (drm_ctx_res_t *)arg, sizeof(res) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( res.count >= DRM_RESERVED_CONTEXTS ) {
|
||||
memset( &ctx, 0, sizeof(ctx) );
|
||||
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
|
||||
ctx.handle = i;
|
||||
if ( copy_to_user( &res.contexts[i],
|
||||
&i, sizeof(i) ) )
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
res.count = DRM_RESERVED_CONTEXTS;
|
||||
|
||||
if ( copy_to_user( (drm_ctx_res_t *)arg, &res, sizeof(res) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(addctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
ctx.handle = DRM(ctxbitmap_next)( dev );
|
||||
if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
|
||||
/* Skip kernel's context and get a new one. */
|
||||
ctx.handle = DRM(ctxbitmap_next)( dev );
|
||||
}
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
if ( ctx.handle == -1 ) {
|
||||
DRM_DEBUG( "Not enough free contexts.\n" );
|
||||
/* Should this return -EBUSY instead? */
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(modctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
/* This does nothing */
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t*)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
/* This is 0, because we don't handle any context flags */
|
||||
ctx.flags = 0;
|
||||
|
||||
if ( copy_to_user( (drm_ctx_t*)arg, &ctx, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(switchctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
return DRM(context_switch)( dev, dev->last_context, ctx.handle );
|
||||
}
|
||||
|
||||
int DRM(newctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
DRM(context_switch_complete)( dev, ctx.handle );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(rmctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
if ( ctx.handle == DRM_KERNEL_CONTEXT + 1 ) {
|
||||
priv->remove_auth_on_close = 1;
|
||||
}
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
|
||||
DRM(ctxbitmap_free)( dev, ctx.handle );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#else /* __HAVE_CTX_BITMAP */
|
||||
|
||||
/* ================================================================
|
||||
* Old-style context support
|
||||
*/
|
||||
|
||||
|
||||
int DRM(context_switch)(drm_device_t *dev, int old, int new)
|
||||
{
|
||||
char buf[64];
|
||||
drm_queue_t *q;
|
||||
|
||||
#if 0
|
||||
atomic_inc(&dev->total_ctx);
|
||||
#endif
|
||||
|
||||
if (test_and_set_bit(0, &dev->context_flag)) {
|
||||
DRM_ERROR("Reentering -- FIXME\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
dev->ctx_start = get_cycles();
|
||||
#endif
|
||||
|
||||
DRM_DEBUG("Context switch from %d to %d\n", old, new);
|
||||
|
||||
if (new >= dev->queue_count) {
|
||||
clear_bit(0, &dev->context_flag);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (new == dev->last_context) {
|
||||
clear_bit(0, &dev->context_flag);
|
||||
return 0;
|
||||
}
|
||||
|
||||
q = dev->queuelist[new];
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
atomic_dec(&q->use_count);
|
||||
clear_bit(0, &dev->context_flag);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM(flags) & DRM_FLAG_NOCTX) {
|
||||
DRM(context_switch_complete)(dev, new);
|
||||
} else {
|
||||
sprintf(buf, "C %d %d\n", old, new);
|
||||
DRM(write_string)(dev, buf);
|
||||
}
|
||||
|
||||
atomic_dec(&q->use_count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(context_switch_complete)(drm_device_t *dev, int new)
|
||||
{
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
|
||||
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
|
||||
dev->last_switch = jiffies;
|
||||
|
||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
||||
DRM_ERROR("Lock isn't held after context switch\n");
|
||||
}
|
||||
|
||||
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
|
||||
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT)) {
|
||||
DRM_ERROR("Cannot free lock\n");
|
||||
}
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
atomic_inc(&dev->histo.ctx[DRM(histogram_slot)(get_cycles()
|
||||
- dev->ctx_start)]);
|
||||
|
||||
#endif
|
||||
clear_bit(0, &dev->context_flag);
|
||||
wake_up_interruptible(&dev->context_wait);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
|
||||
{
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (atomic_read(&q->use_count) != 1
|
||||
|| atomic_read(&q->finalization)
|
||||
|| atomic_read(&q->block_count)) {
|
||||
DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
|
||||
atomic_read(&q->use_count),
|
||||
atomic_read(&q->finalization),
|
||||
atomic_read(&q->block_count));
|
||||
}
|
||||
|
||||
atomic_set(&q->finalization, 0);
|
||||
atomic_set(&q->block_count, 0);
|
||||
atomic_set(&q->block_read, 0);
|
||||
atomic_set(&q->block_write, 0);
|
||||
atomic_set(&q->total_queued, 0);
|
||||
atomic_set(&q->total_flushed, 0);
|
||||
atomic_set(&q->total_locks, 0);
|
||||
|
||||
init_waitqueue_head(&q->write_queue);
|
||||
init_waitqueue_head(&q->read_queue);
|
||||
init_waitqueue_head(&q->flush_queue);
|
||||
|
||||
q->flags = ctx->flags;
|
||||
|
||||
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* drm_alloc_queue:
|
||||
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
|
||||
disappear (so all deallocation must be done after IOCTLs are off)
|
||||
2) dev->queue_count < dev->queue_slots
|
||||
3) dev->queuelist[i].use_count == 0 and
|
||||
dev->queuelist[i].finalization == 0 if i not in use
|
||||
POST: 1) dev->queuelist[i].use_count == 1
|
||||
2) dev->queue_count < dev->queue_slots */
|
||||
|
||||
static int DRM(alloc_queue)(drm_device_t *dev)
|
||||
{
|
||||
int i;
|
||||
drm_queue_t *queue;
|
||||
int oldslots;
|
||||
int newslots;
|
||||
/* Check for a free queue */
|
||||
for (i = 0; i < dev->queue_count; i++) {
|
||||
atomic_inc(&dev->queuelist[i]->use_count);
|
||||
if (atomic_read(&dev->queuelist[i]->use_count) == 1
|
||||
&& !atomic_read(&dev->queuelist[i]->finalization)) {
|
||||
DRM_DEBUG("%d (free)\n", i);
|
||||
return i;
|
||||
}
|
||||
atomic_dec(&dev->queuelist[i]->use_count);
|
||||
}
|
||||
/* Allocate a new queue */
|
||||
down(&dev->struct_sem);
|
||||
|
||||
queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES);
|
||||
memset(queue, 0, sizeof(*queue));
|
||||
atomic_set(&queue->use_count, 1);
|
||||
|
||||
++dev->queue_count;
|
||||
if (dev->queue_count >= dev->queue_slots) {
|
||||
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
|
||||
if (!dev->queue_slots) dev->queue_slots = 1;
|
||||
dev->queue_slots *= 2;
|
||||
newslots = dev->queue_slots * sizeof(*dev->queuelist);
|
||||
|
||||
dev->queuelist = DRM(realloc)(dev->queuelist,
|
||||
oldslots,
|
||||
newslots,
|
||||
DRM_MEM_QUEUES);
|
||||
if (!dev->queuelist) {
|
||||
up(&dev->struct_sem);
|
||||
DRM_DEBUG("out of memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
dev->queuelist[dev->queue_count-1] = queue;
|
||||
|
||||
up(&dev->struct_sem);
|
||||
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
|
||||
return dev->queue_count - 1;
|
||||
}
|
||||
|
||||
int DRM(resctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_ctx_res_t res;
|
||||
drm_ctx_t ctx;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
|
||||
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
|
||||
return -EFAULT;
|
||||
if (res.count >= DRM_RESERVED_CONTEXTS) {
|
||||
memset(&ctx, 0, sizeof(ctx));
|
||||
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
|
||||
ctx.handle = i;
|
||||
if (copy_to_user(&res.contexts[i],
|
||||
&i,
|
||||
sizeof(i)))
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
res.count = DRM_RESERVED_CONTEXTS;
|
||||
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(addctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
|
||||
/* Init kernel's context and get a new one. */
|
||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
||||
ctx.handle = DRM(alloc_queue)(dev);
|
||||
}
|
||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(modctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
drm_queue_t *q;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
|
||||
if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
|
||||
q = dev->queuelist[ctx.handle];
|
||||
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
/* No longer in use */
|
||||
atomic_dec(&q->use_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM_BUFCOUNT(&q->waitlist)) {
|
||||
atomic_dec(&q->use_count);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
q->flags = ctx.flags;
|
||||
|
||||
atomic_dec(&q->use_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
drm_queue_t *q;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
|
||||
if (ctx.handle >= dev->queue_count) return -EINVAL;
|
||||
q = dev->queuelist[ctx.handle];
|
||||
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
/* No longer in use */
|
||||
atomic_dec(&q->use_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx.flags = q->flags;
|
||||
atomic_dec(&q->use_count);
|
||||
|
||||
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(switchctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
|
||||
}
|
||||
|
||||
int DRM(newctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
DRM(context_switch_complete)(dev, ctx.handle);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(rmctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
drm_queue_t *q;
|
||||
drm_buf_t *buf;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
|
||||
if (ctx.handle >= dev->queue_count) return -EINVAL;
|
||||
q = dev->queuelist[ctx.handle];
|
||||
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
/* No longer in use */
|
||||
atomic_dec(&q->use_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc(&q->finalization); /* Mark queue in finalization state */
|
||||
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
|
||||
finalization) */
|
||||
|
||||
while (test_and_set_bit(0, &dev->interrupt_flag)) {
|
||||
schedule();
|
||||
if (signal_pending(current)) {
|
||||
clear_bit(0, &dev->interrupt_flag);
|
||||
return -EINTR;
|
||||
}
|
||||
}
|
||||
/* Remove queued buffers */
|
||||
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
|
||||
DRM(free_buffer)(dev, buf);
|
||||
}
|
||||
clear_bit(0, &dev->interrupt_flag);
|
||||
|
||||
/* Wakeup blocked processes */
|
||||
wake_up_interruptible(&q->read_queue);
|
||||
wake_up_interruptible(&q->write_queue);
|
||||
wake_up_interruptible(&q->flush_queue);
|
||||
|
||||
/* Finalization over. Queue is made
|
||||
available when both use_count and
|
||||
finalization become 0, which won't
|
||||
happen until all the waiting processes
|
||||
stop waiting. */
|
||||
atomic_dec(&q->finalization);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* __HAVE_CTX_BITMAP */
|
||||
938
linux-core/drm_drv.c
Normal file
938
linux-core/drm_drv.c
Normal file
|
|
@ -0,0 +1,938 @@
|
|||
/* drm_drv.h -- Generic driver template -*- linux-c -*-
|
||||
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
|
||||
*
|
||||
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* To use this template, you must at least define the following (samples
|
||||
* given for the MGA driver):
|
||||
*
|
||||
* #define DRIVER_AUTHOR "VA Linux Systems, Inc."
|
||||
*
|
||||
* #define DRIVER_NAME "mga"
|
||||
* #define DRIVER_DESC "Matrox G200/G400"
|
||||
* #define DRIVER_DATE "20001127"
|
||||
*
|
||||
* #define DRIVER_MAJOR 2
|
||||
* #define DRIVER_MINOR 0
|
||||
* #define DRIVER_PATCHLEVEL 2
|
||||
*
|
||||
* #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
|
||||
*
|
||||
* #define DRM(x) mga_##x
|
||||
*/
|
||||
|
||||
#ifndef __MUST_HAVE_AGP
|
||||
#define __MUST_HAVE_AGP 0
|
||||
#endif
|
||||
#ifndef __HAVE_CTX_BITMAP
|
||||
#define __HAVE_CTX_BITMAP 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_IRQ
|
||||
#define __HAVE_DMA_IRQ 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_QUEUE
|
||||
#define __HAVE_DMA_QUEUE 0
|
||||
#endif
|
||||
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
|
||||
#define __HAVE_MULTIPLE_DMA_QUEUES 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_SCHEDULE
|
||||
#define __HAVE_DMA_SCHEDULE 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_FLUSH
|
||||
#define __HAVE_DMA_FLUSH 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_READY
|
||||
#define __HAVE_DMA_READY 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_QUIESCENT
|
||||
#define __HAVE_DMA_QUIESCENT 0
|
||||
#endif
|
||||
#ifndef __HAVE_RELEASE
|
||||
#define __HAVE_RELEASE 0
|
||||
#endif
|
||||
#ifndef __HAVE_COUNTERS
|
||||
#define __HAVE_COUNTERS 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_PREINIT
|
||||
#define DRIVER_PREINIT()
|
||||
#endif
|
||||
#ifndef DRIVER_POSTINIT
|
||||
#define DRIVER_POSTINIT()
|
||||
#endif
|
||||
#ifndef DRIVER_PRERELEASE
|
||||
#define DRIVER_PRERELEASE()
|
||||
#endif
|
||||
#ifndef DRIVER_PRETAKEDOWN
|
||||
#define DRIVER_PRETAKEDOWN()
|
||||
#endif
|
||||
#ifndef DRIVER_IOCTLS
|
||||
#define DRIVER_IOCTLS
|
||||
#endif
|
||||
|
||||
|
||||
static drm_device_t DRM(device);
|
||||
static int DRM(minor);
|
||||
|
||||
static struct file_operations DRM(fops) = {
|
||||
#if LINUX_VERSION_CODE >= 0x020400
|
||||
/* This started being used during 2.4.0-test */
|
||||
owner: THIS_MODULE,
|
||||
#endif
|
||||
open: DRM(open),
|
||||
flush: DRM(flush),
|
||||
release: DRM(release),
|
||||
ioctl: DRM(ioctl),
|
||||
mmap: DRM(mmap),
|
||||
read: DRM(read),
|
||||
fasync: DRM(fasync),
|
||||
poll: DRM(poll),
|
||||
};
|
||||
|
||||
|
||||
static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },

[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },

[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },

[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },

[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },

[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },

[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },

#if __HAVE_DMA
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },

/* The DRM_IOCTL_DMA ioctl should be defined by the driver.
 */
#if __HAVE_DMA_IRQ
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
#endif
#endif

#if __REALLY_HAVE_AGP
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
#endif

DRIVER_IOCTLS
};

#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )

#ifdef MODULE
static char *drm_opts = NULL;
#endif

MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );

#ifndef MODULE
/* DRM(options) is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_drm.
 */

static int __init DRM(options)( char *str )
{
DRM(parse_options)( str );
return 1;
}

__setup( DRIVER_NAME "=", DRM(options) );
#endif
|
||||
|
||||
static int DRM(setup)( drm_device_t *dev )
|
||||
{
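/* DRM(setup) runs when the first file handle on the device is opened
 * (see DRM(open) below): it resets the statistics counters, the magic
 * hash table, the map list and the lock wait queue.
 */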
|
||||
int i;
|
||||
|
||||
atomic_set( &dev->ioctl_count, 0 );
|
||||
atomic_set( &dev->vma_count, 0 );
|
||||
dev->buf_use = 0;
|
||||
atomic_set( &dev->buf_alloc, 0 );
|
||||
|
||||
#if __HAVE_DMA
|
||||
i = DRM(dma_setup)( dev );
|
||||
if ( i < 0 )
|
||||
return i;
|
||||
#endif
|
||||
|
||||
dev->counters = 6 + __HAVE_COUNTERS;
|
||||
dev->types[0] = _DRM_STAT_LOCK;
|
||||
dev->types[1] = _DRM_STAT_OPENS;
|
||||
dev->types[2] = _DRM_STAT_CLOSES;
|
||||
dev->types[3] = _DRM_STAT_IOCTLS;
|
||||
dev->types[4] = _DRM_STAT_LOCKS;
|
||||
dev->types[5] = _DRM_STAT_UNLOCKS;
|
||||
#ifdef __HAVE_COUNTER6
|
||||
dev->types[6] = __HAVE_COUNTER6;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER7
|
||||
dev->types[7] = __HAVE_COUNTER7;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER8
|
||||
dev->types[8] = __HAVE_COUNTER8;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER9
|
||||
dev->types[9] = __HAVE_COUNTER9;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER10
|
||||
dev->types[10] = __HAVE_COUNTER10;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER11
|
||||
dev->types[11] = __HAVE_COUNTER11;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER12
|
||||
dev->types[12] = __HAVE_COUNTER12;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER13
|
||||
dev->types[13] = __HAVE_COUNTER13;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER14
|
||||
dev->types[14] = __HAVE_COUNTER14;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER15
|
||||
dev->types[15] = __HAVE_COUNTER15;
|
||||
#endif
|
||||
|
||||
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
|
||||
atomic_set( &dev->counts[i], 0 );
|
||||
|
||||
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
|
||||
dev->magiclist[i].head = NULL;
|
||||
dev->magiclist[i].tail = NULL;
|
||||
}
|
||||
|
||||
dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
|
||||
DRM_MEM_MAPS);
|
||||
if(dev->maplist == NULL) return -ENOMEM;
|
||||
memset(dev->maplist, 0, sizeof(*dev->maplist));
|
||||
INIT_LIST_HEAD(&dev->maplist->head);
|
||||
dev->map_count = 0;
|
||||
|
||||
dev->vmalist = NULL;
|
||||
dev->lock.hw_lock = NULL;
|
||||
init_waitqueue_head( &dev->lock.lock_queue );
|
||||
dev->queue_count = 0;
|
||||
dev->queue_reserved = 0;
|
||||
dev->queue_slots = 0;
|
||||
dev->queuelist = NULL;
|
||||
dev->irq = 0;
|
||||
dev->context_flag = 0;
|
||||
dev->interrupt_flag = 0;
|
||||
dev->dma_flag = 0;
|
||||
dev->last_context = 0;
|
||||
dev->last_switch = 0;
|
||||
dev->last_checked = 0;
|
||||
init_timer( &dev->timer );
|
||||
init_waitqueue_head( &dev->context_wait );
|
||||
|
||||
dev->ctx_start = 0;
|
||||
dev->lck_start = 0;
|
||||
|
||||
dev->buf_rp = dev->buf;
|
||||
dev->buf_wp = dev->buf;
|
||||
dev->buf_end = dev->buf + DRM_BSZ;
|
||||
dev->buf_async = NULL;
|
||||
init_waitqueue_head( &dev->buf_readers );
|
||||
init_waitqueue_head( &dev->buf_writers );
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
/* The kernel's context could be created here, but is now created
|
||||
* in drm_dma_enqueue. This is more resource-efficient for
|
||||
* hardware that does not do DMA, but may mean that
|
||||
* drm_select_queue fails between the time the interrupt is
|
||||
* initialized and the time the queues are initialized.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int DRM(takedown)( drm_device_t *dev )
|
||||
{
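/* DRM(takedown) undoes DRM(setup): it runs when the last file handle is
 * closed (and from drm_cleanup), releasing magic entries, AGP memory,
 * the vma list, the map list and any DMA queues.
 */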
|
||||
drm_magic_entry_t *pt, *next;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
drm_vma_entry_t *vma, *vma_next;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
DRIVER_PRETAKEDOWN();
|
||||
#if __HAVE_DMA_IRQ
|
||||
if ( dev->irq ) DRM(irq_uninstall)( dev );
|
||||
#endif
|
||||
|
||||
down( &dev->struct_sem );
|
||||
del_timer( &dev->timer );
|
||||
|
||||
if ( dev->devname ) {
|
||||
DRM(free)( dev->devname, strlen( dev->devname ) + 1,
|
||||
DRM_MEM_DRIVER );
|
||||
dev->devname = NULL;
|
||||
}
|
||||
|
||||
if ( dev->unique ) {
|
||||
DRM(free)( dev->unique, strlen( dev->unique ) + 1,
|
||||
DRM_MEM_DRIVER );
|
||||
dev->unique = NULL;
|
||||
dev->unique_len = 0;
|
||||
}
|
||||
/* Clear pid list */
|
||||
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
|
||||
for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
|
||||
next = pt->next;
|
||||
DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
|
||||
}
|
||||
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
|
||||
}
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
/* Clear AGP information */
|
||||
if ( dev->agp ) {
|
||||
drm_agp_mem_t *entry;
|
||||
drm_agp_mem_t *nexte;
|
||||
|
||||
/* Remove AGP resources, but leave dev->agp
|
||||
intact until drv_cleanup is called. */
|
||||
for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
|
||||
nexte = entry->next;
|
||||
if ( entry->bound ) DRM(unbind_agp)( entry->memory );
|
||||
DRM(free_agp)( entry->memory, entry->pages );
|
||||
DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
|
||||
}
|
||||
dev->agp->memory = NULL;
|
||||
|
||||
if ( dev->agp->acquired ) DRM(agp_do_release)();
|
||||
|
||||
dev->agp->acquired = 0;
|
||||
dev->agp->enabled = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Clear vma list (only built for debugging) */
|
||||
if ( dev->vmalist ) {
|
||||
for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
|
||||
vma_next = vma->next;
|
||||
DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
|
||||
}
|
||||
dev->vmalist = NULL;
|
||||
}
|
||||
|
||||
if( dev->maplist ) {
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
map = r_list->map;
|
||||
DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
|
||||
if(!map) continue;
|
||||
|
||||
switch ( map->type ) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if ( map->mtrr >= 0 ) {
|
||||
int retcode;
|
||||
retcode = mtrr_del( map->mtrr,
|
||||
map->offset,
|
||||
map->size );
|
||||
DRM_DEBUG( "mtrr_del=%d\n", retcode );
|
||||
}
|
||||
#endif
|
||||
DRM(ioremapfree)( map->handle, map->size );
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vfree(map->handle);
|
||||
break;
|
||||
|
||||
case _DRM_AGP:
|
||||
/* Do nothing here, because this is all
|
||||
* handled in the AGP/GART driver.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
}
|
||||
DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
|
||||
dev->maplist = NULL;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
|
||||
if ( dev->queuelist ) {
|
||||
for ( i = 0 ; i < dev->queue_count ; i++ ) {
|
||||
DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
|
||||
if ( dev->queuelist[i] ) {
|
||||
DRM(free)( dev->queuelist[i],
|
||||
sizeof(*dev->queuelist[0]),
|
||||
DRM_MEM_QUEUES );
|
||||
dev->queuelist[i] = NULL;
|
||||
}
|
||||
}
|
||||
DRM(free)( dev->queuelist,
|
||||
dev->queue_slots * sizeof(*dev->queuelist),
|
||||
DRM_MEM_QUEUES );
|
||||
dev->queuelist = NULL;
|
||||
}
|
||||
dev->queue_count = 0;
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA
|
||||
DRM(dma_takedown)( dev );
|
||||
#endif
|
||||
if ( dev->lock.hw_lock ) {
|
||||
dev->lock.hw_lock = NULL; /* SHM removed */
|
||||
dev->lock.pid = 0;
|
||||
wake_up_interruptible( &dev->lock.lock_queue );
|
||||
}
|
||||
up( &dev->struct_sem );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* drm_init is called via init_module at module load time, or via
|
||||
* linux/init/main.c (this is not currently supported).
|
||||
*/
|
||||
static int __init drm_init( void )
|
||||
{
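/* drm_init registers with the stub driver to obtain a minor number,
 * then sets up AGP, a write-combining MTRR over the AGP aperture and
 * the context bitmap before reporting the driver version.
 */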
|
||||
drm_device_t *dev = &DRM(device);
|
||||
#if __HAVE_CTX_BITMAP
|
||||
int retcode;
|
||||
#endif
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
memset( (void *)dev, 0, sizeof(*dev) );
|
||||
dev->count_lock = SPIN_LOCK_UNLOCKED;
|
||||
sema_init( &dev->struct_sem, 1 );
|
||||
|
||||
#ifdef MODULE
|
||||
DRM(parse_options)( drm_opts );
|
||||
#endif
|
||||
DRIVER_PREINIT();
|
||||
|
||||
DRM(mem_init)();
|
||||
|
||||
if ((DRM(minor) = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
|
||||
return -EPERM;
|
||||
dev->device = MKDEV(DRM_MAJOR, DRM(minor) );
|
||||
dev->name = DRIVER_NAME;
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
dev->agp = DRM(agp_init)();
|
||||
#if __MUST_HAVE_AGP
|
||||
if ( dev->agp == NULL ) {
|
||||
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
|
||||
DRM(stub_unregister)(DRM(minor));
|
||||
DRM(takedown)( dev );
|
||||
return -ENOMEM;
|
||||
}
|
||||
#endif
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if (dev->agp)
|
||||
dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size*1024*1024,
|
||||
MTRR_TYPE_WRCOMB,
|
||||
1 );
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if __HAVE_CTX_BITMAP
|
||||
retcode = DRM(ctxbitmap_init)( dev );
|
||||
if( retcode ) {
|
||||
DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
|
||||
DRM(stub_unregister)(DRM(minor));
|
||||
DRM(takedown)( dev );
|
||||
return retcode;
|
||||
}
|
||||
#endif
|
||||
|
||||
DRIVER_POSTINIT();
|
||||
|
||||
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
DRM(minor) );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* drm_cleanup is called via cleanup_module at module unload time.
|
||||
*/
|
||||
static void __exit drm_cleanup( void )
|
||||
{
|
||||
drm_device_t *dev = &DRM(device);
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
if ( DRM(stub_unregister)(DRM(minor)) ) {
|
||||
DRM_ERROR( "Cannot unload module\n" );
|
||||
} else {
|
||||
DRM_INFO( "Module unloaded\n" );
|
||||
}
|
||||
#if __HAVE_CTX_BITMAP
|
||||
DRM(ctxbitmap_cleanup)( dev );
|
||||
#endif
|
||||
|
||||
#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
|
||||
if ( dev->agp && dev->agp->agp_mtrr ) {
|
||||
int retval;
|
||||
retval = mtrr_del( dev->agp->agp_mtrr,
|
||||
dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size*1024*1024 );
|
||||
DRM_DEBUG( "mtrr_del=%d\n", retval );
|
||||
}
|
||||
#endif
|
||||
|
||||
DRM(takedown)( dev );
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if ( dev->agp ) {
|
||||
DRM(agp_uninit)();
|
||||
DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
|
||||
dev->agp = NULL;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
module_init( drm_init );
|
||||
module_exit( drm_cleanup );
|
||||
|
||||
|
||||
int DRM(version)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_version_t version;
|
||||
int len;
|
||||
|
||||
if ( copy_from_user( &version,
|
||||
(drm_version_t *)arg,
|
||||
sizeof(version) ) )
|
||||
return -EFAULT;
|
||||
|
||||
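/* DRM_COPY copies at most name##_len bytes of the string to user space
 * but always reports the full string length back, so a caller with a
 * short buffer can tell how much space is actually needed.
 */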
#define DRM_COPY( name, value ) \
|
||||
len = strlen( value ); \
|
||||
if ( len > name##_len ) len = name##_len; \
|
||||
name##_len = strlen( value ); \
|
||||
if ( len && name ) { \
|
||||
if ( copy_to_user( name, value, len ) ) \
|
||||
return -EFAULT; \
|
||||
}
|
||||
|
||||
version.version_major = DRIVER_MAJOR;
|
||||
version.version_minor = DRIVER_MINOR;
|
||||
version.version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
|
||||
DRM_COPY( version.name, DRIVER_NAME );
|
||||
DRM_COPY( version.date, DRIVER_DATE );
|
||||
DRM_COPY( version.desc, DRIVER_DESC );
|
||||
|
||||
if ( copy_to_user( (drm_version_t *)arg,
|
||||
&version,
|
||||
sizeof(version) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(open)( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_device_t *dev = &DRM(device);
|
||||
int retcode = 0;
|
||||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
retcode = DRM(open_helper)( inode, filp, dev );
|
||||
if ( !retcode ) {
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !dev->open_count++ ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return DRM(setup)( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
}
|
||||
|
||||
return retcode;
|
||||
}
|
||||
|
||||
int DRM(release)( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev;
|
||||
int retcode = 0;
|
||||
|
||||
lock_kernel();
|
||||
dev = priv->dev;
|
||||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
DRIVER_PRERELEASE();
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
*/
|
||||
|
||||
DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n",
|
||||
current->pid, dev->device, dev->open_count );
|
||||
|
||||
if ( dev->lock.hw_lock &&
|
||||
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
|
||||
dev->lock.pid == current->pid ) {
|
||||
DRM_DEBUG( "Process %d dead, freeing lock for context %d\n",
|
||||
current->pid,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
#if __HAVE_RELEASE
|
||||
DRIVER_RELEASE();
|
||||
#endif
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
/* FIXME: may require heavy-handed reset of
|
||||
hardware at this point, possibly
|
||||
processed via a callback to the X
|
||||
server. */
|
||||
}
|
||||
#if __HAVE_RELEASE
|
||||
else if ( dev->lock.hw_lock ) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
retcode = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
dev->lock.pid = priv->pid;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
/* Contention */
|
||||
#if 0
|
||||
atomic_inc( &dev->total_sleeps );
|
||||
#endif
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
retcode = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
if( !retcode ) {
|
||||
DRIVER_RELEASE();
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
}
|
||||
}
|
||||
#elif __HAVE_DMA
|
||||
DRM(reclaim_buffers)( dev, priv->pid );
|
||||
#endif
|
||||
|
||||
DRM(fasync)( -1, filp, 0 );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
if ( priv->remove_auth_on_close == 1 ) {
|
||||
drm_file_t *temp = dev->file_first;
|
||||
while ( temp ) {
|
||||
temp->authenticated = 0;
|
||||
temp = temp->next;
|
||||
}
|
||||
}
|
||||
if ( priv->prev ) {
|
||||
priv->prev->next = priv->next;
|
||||
} else {
|
||||
dev->file_first = priv->next;
|
||||
}
|
||||
if ( priv->next ) {
|
||||
priv->next->prev = priv->prev;
|
||||
} else {
|
||||
dev->file_last = priv->prev;
|
||||
}
|
||||
up( &dev->struct_sem );
|
||||
|
||||
DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
|
||||
|
||||
/* ========================================================
|
||||
* End inline drm_release
|
||||
*/
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !--dev->open_count ) {
|
||||
if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
|
||||
DRM_ERROR( "Device busy: %d %d\n",
|
||||
atomic_read( &dev->ioctl_count ),
|
||||
dev->blocked );
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return -EBUSY;
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return DRM(takedown)( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
unlock_kernel();
|
||||
return retcode;
|
||||
}
|
||||
|
||||
/* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
|
||||
*/
|
||||
int DRM(ioctl)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ioctl_desc_t *ioctl;
|
||||
drm_ioctl_t *func;
|
||||
int nr = DRM_IOCTL_NR(cmd);
|
||||
int retcode = 0;
|
||||
|
||||
atomic_inc( &dev->ioctl_count );
|
||||
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
|
||||
++priv->ioctl_count;
|
||||
|
||||
DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n",
|
||||
current->pid, cmd, nr, dev->device, priv->authenticated );
|
||||
|
||||
if ( nr >= DRIVER_IOCTL_COUNT ) {
|
||||
retcode = -EINVAL;
|
||||
} else {
|
||||
ioctl = &DRM(ioctls)[nr];
|
||||
func = ioctl->func;
|
||||
|
||||
if ( !func ) {
|
||||
DRM_DEBUG( "no function\n" );
|
||||
retcode = -EINVAL;
|
||||
} else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
|
||||
( ioctl->auth_needed && !priv->authenticated ) ) {
|
||||
retcode = -EACCES;
|
||||
} else {
|
||||
retcode = func( inode, filp, cmd, arg );
|
||||
}
|
||||
}
|
||||
|
||||
atomic_dec( &dev->ioctl_count );
|
||||
return retcode;
|
||||
}
|
||||
|
||||
int DRM(lock)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
drm_lock_t lock;
|
||||
int ret = 0;
|
||||
#if __HAVE_MULTIPLE_DMA_QUEUES
|
||||
drm_queue_t *q;
|
||||
#endif
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
cycles_t start;
|
||||
|
||||
dev->lck_start = start = get_cycles();
|
||||
#endif
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
|
||||
lock.context, current->pid,
|
||||
dev->lock.hw_lock->lock, lock.flags );
|
||||
|
||||
#if __HAVE_DMA_QUEUE
|
||||
if ( lock.context < 0 )
|
||||
return -EINVAL;
|
||||
#elif __HAVE_MULTIPLE_DMA_QUEUES
|
||||
if ( lock.context < 0 || lock.context >= dev->queue_count )
|
||||
return -EINVAL;
|
||||
q = dev->queuelist[lock.context];
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
|
||||
#endif
|
||||
if ( !ret ) {
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
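/* Wait for the hardware lock: retry DRM(lock_take) after every wakeup,
 * bailing out if a signal arrives or the device is unregistered while
 * we sleep.
 */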
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
ret = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
|
||||
lock.context ) ) {
|
||||
dev->lock.pid = current->pid;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
|
||||
/* Contention */
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
ret = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
|
||||
#endif
|
||||
|
||||
if ( !ret ) {
|
||||
sigemptyset( &dev->sigmask );
|
||||
sigaddset( &dev->sigmask, SIGSTOP );
|
||||
sigaddset( &dev->sigmask, SIGTSTP );
|
||||
sigaddset( &dev->sigmask, SIGTTIN );
|
||||
sigaddset( &dev->sigmask, SIGTTOU );
|
||||
dev->sigdata.context = lock.context;
|
||||
dev->sigdata.lock = dev->lock.hw_lock;
|
||||
block_all_signals( DRM(notifier),
|
||||
&dev->sigdata, &dev->sigmask );
|
||||
|
||||
#if __HAVE_DMA_READY
|
||||
if ( lock.flags & _DRM_LOCK_READY ) {
|
||||
DRIVER_DMA_READY();
|
||||
}
|
||||
#endif
|
||||
#if __HAVE_DMA_QUIESCENT
|
||||
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
||||
DRIVER_DMA_QUIESCENT();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int DRM(unlock)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_lock_t lock;
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
|
||||
|
||||
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
#if __HAVE_DMA_SCHEDULE
|
||||
DRM(dma_schedule)( dev, 1 );
|
||||
#endif
|
||||
|
||||
/* FIXME: Do we ever really need to check this???
|
||||
*/
|
||||
if ( 1 /* !dev->context_flag */ ) {
|
||||
if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
}
|
||||
}
|
||||
|
||||
unblock_all_signals();
|
||||
return 0;
|
||||
}
|
210
linux-core/drm_ioctl.c
Normal file
@@ -0,0 +1,210 @@
/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
|
||||
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
int DRM(irq_busid)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_irq_busid_t p;
|
||||
struct pci_dev *dev;
|
||||
|
||||
if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
|
||||
return -EFAULT;
|
||||
dev = pci_find_slot(p.busnum, PCI_DEVFN(p.devnum, p.funcnum));
|
||||
if (dev) p.irq = dev->irq;
|
||||
else p.irq = 0;
|
||||
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
|
||||
p.busnum, p.devnum, p.funcnum, p.irq);
|
||||
if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getunique)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_unique_t u;
|
||||
|
||||
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
|
||||
return -EFAULT;
|
||||
if (u.unique_len >= dev->unique_len) {
|
||||
if (copy_to_user(u.unique, dev->unique, dev->unique_len))
|
||||
return -EFAULT;
|
||||
}
|
||||
u.unique_len = dev->unique_len;
|
||||
if (copy_to_user((drm_unique_t *)arg, &u, sizeof(u)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(setunique)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
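/* The bus-unique device name can only be set once; DRM(getunique) above
 * copies the stored string back to user space and always reports its
 * length, even when the caller's buffer is too small to hold it.
 */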
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_unique_t u;
|
||||
|
||||
if (dev->unique_len || dev->unique)
|
||||
return -EBUSY;
|
||||
|
||||
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!u.unique_len)
|
||||
return -EINVAL;
|
||||
|
||||
dev->unique_len = u.unique_len;
|
||||
dev->unique = DRM(alloc)(u.unique_len + 1, DRM_MEM_DRIVER);
|
||||
if (copy_from_user(dev->unique, u.unique, dev->unique_len))
|
||||
return -EFAULT;
|
||||
dev->unique[dev->unique_len] = '\0';
|
||||
|
||||
dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
|
||||
DRM_MEM_DRIVER);
|
||||
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int DRM(getmap)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_map_t map;
|
||||
drm_map_list_t *r_list = NULL;
|
||||
struct list_head *list;
|
||||
int idx;
|
||||
int i;
|
||||
|
||||
if (copy_from_user(&map, (drm_map_t *)arg, sizeof(map)))
|
||||
return -EFAULT;
|
||||
idx = map.offset;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if (idx < 0 || idx >= dev->map_count) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
i = 0;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
if(i == idx) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
if(!r_list || !r_list->map) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
map.offset = r_list->map->offset;
|
||||
map.size = r_list->map->size;
|
||||
map.type = r_list->map->type;
|
||||
map.flags = r_list->map->flags;
|
||||
map.handle = r_list->map->handle;
|
||||
map.mtrr = r_list->map->mtrr;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user((drm_map_t *)arg, &map, sizeof(map))) return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getclient)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_client_t client;
|
||||
drm_file_t *pt;
|
||||
int idx;
|
||||
int i;
|
||||
|
||||
if (copy_from_user(&client, (drm_client_t *)arg, sizeof(client)))
|
||||
return -EFAULT;
|
||||
idx = client.idx;
|
||||
down(&dev->struct_sem);
|
||||
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next)
|
||||
;
|
||||
|
||||
if (!pt) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
client.auth = pt->authenticated;
|
||||
client.pid = pt->pid;
|
||||
client.uid = pt->uid;
|
||||
client.magic = pt->magic;
|
||||
client.iocs = pt->ioctl_count;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user((drm_client_t *)arg, &client, sizeof(client)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getstats)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_stats_t stats;
|
||||
int i;
|
||||
|
||||
memset(&stats, 0, sizeof(stats));
|
||||
|
||||
down(&dev->struct_sem);
|
||||
|
||||
for (i = 0; i < dev->counters; i++) {
|
||||
if (dev->types[i] == _DRM_STAT_LOCK)
|
||||
stats.data[i].value
|
||||
= (dev->lock.hw_lock
|
||||
? dev->lock.hw_lock->lock : 0);
|
||||
else
|
||||
stats.data[i].value = atomic_read(&dev->counts[i]);
|
||||
stats.data[i].type = dev->types[i];
|
||||
}
|
||||
|
||||
stats.count = dev->counters;
|
||||
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user((drm_stats_t *)arg, &stats, sizeof(stats)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
630
linux-core/drm_proc.c
Normal file
@@ -0,0 +1,630 @@
/* drm_proc.h -- /proc support for DRM -*- linux-c -*-
|
||||
* Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*
|
||||
* Acknowledgements:
|
||||
* Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
|
||||
* the problem with the proc files not outputting all their information.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
static int DRM(name_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(vm_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(clients_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(queues_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(bufs_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
#if DRM_DEBUG_CODE
|
||||
static int DRM(vma_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
#endif
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
static int DRM(histo_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
#endif
|
||||
|
||||
struct drm_proc_list {
|
||||
const char *name;
|
||||
int (*f)(char *, char **, off_t, int, int *, void *);
|
||||
} DRM(proc_list)[] = {
|
||||
{ "name", DRM(name_info) },
|
||||
{ "mem", DRM(mem_info) },
|
||||
{ "vm", DRM(vm_info) },
|
||||
{ "clients", DRM(clients_info) },
|
||||
{ "queues", DRM(queues_info) },
|
||||
{ "bufs", DRM(bufs_info) },
|
||||
#if DRM_DEBUG_CODE
|
||||
{ "vma", DRM(vma_info) },
|
||||
#endif
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
{ "histo", DRM(histo_info) },
|
||||
#endif
|
||||
};
|
||||
#define DRM_PROC_ENTRIES (sizeof(DRM(proc_list))/sizeof(DRM(proc_list)[0]))
|
||||
|
||||
struct proc_dir_entry *DRM(proc_init)(drm_device_t *dev, int minor,
|
||||
struct proc_dir_entry *root,
|
||||
struct proc_dir_entry **dev_root)
|
||||
{
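/* DRM(proc_init) creates /proc/dri (for minor 0) and a per-minor
 * directory under it, then one read-only entry per item in
 * DRM(proc_list); on failure it removes whatever it already created.
 */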
|
||||
struct proc_dir_entry *ent;
|
||||
int i, j;
|
||||
char name[64];
|
||||
|
||||
if (!minor) root = create_proc_entry("dri", S_IFDIR, NULL);
|
||||
if (!root) {
|
||||
DRM_ERROR("Cannot create /proc/dri\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sprintf(name, "%d", minor);
|
||||
*dev_root = create_proc_entry(name, S_IFDIR, root);
|
||||
if (!*dev_root) {
|
||||
DRM_ERROR("Cannot create /proc/%s\n", name);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
|
||||
ent = create_proc_entry(DRM(proc_list)[i].name,
|
||||
S_IFREG|S_IRUGO, *dev_root);
|
||||
if (!ent) {
|
||||
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
|
||||
name, DRM(proc_list)[i].name);
|
||||
for (j = 0; j < i; j++)
|
||||
remove_proc_entry(DRM(proc_list)[j].name,
|
||||
*dev_root);
|
||||
remove_proc_entry(name, root);
|
||||
if (!minor) remove_proc_entry("dri", NULL);
|
||||
return NULL;
|
||||
}
|
||||
ent->read_proc = DRM(proc_list)[i].f;
|
||||
ent->data = dev;
|
||||
}
|
||||
|
||||
return root;
|
||||
}
|
||||
|
||||
|
||||
int DRM(proc_cleanup)(int minor, struct proc_dir_entry *root,
|
||||
struct proc_dir_entry *dev_root)
|
||||
{
|
||||
int i;
|
||||
char name[64];
|
||||
|
||||
if (!root || !dev_root) return 0;
|
||||
|
||||
for (i = 0; i < DRM_PROC_ENTRIES; i++)
|
||||
remove_proc_entry(DRM(proc_list)[i].name, dev_root);
|
||||
sprintf(name, "%d", minor);
|
||||
remove_proc_entry(name, root);
|
||||
if (!minor) remove_proc_entry("dri", NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int DRM(name_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
if (dev->unique) {
|
||||
DRM_PROC_PRINT("%s 0x%x %s\n",
|
||||
dev->name, dev->device, dev->unique);
|
||||
} else {
|
||||
DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
|
||||
/* Hardcoded from _DRM_FRAME_BUFFER,
|
||||
_DRM_REGISTERS, _DRM_SHM, and
|
||||
_DRM_AGP. */
|
||||
const char *types[] = { "FB", "REG", "SHM", "AGP" };
|
||||
const char *type;
|
||||
int i;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("slot offset size type flags "
|
||||
"address mtrr\n\n");
|
||||
i = 0;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
map = r_list->map;
|
||||
if(!map) continue;
|
||||
if (map->type < 0 || map->type > 3) type = "??";
|
||||
else type = types[map->type];
|
||||
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
|
||||
i,
|
||||
map->offset,
|
||||
map->size,
|
||||
type,
|
||||
map->flags,
|
||||
(unsigned long)map->handle);
|
||||
if (map->mtrr < 0) {
|
||||
DRM_PROC_PRINT("none\n");
|
||||
} else {
|
||||
DRM_PROC_PRINT("%4d\n", map->mtrr);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(vm_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_vm_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int DRM(_queues_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
int i;
|
||||
drm_queue_t *q;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT(" ctx/flags use fin"
|
||||
" blk/rw/rwf wait flushed queued"
|
||||
" locks\n\n");
|
||||
for (i = 0; i < dev->queue_count; i++) {
|
||||
q = dev->queuelist[i];
|
||||
atomic_inc(&q->use_count);
|
||||
DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
|
||||
"%5d/0x%03x %5d %5d"
|
||||
" %5d/%c%c/%c%c%c %5Zd\n",
|
||||
i,
|
||||
q->flags,
|
||||
atomic_read(&q->use_count),
|
||||
atomic_read(&q->finalization),
|
||||
atomic_read(&q->block_count),
|
||||
atomic_read(&q->block_read) ? 'r' : '-',
|
||||
atomic_read(&q->block_write) ? 'w' : '-',
|
||||
waitqueue_active(&q->read_queue) ? 'r':'-',
|
||||
waitqueue_active(&q->write_queue) ? 'w':'-',
|
||||
waitqueue_active(&q->flush_queue) ? 'f':'-',
|
||||
DRM_BUFCOUNT(&q->waitlist));
|
||||
atomic_dec(&q->use_count);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(queues_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_queues_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* drm_bufs_info is called whenever a process reads
|
||||
/proc/dri/<dev>/bufs. */
|
||||
|
||||
static int DRM(_bufs_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
int i;
|
||||
|
||||
if (!dma || offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
|
||||
for (i = 0; i <= DRM_MAX_ORDER; i++) {
|
||||
if (dma->bufs[i].buf_count)
|
||||
DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
|
||||
i,
|
||||
dma->bufs[i].buf_size,
|
||||
dma->bufs[i].buf_count,
|
||||
atomic_read(&dma->bufs[i]
|
||||
.freelist.count),
|
||||
dma->bufs[i].seg_count,
|
||||
dma->bufs[i].seg_count
|
||||
*(1 << dma->bufs[i].page_order),
|
||||
(dma->bufs[i].seg_count
|
||||
* (1 << dma->bufs[i].page_order))
|
||||
* PAGE_SIZE / 1024);
|
||||
}
|
||||
DRM_PROC_PRINT("\n");
|
||||
for (i = 0; i < dma->buf_count; i++) {
|
||||
if (i && !(i%32)) DRM_PROC_PRINT("\n");
|
||||
DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
|
||||
}
|
||||
DRM_PROC_PRINT("\n");
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(bufs_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_bufs_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int DRM(_clients_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_file_t *priv;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
|
||||
for (priv = dev->file_first; priv; priv = priv->next) {
|
||||
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
|
||||
priv->authenticated ? 'y' : 'n',
|
||||
priv->minor,
|
||||
priv->pid,
|
||||
priv->uid,
|
||||
priv->magic,
|
||||
priv->ioctl_count);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(clients_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_clients_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if DRM_DEBUG_CODE
|
||||
|
||||
#define DRM_VMA_VERBOSE 0
|
||||
|
||||
static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_vma_entry_t *pt;
|
||||
struct vm_area_struct *vma;
|
||||
#if DRM_VMA_VERBOSE
|
||||
unsigned long i;
|
||||
unsigned long address;
|
||||
pgd_t *pgd;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
#endif
|
||||
#if defined(__i386__)
|
||||
unsigned int pgprot;
|
||||
#endif
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
|
||||
atomic_read(&dev->vma_count),
|
||||
high_memory, virt_to_phys(high_memory));
|
||||
for (pt = dev->vmalist; pt; pt = pt->next) {
|
||||
if (!(vma = pt->vma)) continue;
|
||||
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
|
||||
pt->pid,
|
||||
vma->vm_start,
|
||||
vma->vm_end,
|
||||
vma->vm_flags & VM_READ ? 'r' : '-',
|
||||
vma->vm_flags & VM_WRITE ? 'w' : '-',
|
||||
vma->vm_flags & VM_EXEC ? 'x' : '-',
|
||||
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
|
||||
vma->vm_flags & VM_LOCKED ? 'l' : '-',
|
||||
vma->vm_flags & VM_IO ? 'i' : '-',
|
||||
VM_OFFSET(vma));
|
||||
|
||||
#if defined(__i386__)
|
||||
pgprot = pgprot_val(vma->vm_page_prot);
|
||||
DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
|
||||
pgprot & _PAGE_PRESENT ? 'p' : '-',
|
||||
pgprot & _PAGE_RW ? 'w' : 'r',
|
||||
pgprot & _PAGE_USER ? 'u' : 's',
|
||||
pgprot & _PAGE_PWT ? 't' : 'b',
|
||||
pgprot & _PAGE_PCD ? 'u' : 'c',
|
||||
pgprot & _PAGE_ACCESSED ? 'a' : '-',
|
||||
pgprot & _PAGE_DIRTY ? 'd' : '-',
|
||||
pgprot & _PAGE_PSE ? 'm' : 'k',
|
||||
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
|
||||
#endif
|
||||
DRM_PROC_PRINT("\n");
|
||||
#if 0
|
||||
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
|
||||
pgd = pgd_offset(vma->vm_mm, i);
|
||||
pmd = pmd_offset(pgd, i);
|
||||
pte = pte_offset(pmd, i);
|
||||
if (pte_present(*pte)) {
|
||||
address = __pa(pte_page(*pte))
|
||||
+ (i & (PAGE_SIZE-1));
|
||||
DRM_PROC_PRINT(" 0x%08lx -> 0x%08lx"
|
||||
" %c%c%c%c%c\n",
|
||||
i,
|
||||
address,
|
||||
pte_read(*pte) ? 'r' : '-',
|
||||
pte_write(*pte) ? 'w' : '-',
|
||||
pte_exec(*pte) ? 'x' : '-',
|
||||
pte_dirty(*pte) ? 'd' : '-',
|
||||
pte_young(*pte) ? 'a' : '-' );
|
||||
} else {
|
||||
DRM_PROC_PRINT(" 0x%08lx\n", i);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(vma_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_vma_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
static int DRM(_histo_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
int i;
|
||||
unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
|
||||
unsigned long prev_value = 0;
|
||||
drm_buf_t *buffer;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("general statistics:\n");
|
||||
DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total));
|
||||
DRM_PROC_PRINT("open %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_OPENS]));
|
||||
DRM_PROC_PRINT("close %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_CLOSES]));
|
||||
DRM_PROC_PRINT("ioctl %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_IOCTLS]));
|
||||
|
||||
DRM_PROC_PRINT("\nlock statistics:\n");
|
||||
DRM_PROC_PRINT("locks %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_LOCKS]));
|
||||
DRM_PROC_PRINT("unlocks %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_UNLOCKS]));
|
||||
|
||||
if (dma) {
|
||||
#if 0
|
||||
DRM_PROC_PRINT("\ndma statistics:\n");
|
||||
DRM_PROC_PRINT("prio %10u\n",
|
||||
atomic_read(&dma->total_prio));
|
||||
DRM_PROC_PRINT("bytes %10u\n",
|
||||
atomic_read(&dma->total_bytes));
|
||||
DRM_PROC_PRINT("dmas %10u\n",
|
||||
atomic_read(&dma->total_dmas));
|
||||
DRM_PROC_PRINT("missed:\n");
|
||||
DRM_PROC_PRINT(" dma %10u\n",
|
||||
atomic_read(&dma->total_missed_dma));
|
||||
DRM_PROC_PRINT(" lock %10u\n",
|
||||
atomic_read(&dma->total_missed_lock));
|
||||
DRM_PROC_PRINT(" free %10u\n",
|
||||
atomic_read(&dma->total_missed_free));
|
||||
DRM_PROC_PRINT(" sched %10u\n",
|
||||
atomic_read(&dma->total_missed_sched));
|
||||
DRM_PROC_PRINT("tried %10u\n",
|
||||
atomic_read(&dma->total_tried));
|
||||
DRM_PROC_PRINT("hit %10u\n",
|
||||
atomic_read(&dma->total_hit));
|
||||
DRM_PROC_PRINT("lost %10u\n",
|
||||
atomic_read(&dma->total_lost));
|
||||
#endif
|
||||
|
||||
buffer = dma->next_buffer;
|
||||
if (buffer) {
|
||||
DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx);
|
||||
} else {
|
||||
DRM_PROC_PRINT("next_buffer none\n");
|
||||
}
|
||||
buffer = dma->this_buffer;
|
||||
if (buffer) {
|
||||
DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx);
|
||||
} else {
|
||||
DRM_PROC_PRINT("this_buffer none\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
DRM_PROC_PRINT("\nvalues:\n");
|
||||
if (dev->lock.hw_lock) {
|
||||
DRM_PROC_PRINT("lock 0x%08x\n",
|
||||
dev->lock.hw_lock->lock);
|
||||
} else {
|
||||
DRM_PROC_PRINT("lock none\n");
|
||||
}
|
||||
DRM_PROC_PRINT("context_flag 0x%08lx\n", dev->context_flag);
|
||||
DRM_PROC_PRINT("interrupt_flag 0x%08lx\n", dev->interrupt_flag);
|
||||
DRM_PROC_PRINT("dma_flag 0x%08lx\n", dev->dma_flag);
|
||||
|
||||
DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count);
|
||||
DRM_PROC_PRINT("last_context %10d\n", dev->last_context);
|
||||
DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch);
|
||||
DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked);
|
||||
|
||||
|
||||
DRM_PROC_PRINT("\n q2d d2c c2f"
|
||||
" q2c q2f dma sch"
|
||||
" ctx lacq lhld\n\n");
|
||||
for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
|
||||
DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u"
|
||||
" %10u %10u %10u %10u %10u\n",
|
||||
i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
|
||||
i == DRM_DMA_HISTOGRAM_SLOTS - 1
|
||||
? prev_value : slot_value ,
|
||||
|
||||
atomic_read(&dev->histo
|
||||
.queued_to_dispatched[i]),
|
||||
atomic_read(&dev->histo
|
||||
.dispatched_to_completed[i]),
|
||||
atomic_read(&dev->histo
|
||||
.completed_to_freed[i]),
|
||||
|
||||
atomic_read(&dev->histo
|
||||
.queued_to_completed[i]),
|
||||
atomic_read(&dev->histo
|
||||
.queued_to_freed[i]),
|
||||
atomic_read(&dev->histo.dma[i]),
|
||||
atomic_read(&dev->histo.schedule[i]),
|
||||
atomic_read(&dev->histo.ctx[i]),
|
||||
atomic_read(&dev->histo.lacq[i]),
|
||||
atomic_read(&dev->histo.lhld[i]));
|
||||
prev_value = slot_value;
|
||||
slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(histo_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_histo_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
#endif
416
linux-core/drm_vm.c
Normal file
@@ -0,0 +1,416 @@
/* drm_vm.h -- Memory mapping for DRM -*- linux-c -*-
|
||||
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
struct vm_operations_struct drm_vm_ops = {
|
||||
nopage: DRM(vm_nopage),
|
||||
open: DRM(vm_open),
|
||||
close: DRM(vm_close),
|
||||
};
|
||||
|
||||
struct vm_operations_struct drm_vm_shm_ops = {
|
||||
nopage: DRM(vm_shm_nopage),
|
||||
open: DRM(vm_open),
|
||||
close: DRM(vm_shm_close),
|
||||
};
|
||||
|
||||
struct vm_operations_struct drm_vm_dma_ops = {
|
||||
nopage: DRM(vm_dma_nopage),
|
||||
open: DRM(vm_open),
|
||||
close: DRM(vm_close),
|
||||
};
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#else
|
||||
/* Return type changed in 2.3.23 */
|
||||
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#endif
|
||||
{
|
||||
return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#else
|
||||
/* Return type changed in 2.3.23 */
|
||||
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#endif
|
||||
{
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
|
||||
#else
|
||||
drm_map_t *map = (drm_map_t *)vma->vm_pte;
|
||||
#endif
|
||||
unsigned long physical;
|
||||
unsigned long offset;
|
||||
unsigned long i;
|
||||
pgd_t *pgd;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!map) return NOPAGE_OOM; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start;
|
||||
i = (unsigned long)map->handle + offset;
|
||||
/* We have to walk page tables here because we need large SAREA's, and
|
||||
* they need to be virtually contiguous in kernel space.
|
||||
*/
|
||||
pgd = pgd_offset_k( i );
|
||||
if( !pgd_present( *pgd ) ) return NOPAGE_OOM;
|
||||
pmd = pmd_offset( pgd, i );
|
||||
if( !pmd_present( *pmd ) ) return NOPAGE_OOM;
|
||||
pte = pte_offset( pmd, i );
|
||||
if( !pte_present( *pte ) ) return NOPAGE_OOM;
|
||||
physical = (unsigned long)pte_page( *pte )->virtual;
|
||||
atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
|
||||
|
||||
DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
return physical;
|
||||
#else
|
||||
return virt_to_page(physical);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Special close routine which deletes map information if we are the last
|
||||
* person to close a mapping and it's not in the global maplist.
|
||||
*/
|
||||
|
||||
void DRM(vm_shm_close)(struct vm_area_struct *vma)
|
||||
{
|
||||
drm_file_t *priv = vma->vm_file->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_vma_entry_t *pt, *prev;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
int found_maps = 0;
|
||||
|
||||
DRM_DEBUG("0x%08lx,0x%08lx\n",
|
||||
vma->vm_start, vma->vm_end - vma->vm_start);
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
atomic_dec(&dev->vma_count);
|
||||
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
map = vma->vm_private_data;
|
||||
#else
|
||||
map = vma->vm_pte;
|
||||
#endif
|
||||
|
||||
down(&dev->struct_sem);
|
||||
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
if (pt->vma->vm_private_data == map) found_maps++;
|
||||
#else
|
||||
if (pt->vma->vm_pte == map) found_maps++;
|
||||
#endif
|
||||
if (pt->vma == vma) {
|
||||
if (prev) {
|
||||
prev->next = pt->next;
|
||||
} else {
|
||||
dev->vmalist = pt->next;
|
||||
}
|
||||
DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
|
||||
}
|
||||
}
|
||||
/* We were the only map that was found */
|
||||
if(found_maps == 1 &&
|
||||
map->flags & _DRM_REMOVABLE) {
|
||||
/* Check to see if we are in the maplist, if we are not, then
|
||||
* we delete this mapping's information.
|
||||
*/
|
||||
found_maps = 0;
|
||||
list = &dev->maplist->head;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *) list;
|
||||
if (r_list->map == map) found_maps++;
|
||||
}
|
||||
|
||||
if(!found_maps) {
|
||||
switch (map->type) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if (map->mtrr >= 0) {
|
||||
int retcode;
|
||||
retcode = mtrr_del(map->mtrr,
|
||||
map->offset,
|
||||
map->size);
|
||||
DRM_DEBUG("mtrr_del = %d\n", retcode);
|
||||
}
|
||||
#endif
|
||||
DRM(ioremapfree)(map->handle, map->size);
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vfree(map->handle);
|
||||
break;
|
||||
case _DRM_AGP:
|
||||
break;
|
||||
}
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
}
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020317
unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
				 unsigned long address,
				 int write_access)
#else
/* Return type changed in 2.3.23 */
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
#endif
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long physical;
	unsigned long offset;
	unsigned long page;

	if (!dma) return NOPAGE_SIGBUS; /* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dma->pagelist) return NOPAGE_OOM; /* Nothing allocated */

	offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
	page = offset >> PAGE_SHIFT;
	physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
	atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return virt_to_page(physical);
#endif
}

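/*
 * Illustrative sketch (not part of this commit): the fault arithmetic used
 * by DRM(vm_dma_nopage) above, reproduced as a stand-alone user-space
 * program.  The 4 KB page size and the pagelist contents are assumptions
 * invented for the example.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

int main(void)
{
	/* Pretend three pages back a DMA buffer mapping. */
	unsigned long pagelist[3] = { 0x10000000UL, 0x10200000UL, 0x10400000UL };
	unsigned long vm_start = 0x40000000UL;
	unsigned long address  = 0x40001234UL;	/* faulting address */

	unsigned long offset   = address - vm_start;
	unsigned long page     = offset >> EX_PAGE_SHIFT;
	unsigned long physical = pagelist[page] + (offset & ~EX_PAGE_MASK);

	/* Prints offset=0x1234 page=1 physical=0x10200234 */
	printf("offset=0x%lx page=%lu physical=0x%lx\n", offset, page, physical);
	return 0;
}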
void DRM(vm_open)(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);
#if LINUX_VERSION_CODE < 0x020333
	/* The map can exist after the fd is closed. */
	MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif

	vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		up(&dev->struct_sem);
	}
}

void DRM(vm_close)(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
	atomic_dec(&dev->vma_count);

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
}

int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops = &drm_vm_dma_ops;
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
	/* In Linux 2.2.3 and above, this is
	   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp; /* Needed for drm_vm_open() */
	DRM(vm_open)(vma);
	return 0;
}

int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);

	/* A sequential search of a linked list is
	   fine here because: 1) there will only be
	   about 5-10 entries in the list, and 2) a
	   DRI client only has to do this mapping
	   once, so it doesn't have to be optimized
	   for performance, even if the list were a
	   bit longer. */
	list_for_each(list, &dev->maplist->head) {
		r_list = (drm_map_list_t *)list;
		map = r_list->map;
		if (!map) continue;
		if (map->offset == VM_OFFSET(vma)) break;
	}

	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= VM_MAYWRITE;
#if defined(__i386__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
	case _DRM_AGP:
		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__)
			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
			}
#elif defined(__ia64__)
			if (map->type != _DRM_AGP)
				vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#endif
			vma->vm_flags |= VM_IO; /* not in core dump */
		}
		if (remap_page_range(vma->vm_start,
				     VM_OFFSET(vma),
				     vma->vm_end - vma->vm_start,
				     vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
#if LINUX_VERSION_CODE >= 0x020300
		vma->vm_private_data = (void *)map;
#else
		vma->vm_pte = (unsigned long)map;
#endif
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_LOCKED;
		break;
	default:
		return -EINVAL; /* This should never happen. */
	}
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
	/* In Linux 2.2.3 and above, this is
	   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp; /* Needed for drm_vm_open() */
	DRM(vm_open)(vma);
	return 0;
}
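/*
 * Hypothetical user-space view (not part of this commit): how a client
 * would reach DRM(mmap) / DRM(mmap_dma) above.  The device node path and
 * the mapping length are assumptions; a real DRI client obtains the offset
 * and size from the X server / libdrm rather than hard-coding them.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Offset 0 selects DRM(mmap_dma): the DMA buffer pool. */
	size_t length = 8 * 4096;	/* assumed: pool spans 8 pages */
	void *buf = mmap(NULL, length, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		perror("mmap");
	else
		munmap(buf, length);

	close(fd);
	return 0;
}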
829
linux/drm_bufs.h
Normal file
@@ -0,0 +1,829 @@
/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
|
||||
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
|
||||
*
|
||||
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include <linux/vmalloc.h>
|
||||
#include "drmP.h"
|
||||
|
||||
#ifndef __HAVE_PCI_DMA
|
||||
#define __HAVE_PCI_DMA 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_BUF_PRIV_T
|
||||
#define DRIVER_BUF_PRIV_T u32
|
||||
#endif
|
||||
#ifndef DRIVER_AGP_BUFFERS_MAP
|
||||
#if __HAVE_AGP && __HAVE_DMA
|
||||
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
|
||||
#else
|
||||
#define DRIVER_AGP_BUFFERS_MAP( dev ) NULL
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Compute order. Can be made faster.
|
||||
*/
|
||||
int DRM(order)( unsigned long size )
|
||||
{
|
||||
int order;
|
||||
unsigned long tmp;
|
||||
|
||||
for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
|
||||
|
||||
if ( size & ~(1 << order) )
|
||||
++order;
|
||||
|
||||
return order;
|
||||
}
|
||||
|
||||
int DRM(addmap)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *list;
|
||||
|
||||
if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
|
||||
|
||||
map = DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
|
||||
if ( !map )
|
||||
return -ENOMEM;
|
||||
|
||||
if ( copy_from_user( map, (drm_map_t *)arg, sizeof(*map) ) ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* Only allow shared memory to be removable since we only keep enough
|
||||
* book keeping information about shared memory to allow for removal
|
||||
* when processes fork.
|
||||
*/
|
||||
if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
|
||||
map->offset, map->size, map->type );
|
||||
if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
map->mtrr = -1;
|
||||
map->handle = 0;
|
||||
|
||||
switch ( map->type ) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#ifndef __sparc__
|
||||
if ( map->offset + map->size < map->offset ||
|
||||
map->offset < virt_to_phys(high_memory) ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if ( map->type == _DRM_FRAME_BUFFER ||
|
||||
(map->flags & _DRM_WRITE_COMBINING) ) {
|
||||
map->mtrr = mtrr_add( map->offset, map->size,
|
||||
MTRR_TYPE_WRCOMB, 1 );
|
||||
}
|
||||
#endif
|
||||
map->handle = DRM(ioremap)( map->offset, map->size );
|
||||
break;
|
||||
|
||||
case _DRM_SHM:
|
||||
map->handle = vmalloc_32(map->size);
|
||||
DRM_DEBUG( "%ld %d %p\n",
|
||||
map->size, DRM(order)( map->size ), map->handle );
|
||||
if ( !map->handle ) {
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -ENOMEM;
|
||||
}
|
||||
map->offset = (unsigned long)map->handle;
|
||||
if ( map->flags & _DRM_CONTAINS_LOCK ) {
|
||||
dev->lock.hw_lock = map->handle; /* Pointer to lock */
|
||||
}
|
||||
break;
|
||||
#if __REALLY_HAVE_AGP
|
||||
case _DRM_AGP:
|
||||
map->offset = map->offset + dev->agp->base;
|
||||
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
|
||||
if(!list) {
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
return -EINVAL;
|
||||
}
|
||||
memset(list, 0, sizeof(*list));
|
||||
list->map = map;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
list_add(&list->head, &dev->maplist->head);
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if ( copy_to_user( (drm_map_t *)arg, map, sizeof(*map) ) )
|
||||
return -EFAULT;
|
||||
if ( map->type != _DRM_SHM ) {
|
||||
if ( copy_to_user( &((drm_map_t *)arg)->handle,
|
||||
&map->offset,
|
||||
sizeof(map->offset) ) )
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* Remove a map private from list and deallocate resources if the mapping
|
||||
* isn't in use.
|
||||
*/
|
||||
|
||||
int DRM(rmmap)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
struct list_head *list;
|
||||
drm_map_list_t *r_list;
|
||||
drm_vma_entry_t *pt, *prev;
|
||||
drm_map_t *map;
|
||||
drm_map_t request;
|
||||
int found_maps = 0;
|
||||
|
||||
if (copy_from_user(&request, (drm_map_t *)arg,
|
||||
sizeof(request))) {
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
down(&dev->struct_sem);
|
||||
list = &dev->maplist->head;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *) list;
|
||||
|
||||
if(r_list->map &&
|
||||
r_list->map->handle == request.handle &&
|
||||
r_list->map->flags & _DRM_REMOVABLE) break;
|
||||
}
|
||||
|
||||
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
|
||||
if(list == (&dev->maplist->head)) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
map = r_list->map;
|
||||
list_del(list);
|
||||
DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);
|
||||
|
||||
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
if (pt->vma->vm_private_data == map) found_maps++;
|
||||
#else
|
||||
if (pt->vma->vm_pte == map) found_maps++;
|
||||
#endif
|
||||
}
|
||||
|
||||
if(!found_maps) {
|
||||
switch (map->type) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if (map->mtrr >= 0) {
|
||||
int retcode;
|
||||
retcode = mtrr_del(map->mtrr,
|
||||
map->offset,
|
||||
map->size);
|
||||
DRM_DEBUG("mtrr_del = %d\n", retcode);
|
||||
}
|
||||
#endif
|
||||
DRM(ioremapfree)(map->handle, map->size);
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vfree(map->handle);
|
||||
break;
|
||||
case _DRM_AGP:
|
||||
break;
|
||||
}
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
int DRM(addbufs_agp)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_desc_t request;
|
||||
drm_buf_entry_t *entry;
|
||||
drm_buf_t *buf;
|
||||
unsigned long offset;
|
||||
unsigned long agp_offset;
|
||||
int count;
|
||||
int order;
|
||||
int size;
|
||||
int alignment;
|
||||
int page_order;
|
||||
int total;
|
||||
int byte_count;
|
||||
int i;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
count = request.count;
|
||||
order = DRM(order)( request.size );
|
||||
size = 1 << order;
|
||||
|
||||
alignment = (request.flags & _DRM_PAGE_ALIGN)
|
||||
? PAGE_ALIGN(size) : size;
|
||||
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
|
||||
total = PAGE_SIZE << page_order;
|
||||
|
||||
byte_count = 0;
|
||||
agp_offset = dev->agp->base + request.agp_start;
|
||||
|
||||
DRM_DEBUG( "count: %d\n", count );
|
||||
DRM_DEBUG( "order: %d\n", order );
|
||||
DRM_DEBUG( "size: %d\n", size );
|
||||
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
|
||||
DRM_DEBUG( "alignment: %d\n", alignment );
|
||||
DRM_DEBUG( "page_order: %d\n", page_order );
|
||||
DRM_DEBUG( "total: %d\n", total );
|
||||
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( dev->buf_use ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
atomic_inc( &dev->buf_alloc );
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
entry = &dma->bufs[order];
|
||||
if ( entry->buf_count ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM; /* May only call once for each order */
|
||||
}
|
||||
|
||||
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
if ( !entry->buflist ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
|
||||
|
||||
entry->buf_size = size;
|
||||
entry->page_order = page_order;
|
||||
|
||||
offset = 0;
|
||||
|
||||
while ( entry->buf_count < count ) {
|
||||
buf = &entry->buflist[entry->buf_count];
|
||||
buf->idx = dma->buf_count + entry->buf_count;
|
||||
buf->total = alignment;
|
||||
buf->order = order;
|
||||
buf->used = 0;
|
||||
|
||||
buf->offset = (dma->byte_count + offset);
|
||||
buf->bus_address = agp_offset + offset;
|
||||
buf->address = (void *)(agp_offset + offset);
|
||||
buf->next = NULL;
|
||||
buf->waiting = 0;
|
||||
buf->pending = 0;
|
||||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->pid = 0;
|
||||
|
||||
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
|
||||
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
|
||||
DRM_MEM_BUFS );
|
||||
memset( buf->dev_private, 0, buf->dev_priv_size );
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
buf->time_queued = 0;
|
||||
buf->time_dispatched = 0;
|
||||
buf->time_completed = 0;
|
||||
buf->time_freed = 0;
|
||||
#endif
|
||||
DRM_DEBUG( "buffer %d @ %p\n",
|
||||
entry->buf_count, buf->address );
|
||||
|
||||
offset += alignment;
|
||||
entry->buf_count++;
|
||||
byte_count += PAGE_SIZE << page_order;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "byte_count: %d\n", byte_count );
|
||||
|
||||
dma->buflist = DRM(realloc)( dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
(dma->buf_count + entry->buf_count)
|
||||
* sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
|
||||
}
|
||||
|
||||
dma->buf_count += entry->buf_count;
|
||||
dma->byte_count += byte_count;
|
||||
|
||||
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
|
||||
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
#endif
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
request.size = size;
|
||||
|
||||
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
dma->flags = _DRM_DMA_USE_AGP;
|
||||
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return 0;
|
||||
}
|
||||
#endif /* __REALLY_HAVE_AGP */
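/*
 * Worked example (illustrative, user-space; not part of this commit): the
 * size bookkeeping the addbufs paths perform for a request of 64 KB buffers
 * with _DRM_PAGE_ALIGN set, assuming 4 KB pages.  DRM(order)(65536)
 * evaluates to 16, so the figures below follow directly.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

int main(void)
{
	int order = 16;					/* DRM(order)(65536) */
	unsigned long size = 1UL << order;		/* 65536 */
	unsigned long alignment = size;			/* already a page multiple */
	int page_order = order > EX_PAGE_SHIFT ? order - EX_PAGE_SHIFT : 0;
	unsigned long total = EX_PAGE_SIZE << page_order;	/* bytes per allocation */

	/* Prints size=65536 alignment=65536 page_order=4 total=65536 */
	printf("size=%lu alignment=%lu page_order=%d total=%lu\n",
	       size, alignment, page_order, total);
	return 0;
}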
|
||||
|
||||
#if __HAVE_PCI_DMA
|
||||
int DRM(addbufs_pci)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_desc_t request;
|
||||
int count;
|
||||
int order;
|
||||
int size;
|
||||
int total;
|
||||
int page_order;
|
||||
drm_buf_entry_t *entry;
|
||||
unsigned long page;
|
||||
drm_buf_t *buf;
|
||||
int alignment;
|
||||
unsigned long offset;
|
||||
int i;
|
||||
int byte_count;
|
||||
int page_count;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
count = request.count;
|
||||
order = DRM(order)( request.size );
|
||||
size = 1 << order;
|
||||
|
||||
DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
|
||||
request.count, request.size, size,
|
||||
order, dev->queue_count );
|
||||
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
if ( dev->queue_count ) return -EBUSY; /* Not while in use */
|
||||
|
||||
alignment = (request.flags & _DRM_PAGE_ALIGN)
|
||||
? PAGE_ALIGN(size) : size;
|
||||
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
|
||||
total = PAGE_SIZE << page_order;
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( dev->buf_use ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
atomic_inc( &dev->buf_alloc );
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
entry = &dma->bufs[order];
|
||||
if ( entry->buf_count ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM; /* May only call once for each order */
|
||||
}
|
||||
|
||||
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
if ( !entry->buflist ) {
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
|
||||
|
||||
entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
|
||||
DRM_MEM_SEGS );
|
||||
if ( !entry->seglist ) {
|
||||
DRM(free)( entry->buflist,
|
||||
count * sizeof(*entry->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
up( &dev->struct_sem );
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
|
||||
|
||||
dma->pagelist = DRM(realloc)( dma->pagelist,
|
||||
dma->page_count * sizeof(*dma->pagelist),
|
||||
(dma->page_count + (count << page_order))
|
||||
* sizeof(*dma->pagelist),
|
||||
DRM_MEM_PAGES );
|
||||
DRM_DEBUG( "pagelist: %d entries\n",
|
||||
dma->page_count + (count << page_order) );
|
||||
|
||||
entry->buf_size = size;
|
||||
entry->page_order = page_order;
|
||||
byte_count = 0;
|
||||
page_count = 0;
|
||||
|
||||
while ( entry->buf_count < count ) {
|
||||
page = DRM(alloc_pages)( page_order, DRM_MEM_DMA );
|
||||
if ( !page ) break;
|
||||
entry->seglist[entry->seg_count++] = page;
|
||||
for ( i = 0 ; i < (1 << page_order) ; i++ ) {
|
||||
DRM_DEBUG( "page %d @ 0x%08lx\n",
|
||||
dma->page_count + page_count,
|
||||
page + PAGE_SIZE * i );
|
||||
dma->pagelist[dma->page_count + page_count++]
|
||||
= page + PAGE_SIZE * i;
|
||||
}
|
||||
for ( offset = 0 ;
|
||||
offset + size <= total && entry->buf_count < count ;
|
||||
offset += alignment, ++entry->buf_count ) {
|
||||
buf = &entry->buflist[entry->buf_count];
|
||||
buf->idx = dma->buf_count + entry->buf_count;
|
||||
buf->total = alignment;
|
||||
buf->order = order;
|
||||
buf->used = 0;
|
||||
buf->offset = (dma->byte_count + byte_count + offset);
|
||||
buf->address = (void *)(page + offset);
|
||||
buf->next = NULL;
|
||||
buf->waiting = 0;
|
||||
buf->pending = 0;
|
||||
init_waitqueue_head( &buf->dma_wait );
|
||||
buf->pid = 0;
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
buf->time_queued = 0;
|
||||
buf->time_dispatched = 0;
|
||||
buf->time_completed = 0;
|
||||
buf->time_freed = 0;
|
||||
#endif
|
||||
DRM_DEBUG( "buffer %d @ %p\n",
|
||||
entry->buf_count, buf->address );
|
||||
}
|
||||
byte_count += PAGE_SIZE << page_order;
|
||||
}
|
||||
|
||||
dma->buflist = DRM(realloc)( dma->buflist,
|
||||
dma->buf_count * sizeof(*dma->buflist),
|
||||
(dma->buf_count + entry->buf_count)
|
||||
* sizeof(*dma->buflist),
|
||||
DRM_MEM_BUFS );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
|
||||
}
|
||||
|
||||
dma->buf_count += entry->buf_count;
|
||||
dma->seg_count += entry->seg_count;
|
||||
dma->page_count += entry->seg_count << page_order;
|
||||
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
|
||||
|
||||
#if __HAVE_DMA_FREELIST
|
||||
DRM(freelist_create)( &entry->freelist, entry->buf_count );
|
||||
for ( i = 0 ; i < entry->buf_count ; i++ ) {
|
||||
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
|
||||
}
|
||||
#endif
|
||||
up( &dev->struct_sem );
|
||||
|
||||
request.count = entry->buf_count;
|
||||
request.size = size;
|
||||
|
||||
if ( copy_to_user( (drm_buf_desc_t *)arg, &request, sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
atomic_dec( &dev->buf_alloc );
|
||||
return 0;
|
||||
}
|
||||
#endif /* __HAVE_PCI_DMA */
|
||||
|
||||
int DRM(addbufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_buf_desc_t request;
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if ( request.flags & _DRM_AGP_BUFFER )
|
||||
return DRM(addbufs_agp)( inode, filp, cmd, arg );
|
||||
else
|
||||
#endif
|
||||
#if __HAVE_PCI_DMA
|
||||
return DRM(addbufs_pci)( inode, filp, cmd, arg );
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
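/*
 * Hypothetical caller's view (not part of this commit): requesting DMA
 * buffers through the DRM(addbufs) dispatcher above.  The device path,
 * buffer count and size are assumptions; the drm_buf_desc_t fields and the
 * DRM_IOCTL_ADD_BUFS name follow the drm.h header used by this tree.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "drm.h"

int main(void)
{
	drm_buf_desc_t request;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&request, 0, sizeof(request));
	request.count = 32;		/* ask for 32 buffers ... */
	request.size  = 65536;		/* ... of 64 KB each */
	request.flags = _DRM_PAGE_ALIGN;

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request) < 0)
		perror("DRM_IOCTL_ADD_BUFS");
	else
		printf("got %d buffers of %d bytes\n", request.count, request.size);

	close(fd);
	return 0;
}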
|
||||
|
||||
int DRM(infobufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_info_t request;
|
||||
int i;
|
||||
int count;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( atomic_read( &dev->buf_alloc ) ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
++dev->buf_use; /* Can't allocate more after this call */
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
if ( copy_from_user( &request,
|
||||
(drm_buf_info_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
|
||||
if ( dma->bufs[i].buf_count ) ++count;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "count = %d\n", count );
|
||||
|
||||
if ( request.count >= count ) {
|
||||
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
|
||||
if ( dma->bufs[i].buf_count ) {
|
||||
drm_buf_desc_t *to = &request.list[count];
|
||||
drm_buf_entry_t *from = &dma->bufs[i];
|
||||
drm_freelist_t *list = &dma->bufs[i].freelist;
|
||||
if ( copy_to_user( &to->count,
|
||||
&from->buf_count,
|
||||
sizeof(from->buf_count) ) ||
|
||||
copy_to_user( &to->size,
|
||||
&from->buf_size,
|
||||
sizeof(from->buf_size) ) ||
|
||||
copy_to_user( &to->low_mark,
|
||||
&list->low_mark,
|
||||
sizeof(list->low_mark) ) ||
|
||||
copy_to_user( &to->high_mark,
|
||||
&list->high_mark,
|
||||
sizeof(list->high_mark) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d %d %d %d %d\n",
|
||||
i,
|
||||
dma->bufs[i].buf_count,
|
||||
dma->bufs[i].buf_size,
|
||||
dma->bufs[i].freelist.low_mark,
|
||||
dma->bufs[i].freelist.high_mark );
|
||||
++count;
|
||||
}
|
||||
}
|
||||
}
|
||||
request.count = count;
|
||||
|
||||
if ( copy_to_user( (drm_buf_info_t *)arg,
|
||||
&request,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(markbufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_desc_t request;
|
||||
int order;
|
||||
drm_buf_entry_t *entry;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request,
|
||||
(drm_buf_desc_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d, %d, %d\n",
|
||||
request.size, request.low_mark, request.high_mark );
|
||||
order = DRM(order)( request.size );
|
||||
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
|
||||
entry = &dma->bufs[order];
|
||||
|
||||
if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
|
||||
return -EINVAL;
|
||||
if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
|
||||
return -EINVAL;
|
||||
|
||||
entry->freelist.low_mark = request.low_mark;
|
||||
entry->freelist.high_mark = request.high_mark;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(freebufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
drm_buf_free_t request;
|
||||
int i;
|
||||
int idx;
|
||||
drm_buf_t *buf;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
if ( copy_from_user( &request,
|
||||
(drm_buf_free_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", request.count );
|
||||
for ( i = 0 ; i < request.count ; i++ ) {
|
||||
if ( copy_from_user( &idx,
|
||||
&request.list[i],
|
||||
sizeof(idx) ) )
|
||||
return -EFAULT;
|
||||
if ( idx < 0 || idx >= dma->buf_count ) {
|
||||
DRM_ERROR( "Index %d (of %d max)\n",
|
||||
idx, dma->buf_count - 1 );
|
||||
return -EINVAL;
|
||||
}
|
||||
buf = dma->buflist[idx];
|
||||
if ( buf->pid != current->pid ) {
|
||||
DRM_ERROR( "Process %d freeing buffer owned by %d\n",
|
||||
current->pid, buf->pid );
|
||||
return -EINVAL;
|
||||
}
|
||||
DRM(free_buffer)( dev, buf );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(mapbufs)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
int retcode = 0;
|
||||
const int zero = 0;
|
||||
unsigned long virtual;
|
||||
unsigned long address;
|
||||
drm_buf_map_t request;
|
||||
int i;
|
||||
|
||||
if ( !dma ) return -EINVAL;
|
||||
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( atomic_read( &dev->buf_alloc ) ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return -EBUSY;
|
||||
}
|
||||
dev->buf_use++; /* Can't allocate more after this call */
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
if ( copy_from_user( &request, (drm_buf_map_t *)arg,
|
||||
sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( request.count >= dma->buf_count ) {
|
||||
if ( __HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP) ) {
|
||||
drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );
|
||||
|
||||
if ( !map ) {
|
||||
retcode = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
down( ¤t->mm->mmap_sem );
|
||||
virtual = do_mmap( filp, 0, map->size,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED,
|
||||
(unsigned long)map->offset );
|
||||
up( ¤t->mm->mmap_sem );
|
||||
} else {
|
||||
down( ¤t->mm->mmap_sem );
|
||||
virtual = do_mmap( filp, 0, dma->byte_count,
|
||||
PROT_READ | PROT_WRITE,
|
||||
MAP_SHARED, 0 );
|
||||
up( ¤t->mm->mmap_sem );
|
||||
}
|
||||
if ( virtual > -1024UL ) {
|
||||
/* Real error */
|
||||
retcode = (signed long)virtual;
|
||||
goto done;
|
||||
}
|
||||
request.virtual = (void *)virtual;
|
||||
|
||||
for ( i = 0 ; i < dma->buf_count ; i++ ) {
|
||||
if ( copy_to_user( &request.list[i].idx,
|
||||
&dma->buflist[i]->idx,
|
||||
sizeof(request.list[0].idx) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
if ( copy_to_user( &request.list[i].total,
|
||||
&dma->buflist[i]->total,
|
||||
sizeof(request.list[0].total) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
if ( copy_to_user( &request.list[i].used,
|
||||
&zero,
|
||||
sizeof(zero) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
address = virtual + dma->buflist[i]->offset; /* *** */
|
||||
if ( copy_to_user( &request.list[i].address,
|
||||
&address,
|
||||
sizeof(address) ) ) {
|
||||
retcode = -EFAULT;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
}
|
||||
done:
|
||||
request.count = dma->buf_count;
|
||||
DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
|
||||
|
||||
if ( copy_to_user( (drm_buf_map_t *)arg, &request, sizeof(request) ) )
|
||||
return -EFAULT;
|
||||
|
||||
return retcode;
|
||||
}
|
||||
|
||||
#endif /* __HAVE_DMA */
|
||||
760
linux/drm_context.h
Normal file
@@ -0,0 +1,760 @@
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
|
||||
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
|
||||
*
|
||||
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
#if __HAVE_CTX_BITMAP
|
||||
|
||||
/* ================================================================
|
||||
* Context bitmap support
|
||||
*/
|
||||
|
||||
void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
|
||||
{
|
||||
if ( ctx_handle < 0 ) goto failed;
|
||||
if ( !dev->ctx_bitmap ) goto failed;
|
||||
|
||||
if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
|
||||
down(&dev->struct_sem);
|
||||
clear_bit( ctx_handle, dev->ctx_bitmap );
|
||||
dev->context_sareas[ctx_handle] = NULL;
|
||||
up(&dev->struct_sem);
|
||||
return;
|
||||
}
|
||||
failed:
|
||||
DRM_ERROR( "Attempt to free invalid context handle: %d\n",
|
||||
ctx_handle );
|
||||
return;
|
||||
}
|
||||
|
||||
int DRM(ctxbitmap_next)( drm_device_t *dev )
|
||||
{
|
||||
int bit;
|
||||
|
||||
if(!dev->ctx_bitmap) return -1;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
|
||||
if ( bit < DRM_MAX_CTXBITMAP ) {
|
||||
set_bit( bit, dev->ctx_bitmap );
|
||||
DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
|
||||
if((bit+1) > dev->max_context) {
|
||||
dev->max_context = (bit+1);
|
||||
if(dev->context_sareas) {
|
||||
dev->context_sareas = DRM(realloc)(
|
||||
dev->context_sareas,
|
||||
(dev->max_context - 1) *
|
||||
sizeof(*dev->context_sareas),
|
||||
dev->max_context *
|
||||
sizeof(*dev->context_sareas),
|
||||
DRM_MEM_MAPS);
|
||||
dev->context_sareas[bit] = NULL;
|
||||
} else {
|
||||
/* max_context == 1 at this point */
|
||||
dev->context_sareas = DRM(alloc)(
|
||||
dev->max_context *
|
||||
sizeof(*dev->context_sareas),
|
||||
DRM_MEM_MAPS);
|
||||
dev->context_sareas[bit] = NULL;
|
||||
}
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
return bit;
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int DRM(ctxbitmap_init)( drm_device_t *dev )
|
||||
{
|
||||
int i;
|
||||
int temp;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
|
||||
DRM_MEM_CTXBITMAP );
|
||||
if ( dev->ctx_bitmap == NULL ) {
|
||||
up(&dev->struct_sem);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
|
||||
dev->context_sareas = NULL;
|
||||
dev->max_context = -1;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
|
||||
temp = DRM(ctxbitmap_next)( dev );
|
||||
DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
|
||||
{
|
||||
down(&dev->struct_sem);
|
||||
if( dev->context_sareas ) DRM(free)( dev->context_sareas,
|
||||
sizeof(*dev->context_sareas) *
|
||||
dev->max_context,
|
||||
DRM_MEM_MAPS );
|
||||
DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
|
||||
up(&dev->struct_sem);
|
||||
}
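/*
 * Minimal sketch (illustrative, user-space; not part of this commit) of the
 * allocation discipline the context bitmap implements above: hand out the
 * first clear bit, keep bit 0 for the kernel context, and clear a bit to
 * free the handle.  CTX_MAX is an arbitrary stand-in for DRM_MAX_CTXBITMAP.
 */
#include <stdio.h>

#define CTX_MAX 64

static unsigned char ctx_bitmap[CTX_MAX];

static int ctx_next(void)
{
	int bit;

	for (bit = 0; bit < CTX_MAX; bit++) {
		if (!ctx_bitmap[bit]) {
			ctx_bitmap[bit] = 1;
			return bit;
		}
	}
	return -1;			/* no free context */
}

static void ctx_free(int bit)
{
	if (bit >= 0 && bit < CTX_MAX)
		ctx_bitmap[bit] = 0;
}

int main(void)
{
	int kernel_ctx = ctx_next();	/* 0: reserved for the kernel */
	int client_ctx = ctx_next();	/* 1: first client handle */

	printf("kernel=%d client=%d\n", kernel_ctx, client_ctx);
	ctx_free(client_ctx);
	return 0;
}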
|
||||
|
||||
/* ================================================================
|
||||
* Per Context SAREA Support
|
||||
*/
|
||||
|
||||
int DRM(getsareactx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_priv_map_t request;
|
||||
drm_map_t *map;
|
||||
|
||||
if (copy_from_user(&request,
|
||||
(drm_ctx_priv_map_t *)arg,
|
||||
sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if ((int)request.ctx_id >= dev->max_context) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
map = dev->context_sareas[request.ctx_id];
|
||||
up(&dev->struct_sem);
|
||||
|
||||
request.handle = map->handle;
|
||||
if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(setsareactx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_priv_map_t request;
|
||||
drm_map_t *map = NULL;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
|
||||
if (copy_from_user(&request,
|
||||
(drm_ctx_priv_map_t *)arg,
|
||||
sizeof(request)))
|
||||
return -EFAULT;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
if(r_list->map &&
|
||||
r_list->map->handle == request.handle) break;
|
||||
}
|
||||
if (list == &(dev->maplist->head)) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
map = r_list->map;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (!map) return -EINVAL;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if ((int)request.ctx_id >= dev->max_context) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->context_sareas[request.ctx_id] = map;
|
||||
up(&dev->struct_sem);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
* The actual DRM context handling routines
|
||||
*/
|
||||
|
||||
int DRM(context_switch)( drm_device_t *dev, int old, int new )
|
||||
{
|
||||
char buf[64];
|
||||
|
||||
if ( test_and_set_bit( 0, &dev->context_flag ) ) {
|
||||
DRM_ERROR( "Reentering -- FIXME\n" );
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
dev->ctx_start = get_cycles();
|
||||
#endif
|
||||
|
||||
DRM_DEBUG( "Context switch from %d to %d\n", old, new );
|
||||
|
||||
if ( new == dev->last_context ) {
|
||||
clear_bit( 0, &dev->context_flag );
|
||||
return 0;
|
||||
}
|
||||
|
||||
if ( DRM(flags) & DRM_FLAG_NOCTX ) {
|
||||
DRM(context_switch_complete)( dev, new );
|
||||
} else {
|
||||
sprintf( buf, "C %d %d\n", old, new );
|
||||
DRM(write_string)( dev, buf );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(context_switch_complete)( drm_device_t *dev, int new )
|
||||
{
|
||||
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
|
||||
dev->last_switch = jiffies;
|
||||
|
||||
if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
|
||||
DRM_ERROR( "Lock isn't held after context switch\n" );
|
||||
}
|
||||
|
||||
/* If a context switch is ever initiated
|
||||
when the kernel holds the lock, release
|
||||
that lock here. */
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
atomic_inc( &dev->histo.ctx[DRM(histogram_slot)(get_cycles()
|
||||
- dev->ctx_start)] );
|
||||
|
||||
#endif
|
||||
clear_bit( 0, &dev->context_flag );
|
||||
wake_up( &dev->context_wait );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(resctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_ctx_res_t res;
|
||||
drm_ctx_t ctx;
|
||||
int i;
|
||||
|
||||
if ( copy_from_user( &res, (drm_ctx_res_t *)arg, sizeof(res) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( res.count >= DRM_RESERVED_CONTEXTS ) {
|
||||
memset( &ctx, 0, sizeof(ctx) );
|
||||
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
|
||||
ctx.handle = i;
|
||||
if ( copy_to_user( &res.contexts[i],
|
||||
&i, sizeof(i) ) )
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
res.count = DRM_RESERVED_CONTEXTS;
|
||||
|
||||
if ( copy_to_user( (drm_ctx_res_t *)arg, &res, sizeof(res) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(addctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
ctx.handle = DRM(ctxbitmap_next)( dev );
|
||||
if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
|
||||
/* Skip kernel's context and get a new one. */
|
||||
ctx.handle = DRM(ctxbitmap_next)( dev );
|
||||
}
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
if ( ctx.handle == -1 ) {
|
||||
DRM_DEBUG( "Not enough free contexts.\n" );
|
||||
/* Should this return -EBUSY instead? */
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(modctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
/* This does nothing */
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t*)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
/* This is 0, because we don't handle any context flags */
|
||||
ctx.flags = 0;
|
||||
|
||||
if ( copy_to_user( (drm_ctx_t*)arg, &ctx, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(switchctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
return DRM(context_switch)( dev, dev->last_context, ctx.handle );
|
||||
}
|
||||
|
||||
int DRM(newctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
DRM(context_switch_complete)( dev, ctx.handle );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(rmctx)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG( "%d\n", ctx.handle );
|
||||
if ( ctx.handle == DRM_KERNEL_CONTEXT + 1 ) {
|
||||
priv->remove_auth_on_close = 1;
|
||||
}
|
||||
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
|
||||
DRM(ctxbitmap_free)( dev, ctx.handle );
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#else /* __HAVE_CTX_BITMAP */
|
||||
|
||||
/* ================================================================
|
||||
* Old-style context support
|
||||
*/
|
||||
|
||||
|
||||
int DRM(context_switch)(drm_device_t *dev, int old, int new)
|
||||
{
|
||||
char buf[64];
|
||||
drm_queue_t *q;
|
||||
|
||||
#if 0
|
||||
atomic_inc(&dev->total_ctx);
|
||||
#endif
|
||||
|
||||
if (test_and_set_bit(0, &dev->context_flag)) {
|
||||
DRM_ERROR("Reentering -- FIXME\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
dev->ctx_start = get_cycles();
|
||||
#endif
|
||||
|
||||
DRM_DEBUG("Context switch from %d to %d\n", old, new);
|
||||
|
||||
if (new >= dev->queue_count) {
|
||||
clear_bit(0, &dev->context_flag);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (new == dev->last_context) {
|
||||
clear_bit(0, &dev->context_flag);
|
||||
return 0;
|
||||
}
|
||||
|
||||
q = dev->queuelist[new];
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
atomic_dec(&q->use_count);
|
||||
clear_bit(0, &dev->context_flag);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM(flags) & DRM_FLAG_NOCTX) {
|
||||
DRM(context_switch_complete)(dev, new);
|
||||
} else {
|
||||
sprintf(buf, "C %d %d\n", old, new);
|
||||
DRM(write_string)(dev, buf);
|
||||
}
|
||||
|
||||
atomic_dec(&q->use_count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(context_switch_complete)(drm_device_t *dev, int new)
|
||||
{
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
|
||||
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
|
||||
dev->last_switch = jiffies;
|
||||
|
||||
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
|
||||
DRM_ERROR("Lock isn't held after context switch\n");
|
||||
}
|
||||
|
||||
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
|
||||
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT)) {
|
||||
DRM_ERROR("Cannot free lock\n");
|
||||
}
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
atomic_inc(&dev->histo.ctx[DRM(histogram_slot)(get_cycles()
|
||||
- dev->ctx_start)]);
|
||||
|
||||
#endif
|
||||
clear_bit(0, &dev->context_flag);
|
||||
wake_up_interruptible(&dev->context_wait);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
|
||||
{
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (atomic_read(&q->use_count) != 1
|
||||
|| atomic_read(&q->finalization)
|
||||
|| atomic_read(&q->block_count)) {
|
||||
DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
|
||||
atomic_read(&q->use_count),
|
||||
atomic_read(&q->finalization),
|
||||
atomic_read(&q->block_count));
|
||||
}
|
||||
|
||||
atomic_set(&q->finalization, 0);
|
||||
atomic_set(&q->block_count, 0);
|
||||
atomic_set(&q->block_read, 0);
|
||||
atomic_set(&q->block_write, 0);
|
||||
atomic_set(&q->total_queued, 0);
|
||||
atomic_set(&q->total_flushed, 0);
|
||||
atomic_set(&q->total_locks, 0);
|
||||
|
||||
init_waitqueue_head(&q->write_queue);
|
||||
init_waitqueue_head(&q->read_queue);
|
||||
init_waitqueue_head(&q->flush_queue);
|
||||
|
||||
q->flags = ctx->flags;
|
||||
|
||||
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* drm_alloc_queue:
|
||||
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
|
||||
disappear (so all deallocation must be done after IOCTLs are off)
|
||||
2) dev->queue_count < dev->queue_slots
|
||||
3) dev->queuelist[i].use_count == 0 and
|
||||
dev->queuelist[i].finalization == 0 if i not in use
|
||||
POST: 1) dev->queuelist[i].use_count == 1
|
||||
2) dev->queue_count < dev->queue_slots */
|
||||
|
||||
static int DRM(alloc_queue)(drm_device_t *dev)
|
||||
{
|
||||
int i;
|
||||
drm_queue_t *queue;
|
||||
int oldslots;
|
||||
int newslots;
|
||||
/* Check for a free queue */
|
||||
for (i = 0; i < dev->queue_count; i++) {
|
||||
atomic_inc(&dev->queuelist[i]->use_count);
|
||||
if (atomic_read(&dev->queuelist[i]->use_count) == 1
|
||||
&& !atomic_read(&dev->queuelist[i]->finalization)) {
|
||||
DRM_DEBUG("%d (free)\n", i);
|
||||
return i;
|
||||
}
|
||||
atomic_dec(&dev->queuelist[i]->use_count);
|
||||
}
|
||||
/* Allocate a new queue */
|
||||
down(&dev->struct_sem);
|
||||
|
||||
queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES);
|
||||
memset(queue, 0, sizeof(*queue));
|
||||
atomic_set(&queue->use_count, 1);
|
||||
|
||||
++dev->queue_count;
|
||||
if (dev->queue_count >= dev->queue_slots) {
|
||||
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
|
||||
if (!dev->queue_slots) dev->queue_slots = 1;
|
||||
dev->queue_slots *= 2;
|
||||
newslots = dev->queue_slots * sizeof(*dev->queuelist);
|
||||
|
||||
dev->queuelist = DRM(realloc)(dev->queuelist,
|
||||
oldslots,
|
||||
newslots,
|
||||
DRM_MEM_QUEUES);
|
||||
if (!dev->queuelist) {
|
||||
up(&dev->struct_sem);
|
||||
DRM_DEBUG("out of memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
dev->queuelist[dev->queue_count-1] = queue;
|
||||
|
||||
up(&dev->struct_sem);
|
||||
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
|
||||
return dev->queue_count - 1;
|
||||
}
|
||||
|
||||
int DRM(resctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_ctx_res_t res;
|
||||
drm_ctx_t ctx;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
|
||||
if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
|
||||
return -EFAULT;
|
||||
if (res.count >= DRM_RESERVED_CONTEXTS) {
|
||||
memset(&ctx, 0, sizeof(ctx));
|
||||
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
|
||||
ctx.handle = i;
|
||||
if (copy_to_user(&res.contexts[i],
|
||||
&i,
|
||||
sizeof(i)))
|
||||
return -EFAULT;
|
||||
}
|
||||
}
|
||||
res.count = DRM_RESERVED_CONTEXTS;
|
||||
if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(addctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
|
||||
/* Init kernel's context and get a new one. */
|
||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
||||
ctx.handle = DRM(alloc_queue)(dev);
|
||||
}
|
||||
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(modctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
drm_queue_t *q;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
|
||||
if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
|
||||
q = dev->queuelist[ctx.handle];
|
||||
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
/* No longer in use */
|
||||
atomic_dec(&q->use_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (DRM_BUFCOUNT(&q->waitlist)) {
|
||||
atomic_dec(&q->use_count);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
q->flags = ctx.flags;
|
||||
|
||||
atomic_dec(&q->use_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
drm_queue_t *q;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
|
||||
if (ctx.handle >= dev->queue_count) return -EINVAL;
|
||||
q = dev->queuelist[ctx.handle];
|
||||
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
/* No longer in use */
|
||||
atomic_dec(&q->use_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ctx.flags = q->flags;
|
||||
atomic_dec(&q->use_count);
|
||||
|
||||
if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(switchctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
|
||||
}
|
||||
|
||||
int DRM(newctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
DRM(context_switch_complete)(dev, ctx.handle);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(rmctx)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ctx_t ctx;
|
||||
drm_queue_t *q;
|
||||
drm_buf_t *buf;
|
||||
|
||||
if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
|
||||
return -EFAULT;
|
||||
DRM_DEBUG("%d\n", ctx.handle);
|
||||
|
||||
if (ctx.handle >= dev->queue_count) return -EINVAL;
|
||||
q = dev->queuelist[ctx.handle];
|
||||
|
||||
atomic_inc(&q->use_count);
|
||||
if (atomic_read(&q->use_count) == 1) {
|
||||
/* No longer in use */
|
||||
atomic_dec(&q->use_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc(&q->finalization); /* Mark queue in finalization state */
|
||||
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
|
||||
finalization) */
|
||||
|
||||
while (test_and_set_bit(0, &dev->interrupt_flag)) {
|
||||
schedule();
|
||||
if (signal_pending(current)) {
|
||||
clear_bit(0, &dev->interrupt_flag);
|
||||
return -EINTR;
|
||||
}
|
||||
}
|
||||
/* Remove queued buffers */
|
||||
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
|
||||
DRM(free_buffer)(dev, buf);
|
||||
}
|
||||
clear_bit(0, &dev->interrupt_flag);
|
||||
|
||||
/* Wakeup blocked processes */
|
||||
wake_up_interruptible(&q->read_queue);
|
||||
wake_up_interruptible(&q->write_queue);
|
||||
wake_up_interruptible(&q->flush_queue);
|
||||
|
||||
/* Finalization over. Queue is made
|
||||
available when both use_count and
|
||||
finalization become 0, which won't
|
||||
happen until all the waiting processes
|
||||
stop waiting. */
|
||||
atomic_dec(&q->finalization);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* __HAVE_CTX_BITMAP */
|
||||
938
linux/drm_drv.h
Normal file
@@ -0,0 +1,938 @@
/* drm_drv.h -- Generic driver template -*- linux-c -*-
|
||||
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
|
||||
*
|
||||
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* To use this template, you must at least define the following (samples
|
||||
* given for the MGA driver):
|
||||
*
|
||||
* #define DRIVER_AUTHOR "VA Linux Systems, Inc."
|
||||
*
|
||||
* #define DRIVER_NAME "mga"
|
||||
* #define DRIVER_DESC "Matrox G200/G400"
|
||||
* #define DRIVER_DATE "20001127"
|
||||
*
|
||||
* #define DRIVER_MAJOR 2
|
||||
* #define DRIVER_MINOR 0
|
||||
* #define DRIVER_PATCHLEVEL 2
|
||||
*
|
||||
* #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
|
||||
*
|
||||
* #define DRM(x) mga_##x
|
||||
*/
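
/*
 * Illustration only (not part of the template): a hypothetical driver
 * source file would typically define the macros above and then pull in
 * the generic implementation headers, roughly as sketched below.  The
 * "xyz" names are placeholders, not a real driver.
 */
#if 0
#define DRIVER_AUTHOR		"Example Author"

#define DRIVER_NAME		"xyz"
#define DRIVER_DESC		"Example XYZ chipset"
#define DRIVER_DATE		"20001201"

#define DRIVER_MAJOR		1
#define DRIVER_MINOR		0
#define DRIVER_PATCHLEVEL	0

#define DRM(x)			xyz_##x

#include "drm_drv.h"		/* instantiate the generic driver template */
#endif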
|
||||
|
||||
#ifndef __MUST_HAVE_AGP
|
||||
#define __MUST_HAVE_AGP 0
|
||||
#endif
|
||||
#ifndef __HAVE_CTX_BITMAP
|
||||
#define __HAVE_CTX_BITMAP 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_IRQ
|
||||
#define __HAVE_DMA_IRQ 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_QUEUE
|
||||
#define __HAVE_DMA_QUEUE 0
|
||||
#endif
|
||||
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
|
||||
#define __HAVE_MULTIPLE_DMA_QUEUES 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_SCHEDULE
|
||||
#define __HAVE_DMA_SCHEDULE 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_FLUSH
|
||||
#define __HAVE_DMA_FLUSH 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_READY
|
||||
#define __HAVE_DMA_READY 0
|
||||
#endif
|
||||
#ifndef __HAVE_DMA_QUIESCENT
|
||||
#define __HAVE_DMA_QUIESCENT 0
|
||||
#endif
|
||||
#ifndef __HAVE_RELEASE
|
||||
#define __HAVE_RELEASE 0
|
||||
#endif
|
||||
#ifndef __HAVE_COUNTERS
|
||||
#define __HAVE_COUNTERS 0
|
||||
#endif
|
||||
|
||||
#ifndef DRIVER_PREINIT
|
||||
#define DRIVER_PREINIT()
|
||||
#endif
|
||||
#ifndef DRIVER_POSTINIT
|
||||
#define DRIVER_POSTINIT()
|
||||
#endif
|
||||
#ifndef DRIVER_PRERELEASE
|
||||
#define DRIVER_PRERELEASE()
|
||||
#endif
|
||||
#ifndef DRIVER_PRETAKEDOWN
|
||||
#define DRIVER_PRETAKEDOWN()
|
||||
#endif
|
||||
#ifndef DRIVER_IOCTLS
|
||||
#define DRIVER_IOCTLS
|
||||
#endif
|
||||
|
||||
|
||||
static drm_device_t DRM(device);
|
||||
static int DRM(minor);
|
||||
|
||||
static struct file_operations DRM(fops) = {
|
||||
#if LINUX_VERSION_CODE >= 0x020400
|
||||
/* This started being used during 2.4.0-test */
|
||||
owner: THIS_MODULE,
|
||||
#endif
|
||||
open: DRM(open),
|
||||
flush: DRM(flush),
|
||||
release: DRM(release),
|
||||
ioctl: DRM(ioctl),
|
||||
mmap: DRM(mmap),
|
||||
read: DRM(read),
|
||||
fasync: DRM(fasync),
|
||||
poll: DRM(poll),
|
||||
};
|
||||
|
||||
|
||||
static drm_ioctl_desc_t DRM(ioctls)[] = {
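	/* Each entry pairs a handler with its auth_needed and root_only
	 * flags; DRM(ioctl) below consults these flags before dispatching
	 * to the handler. */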
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 },
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 },
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 },
|
||||
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 },
|
||||
|
||||
#if __HAVE_DMA
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 },
|
||||
|
||||
/* The DRM_IOCTL_DMA ioctl should be defined by the driver.
|
||||
*/
|
||||
#if __HAVE_DMA_IRQ
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 },
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 },
|
||||
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 },
|
||||
#endif
|
||||
|
||||
DRIVER_IOCTLS
|
||||
};
|
||||
|
||||
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) )
|
||||
|
||||
#ifdef MODULE
|
||||
static char *drm_opts = NULL;
|
||||
#endif
|
||||
|
||||
MODULE_AUTHOR( DRIVER_AUTHOR );
|
||||
MODULE_DESCRIPTION( DRIVER_DESC );
|
||||
MODULE_PARM( drm_opts, "s" );
|
||||
|
||||
#ifndef MODULE
|
||||
/* DRM(options) is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It hands the option string
 * to DRM(parse_options), the same routine used for insmod options.
 */
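
/* Illustration only: with DRIVER_NAME defined as "xyz", a kernel boot line
 * containing "xyz=<option string>" is routed here by the __setup() hook
 * below and handed to DRM(parse_options).
 */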
|
||||
|
||||
static int __init DRM(options)( char *str )
|
||||
{
|
||||
DRM(parse_options)( str );
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup( DRIVER_NAME "=", DRM(options) );
|
||||
#endif
|
||||
|
||||
static int DRM(setup)( drm_device_t *dev )
|
||||
{
|
||||
int i;
|
||||
|
||||
atomic_set( &dev->ioctl_count, 0 );
|
||||
atomic_set( &dev->vma_count, 0 );
|
||||
dev->buf_use = 0;
|
||||
atomic_set( &dev->buf_alloc, 0 );
|
||||
|
||||
#if __HAVE_DMA
|
||||
i = DRM(dma_setup)( dev );
|
||||
if ( i < 0 )
|
||||
return i;
|
||||
#endif
|
||||
|
||||
dev->counters = 6 + __HAVE_COUNTERS;
|
||||
dev->types[0] = _DRM_STAT_LOCK;
|
||||
dev->types[1] = _DRM_STAT_OPENS;
|
||||
dev->types[2] = _DRM_STAT_CLOSES;
|
||||
dev->types[3] = _DRM_STAT_IOCTLS;
|
||||
dev->types[4] = _DRM_STAT_LOCKS;
|
||||
dev->types[5] = _DRM_STAT_UNLOCKS;
|
||||
#ifdef __HAVE_COUNTER6
|
||||
dev->types[6] = __HAVE_COUNTER6;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER7
|
||||
dev->types[7] = __HAVE_COUNTER7;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER8
|
||||
dev->types[8] = __HAVE_COUNTER8;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER9
|
||||
dev->types[9] = __HAVE_COUNTER9;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER10
|
||||
dev->types[10] = __HAVE_COUNTER10;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER11
|
||||
dev->types[11] = __HAVE_COUNTER11;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER12
|
||||
dev->types[12] = __HAVE_COUNTER12;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER13
|
||||
dev->types[13] = __HAVE_COUNTER13;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER14
|
||||
dev->types[14] = __HAVE_COUNTER14;
|
||||
#endif
|
||||
#ifdef __HAVE_COUNTER15
	dev->types[15] = __HAVE_COUNTER15;
#endif
|
||||
|
||||
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
|
||||
atomic_set( &dev->counts[i], 0 );
|
||||
|
||||
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
|
||||
dev->magiclist[i].head = NULL;
|
||||
dev->magiclist[i].tail = NULL;
|
||||
}
|
||||
|
||||
dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
|
||||
DRM_MEM_MAPS);
|
||||
if(dev->maplist == NULL) return -ENOMEM;
|
||||
memset(dev->maplist, 0, sizeof(*dev->maplist));
|
||||
INIT_LIST_HEAD(&dev->maplist->head);
|
||||
dev->map_count = 0;
|
||||
|
||||
dev->vmalist = NULL;
|
||||
dev->lock.hw_lock = NULL;
|
||||
init_waitqueue_head( &dev->lock.lock_queue );
|
||||
dev->queue_count = 0;
|
||||
dev->queue_reserved = 0;
|
||||
dev->queue_slots = 0;
|
||||
dev->queuelist = NULL;
|
||||
dev->irq = 0;
|
||||
dev->context_flag = 0;
|
||||
dev->interrupt_flag = 0;
|
||||
dev->dma_flag = 0;
|
||||
dev->last_context = 0;
|
||||
dev->last_switch = 0;
|
||||
dev->last_checked = 0;
|
||||
init_timer( &dev->timer );
|
||||
init_waitqueue_head( &dev->context_wait );
|
||||
|
||||
dev->ctx_start = 0;
|
||||
dev->lck_start = 0;
|
||||
|
||||
dev->buf_rp = dev->buf;
|
||||
dev->buf_wp = dev->buf;
|
||||
dev->buf_end = dev->buf + DRM_BSZ;
|
||||
dev->buf_async = NULL;
|
||||
init_waitqueue_head( &dev->buf_readers );
|
||||
init_waitqueue_head( &dev->buf_writers );
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
/* The kernel's context could be created here, but is now created
|
||||
* in drm_dma_enqueue. This is more resource-efficient for
|
||||
* hardware that does not do DMA, but may mean that
|
||||
* drm_select_queue fails between the time the interrupt is
|
||||
* initialized and the time the queues are initialized.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int DRM(takedown)( drm_device_t *dev )
|
||||
{
|
||||
drm_magic_entry_t *pt, *next;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
drm_vma_entry_t *vma, *vma_next;
|
||||
int i;
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
DRIVER_PRETAKEDOWN();
|
||||
#if __HAVE_DMA_IRQ
|
||||
if ( dev->irq ) DRM(irq_uninstall)( dev );
|
||||
#endif
|
||||
|
||||
down( &dev->struct_sem );
|
||||
del_timer( &dev->timer );
|
||||
|
||||
if ( dev->devname ) {
|
||||
DRM(free)( dev->devname, strlen( dev->devname ) + 1,
|
||||
DRM_MEM_DRIVER );
|
||||
dev->devname = NULL;
|
||||
}
|
||||
|
||||
if ( dev->unique ) {
|
||||
DRM(free)( dev->unique, strlen( dev->unique ) + 1,
|
||||
DRM_MEM_DRIVER );
|
||||
dev->unique = NULL;
|
||||
dev->unique_len = 0;
|
||||
}
|
||||
/* Clear pid list */
|
||||
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
|
||||
for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
|
||||
next = pt->next;
|
||||
DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
|
||||
}
|
||||
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
|
||||
}
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
/* Clear AGP information */
|
||||
if ( dev->agp ) {
|
||||
drm_agp_mem_t *entry;
|
||||
drm_agp_mem_t *nexte;
|
||||
|
||||
		/* Remove AGP resources, but leave dev->agp intact until
		   drm_cleanup is called. */
|
||||
for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
|
||||
nexte = entry->next;
|
||||
if ( entry->bound ) DRM(unbind_agp)( entry->memory );
|
||||
DRM(free_agp)( entry->memory, entry->pages );
|
||||
DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
|
||||
}
|
||||
dev->agp->memory = NULL;
|
||||
|
||||
if ( dev->agp->acquired ) DRM(agp_do_release)();
|
||||
|
||||
dev->agp->acquired = 0;
|
||||
dev->agp->enabled = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Clear vma list (only built for debugging) */
|
||||
if ( dev->vmalist ) {
|
||||
for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
|
||||
vma_next = vma->next;
|
||||
DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
|
||||
}
|
||||
dev->vmalist = NULL;
|
||||
}
|
||||
|
||||
if( dev->maplist ) {
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
map = r_list->map;
|
||||
DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
|
||||
if(!map) continue;
|
||||
|
||||
switch ( map->type ) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if ( map->mtrr >= 0 ) {
|
||||
int retcode;
|
||||
retcode = mtrr_del( map->mtrr,
|
||||
map->offset,
|
||||
map->size );
|
||||
DRM_DEBUG( "mtrr_del=%d\n", retcode );
|
||||
}
|
||||
#endif
|
||||
DRM(ioremapfree)( map->handle, map->size );
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vfree(map->handle);
|
||||
break;
|
||||
|
||||
case _DRM_AGP:
|
||||
/* Do nothing here, because this is all
|
||||
* handled in the AGP/GART driver.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
}
|
||||
DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
|
||||
dev->maplist = NULL;
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
|
||||
if ( dev->queuelist ) {
|
||||
		for ( i = 0 ; i < dev->queue_count ; i++ ) {
			/* Only tear down queues that were actually allocated */
			if ( dev->queuelist[i] ) {
				DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
				DRM(free)( dev->queuelist[i],
					   sizeof(*dev->queuelist[0]),
					   DRM_MEM_QUEUES );
				dev->queuelist[i] = NULL;
			}
		}
|
||||
DRM(free)( dev->queuelist,
|
||||
dev->queue_slots * sizeof(*dev->queuelist),
|
||||
DRM_MEM_QUEUES );
|
||||
dev->queuelist = NULL;
|
||||
}
|
||||
dev->queue_count = 0;
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA
|
||||
DRM(dma_takedown)( dev );
|
||||
#endif
|
||||
if ( dev->lock.hw_lock ) {
|
||||
dev->lock.hw_lock = NULL; /* SHM removed */
|
||||
dev->lock.pid = 0;
|
||||
wake_up_interruptible( &dev->lock.lock_queue );
|
||||
}
|
||||
up( &dev->struct_sem );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* drm_init is called via init_module at module load time, or via
|
||||
* linux/init/main.c (this is not currently supported).
|
||||
*/
|
||||
static int __init drm_init( void )
|
||||
{
|
||||
drm_device_t *dev = &DRM(device);
|
||||
#if __HAVE_CTX_BITMAP
|
||||
int retcode;
|
||||
#endif
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
memset( (void *)dev, 0, sizeof(*dev) );
|
||||
dev->count_lock = SPIN_LOCK_UNLOCKED;
|
||||
sema_init( &dev->struct_sem, 1 );
|
||||
|
||||
#ifdef MODULE
|
||||
DRM(parse_options)( drm_opts );
|
||||
#endif
|
||||
DRIVER_PREINIT();
|
||||
|
||||
DRM(mem_init)();
|
||||
|
||||
if ((DRM(minor) = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
|
||||
return -EPERM;
|
||||
dev->device = MKDEV(DRM_MAJOR, DRM(minor) );
|
||||
dev->name = DRIVER_NAME;
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
dev->agp = DRM(agp_init)();
|
||||
#if __MUST_HAVE_AGP
|
||||
if ( dev->agp == NULL ) {
|
||||
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
|
||||
DRM(stub_unregister)(DRM(minor));
|
||||
DRM(takedown)( dev );
|
||||
return -ENOMEM;
|
||||
}
|
||||
#endif
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if (dev->agp)
|
||||
dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size*1024*1024,
|
||||
MTRR_TYPE_WRCOMB,
|
||||
1 );
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if __HAVE_CTX_BITMAP
|
||||
retcode = DRM(ctxbitmap_init)( dev );
|
||||
if( retcode ) {
|
||||
DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
|
||||
DRM(stub_unregister)(DRM(minor));
|
||||
DRM(takedown)( dev );
|
||||
return retcode;
|
||||
}
|
||||
#endif
|
||||
|
||||
DRIVER_POSTINIT();
|
||||
|
||||
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
|
||||
DRIVER_NAME,
|
||||
DRIVER_MAJOR,
|
||||
DRIVER_MINOR,
|
||||
DRIVER_PATCHLEVEL,
|
||||
DRIVER_DATE,
|
||||
DRM(minor) );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* drm_cleanup is called via cleanup_module at module unload time.
|
||||
*/
|
||||
static void __exit drm_cleanup( void )
|
||||
{
|
||||
drm_device_t *dev = &DRM(device);
|
||||
|
||||
DRM_DEBUG( "\n" );
|
||||
|
||||
if ( DRM(stub_unregister)(DRM(minor)) ) {
|
||||
DRM_ERROR( "Cannot unload module\n" );
|
||||
} else {
|
||||
DRM_INFO( "Module unloaded\n" );
|
||||
}
|
||||
#if __HAVE_CTX_BITMAP
|
||||
DRM(ctxbitmap_cleanup)( dev );
|
||||
#endif
|
||||
|
||||
#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
|
||||
if ( dev->agp && dev->agp->agp_mtrr ) {
|
||||
int retval;
|
||||
retval = mtrr_del( dev->agp->agp_mtrr,
|
||||
dev->agp->agp_info.aper_base,
|
||||
dev->agp->agp_info.aper_size*1024*1024 );
|
||||
DRM_DEBUG( "mtrr_del=%d\n", retval );
|
||||
}
|
||||
#endif
|
||||
|
||||
DRM(takedown)( dev );
|
||||
|
||||
#if __REALLY_HAVE_AGP
|
||||
if ( dev->agp ) {
|
||||
DRM(agp_uninit)();
|
||||
DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
|
||||
dev->agp = NULL;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
module_init( drm_init );
|
||||
module_exit( drm_cleanup );
|
||||
|
||||
|
||||
int DRM(version)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_version_t version;
|
||||
int len;
|
||||
|
||||
if ( copy_from_user( &version,
|
||||
(drm_version_t *)arg,
|
||||
sizeof(version) ) )
|
||||
return -EFAULT;
|
||||
|
||||
#define DRM_COPY( name, value ) \
|
||||
len = strlen( value ); \
|
||||
if ( len > name##_len ) len = name##_len; \
|
||||
name##_len = strlen( value ); \
|
||||
if ( len && name ) { \
|
||||
if ( copy_to_user( name, value, len ) ) \
|
||||
return -EFAULT; \
|
||||
}
|
||||
|
||||
version.version_major = DRIVER_MAJOR;
|
||||
version.version_minor = DRIVER_MINOR;
|
||||
version.version_patchlevel = DRIVER_PATCHLEVEL;
|
||||
|
||||
DRM_COPY( version.name, DRIVER_NAME );
|
||||
DRM_COPY( version.date, DRIVER_DATE );
|
||||
DRM_COPY( version.desc, DRIVER_DESC );
|
||||
|
||||
if ( copy_to_user( (drm_version_t *)arg,
|
||||
&version,
|
||||
sizeof(version) ) )
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
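
/* Userspace side, for illustration only: a client might query the version
 * roughly as sketched below (assumes an already-open file descriptor on the
 * DRM device node and caller-supplied string buffers).
 *
 *	drm_version_t v;
 *	char name[64], date[64], desc[64];
 *
 *	memset(&v, 0, sizeof(v));
 *	v.name_len = sizeof(name);	v.name = name;
 *	v.date_len = sizeof(date);	v.date = date;
 *	v.desc_len = sizeof(desc);	v.desc = desc;
 *	if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
 *		printf("%s %d.%d.%d\n", v.name, v.version_major,
 *		       v.version_minor, v.version_patchlevel);
 */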
|
||||
|
||||
int DRM(open)( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_device_t *dev = &DRM(device);
|
||||
int retcode = 0;
|
||||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
retcode = DRM(open_helper)( inode, filp, dev );
|
||||
if ( !retcode ) {
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !dev->open_count++ ) {
|
||||
spin_unlock( &dev->count_lock );
|
||||
return DRM(setup)( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
}
|
||||
|
||||
return retcode;
|
||||
}
|
||||
|
||||
int DRM(release)( struct inode *inode, struct file *filp )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev;
|
||||
int retcode = 0;
|
||||
|
||||
lock_kernel();
|
||||
dev = priv->dev;
|
||||
|
||||
DRM_DEBUG( "open_count = %d\n", dev->open_count );
|
||||
|
||||
DRIVER_PRERELEASE();
|
||||
|
||||
/* ========================================================
|
||||
* Begin inline drm_release
|
||||
*/
|
||||
|
||||
DRM_DEBUG( "pid = %d, device = 0x%x, open_count = %d\n",
|
||||
current->pid, dev->device, dev->open_count );
|
||||
|
||||
if ( dev->lock.hw_lock &&
|
||||
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
|
||||
dev->lock.pid == current->pid ) {
|
||||
DRM_DEBUG( "Process %d dead, freeing lock for context %d\n",
|
||||
current->pid,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
#if __HAVE_RELEASE
|
||||
DRIVER_RELEASE();
|
||||
#endif
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
|
||||
|
||||
/* FIXME: may require heavy-handed reset of
|
||||
hardware at this point, possibly
|
||||
processed via a callback to the X
|
||||
server. */
|
||||
}
|
||||
#if __HAVE_RELEASE
|
||||
else if ( dev->lock.hw_lock ) {
|
||||
/* The lock is required to reclaim buffers */
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
retcode = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
dev->lock.pid = priv->pid;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
/* Contention */
|
||||
#if 0
|
||||
atomic_inc( &dev->total_sleeps );
|
||||
#endif
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
retcode = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
if( !retcode ) {
|
||||
DRIVER_RELEASE();
|
||||
DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
}
|
||||
}
|
||||
#elif __HAVE_DMA
|
||||
DRM(reclaim_buffers)( dev, priv->pid );
|
||||
#endif
|
||||
|
||||
DRM(fasync)( -1, filp, 0 );
|
||||
|
||||
down( &dev->struct_sem );
|
||||
if ( priv->remove_auth_on_close == 1 ) {
|
||||
drm_file_t *temp = dev->file_first;
|
||||
while ( temp ) {
|
||||
temp->authenticated = 0;
|
||||
temp = temp->next;
|
||||
}
|
||||
}
|
||||
if ( priv->prev ) {
|
||||
priv->prev->next = priv->next;
|
||||
} else {
|
||||
dev->file_first = priv->next;
|
||||
}
|
||||
if ( priv->next ) {
|
||||
priv->next->prev = priv->prev;
|
||||
} else {
|
||||
dev->file_last = priv->prev;
|
||||
}
|
||||
up( &dev->struct_sem );
|
||||
|
||||
DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );
|
||||
|
||||
/* ========================================================
|
||||
* End inline drm_release
|
||||
*/
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
|
||||
spin_lock( &dev->count_lock );
|
||||
if ( !--dev->open_count ) {
|
||||
if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
|
||||
DRM_ERROR( "Device busy: %d %d\n",
|
||||
atomic_read( &dev->ioctl_count ),
|
||||
dev->blocked );
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return -EBUSY;
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
unlock_kernel();
|
||||
return DRM(takedown)( dev );
|
||||
}
|
||||
spin_unlock( &dev->count_lock );
|
||||
|
||||
unlock_kernel();
|
||||
return retcode;
|
||||
}
|
||||
|
||||
/* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
|
||||
*/
|
||||
int DRM(ioctl)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_ioctl_desc_t *ioctl;
|
||||
drm_ioctl_t *func;
|
||||
int nr = DRM_IOCTL_NR(cmd);
|
||||
int retcode = 0;
|
||||
|
||||
atomic_inc( &dev->ioctl_count );
|
||||
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
|
||||
++priv->ioctl_count;
|
||||
|
||||
DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%x, auth=%d\n",
|
||||
current->pid, cmd, nr, dev->device, priv->authenticated );
|
||||
|
||||
if ( nr >= DRIVER_IOCTL_COUNT ) {
|
||||
retcode = -EINVAL;
|
||||
} else {
|
||||
ioctl = &DRM(ioctls)[nr];
|
||||
func = ioctl->func;
|
||||
|
||||
if ( !func ) {
|
||||
DRM_DEBUG( "no function\n" );
|
||||
retcode = -EINVAL;
|
||||
} else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
|
||||
( ioctl->auth_needed && !priv->authenticated ) ) {
|
||||
retcode = -EACCES;
|
||||
} else {
|
||||
retcode = func( inode, filp, cmd, arg );
|
||||
}
|
||||
}
|
||||
|
||||
atomic_dec( &dev->ioctl_count );
|
||||
return retcode;
|
||||
}
|
||||
|
||||
int DRM(lock)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
DECLARE_WAITQUEUE( entry, current );
|
||||
drm_lock_t lock;
|
||||
int ret = 0;
|
||||
#if __HAVE_MULTIPLE_DMA_QUEUES
|
||||
drm_queue_t *q;
|
||||
#endif
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
cycles_t start;
|
||||
|
||||
dev->lck_start = start = get_cycles();
|
||||
#endif
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
|
||||
lock.context, current->pid,
|
||||
dev->lock.hw_lock->lock, lock.flags );
|
||||
|
||||
#if __HAVE_DMA_QUEUE
|
||||
if ( lock.context < 0 )
|
||||
return -EINVAL;
|
||||
#elif __HAVE_MULTIPLE_DMA_QUEUES
|
||||
if ( lock.context < 0 || lock.context >= dev->queue_count )
|
||||
return -EINVAL;
|
||||
q = dev->queuelist[lock.context];
|
||||
#endif
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
|
||||
#endif
|
||||
if ( !ret ) {
|
||||
add_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
for (;;) {
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
if ( !dev->lock.hw_lock ) {
|
||||
/* Device has been unregistered */
|
||||
ret = -EINTR;
|
||||
break;
|
||||
}
|
||||
if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
|
||||
lock.context ) ) {
|
||||
dev->lock.pid = current->pid;
|
||||
dev->lock.lock_time = jiffies;
|
||||
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
|
||||
break; /* Got lock */
|
||||
}
|
||||
|
||||
/* Contention */
|
||||
schedule();
|
||||
if ( signal_pending( current ) ) {
|
||||
ret = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
current->state = TASK_RUNNING;
|
||||
remove_wait_queue( &dev->lock.lock_queue, &entry );
|
||||
}
|
||||
|
||||
#if __HAVE_DMA_FLUSH
|
||||
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
|
||||
#endif
|
||||
|
||||
if ( !ret ) {
|
||||
sigemptyset( &dev->sigmask );
|
||||
sigaddset( &dev->sigmask, SIGSTOP );
|
||||
sigaddset( &dev->sigmask, SIGTSTP );
|
||||
sigaddset( &dev->sigmask, SIGTTIN );
|
||||
sigaddset( &dev->sigmask, SIGTTOU );
|
||||
dev->sigdata.context = lock.context;
|
||||
dev->sigdata.lock = dev->lock.hw_lock;
|
||||
block_all_signals( DRM(notifier),
|
||||
&dev->sigdata, &dev->sigmask );
|
||||
|
||||
#if __HAVE_DMA_READY
|
||||
if ( lock.flags & _DRM_LOCK_READY ) {
|
||||
DRIVER_DMA_READY();
|
||||
}
|
||||
#endif
|
||||
#if __HAVE_DMA_QUIESCENT
|
||||
if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
|
||||
DRIVER_DMA_QUIESCENT();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int DRM(unlock)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_lock_t lock;
|
||||
|
||||
if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
|
||||
return -EFAULT;
|
||||
|
||||
if ( lock.context == DRM_KERNEL_CONTEXT ) {
|
||||
DRM_ERROR( "Process %d using kernel context %d\n",
|
||||
current->pid, lock.context );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
|
||||
|
||||
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT );
|
||||
#if __HAVE_DMA_SCHEDULE
|
||||
DRM(dma_schedule)( dev, 1 );
|
||||
#endif
|
||||
|
||||
/* FIXME: Do we ever really need to check this???
|
||||
*/
|
||||
if ( 1 /* !dev->context_flag */ ) {
|
||||
if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
|
||||
DRM_KERNEL_CONTEXT ) ) {
|
||||
DRM_ERROR( "\n" );
|
||||
}
|
||||
}
|
||||
|
||||
unblock_all_signals();
|
||||
return 0;
|
||||
}
|
||||
210
linux/drm_ioctl.h
Normal file
@ -0,0 +1,210 @@
/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
|
||||
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
int DRM(irq_busid)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_irq_busid_t p;
|
||||
struct pci_dev *dev;
|
||||
|
||||
if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
|
||||
return -EFAULT;
|
||||
dev = pci_find_slot(p.busnum, PCI_DEVFN(p.devnum, p.funcnum));
|
||||
if (dev) p.irq = dev->irq;
|
||||
else p.irq = 0;
|
||||
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
|
||||
p.busnum, p.devnum, p.funcnum, p.irq);
|
||||
if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getunique)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_unique_t u;
|
||||
|
||||
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
|
||||
return -EFAULT;
|
||||
if (u.unique_len >= dev->unique_len) {
|
||||
if (copy_to_user(u.unique, dev->unique, dev->unique_len))
|
||||
return -EFAULT;
|
||||
}
|
||||
u.unique_len = dev->unique_len;
|
||||
if (copy_to_user((drm_unique_t *)arg, &u, sizeof(u)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
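
/* Userspace side, for illustration only: the usual pattern is two calls --
 * one with unique_len = 0 to learn the length, then another with a buffer
 * of at least that size (the kernel does not NUL-terminate the copy).
 *
 *	drm_unique_t u;
 *
 *	memset(&u, 0, sizeof(u));
 *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	// u.unique_len now set
 *	u.unique = malloc(u.unique_len + 1);
 *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	// fills u.unique
 *	u.unique[u.unique_len] = '\0';
 */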
|
||||
|
||||
int DRM(setunique)(struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_unique_t u;
|
||||
|
||||
if (dev->unique_len || dev->unique)
|
||||
return -EBUSY;
|
||||
|
||||
if (copy_from_user(&u, (drm_unique_t *)arg, sizeof(u)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!u.unique_len)
|
||||
return -EINVAL;
|
||||
|
||||
dev->unique_len = u.unique_len;
|
||||
dev->unique = DRM(alloc)(u.unique_len + 1, DRM_MEM_DRIVER);
|
||||
if (copy_from_user(dev->unique, u.unique, dev->unique_len))
|
||||
return -EFAULT;
|
||||
dev->unique[dev->unique_len] = '\0';
|
||||
|
||||
dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
|
||||
DRM_MEM_DRIVER);
|
||||
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int DRM(getmap)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_map_t map;
|
||||
drm_map_list_t *r_list = NULL;
|
||||
struct list_head *list;
|
||||
int idx;
|
||||
int i;
|
||||
|
||||
if (copy_from_user(&map, (drm_map_t *)arg, sizeof(map)))
|
||||
return -EFAULT;
|
||||
idx = map.offset;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
if (idx < 0 || idx >= dev->map_count) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
i = 0;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
if(i == idx) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
break;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
if(!r_list || !r_list->map) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
map.offset = r_list->map->offset;
|
||||
map.size = r_list->map->size;
|
||||
map.type = r_list->map->type;
|
||||
map.flags = r_list->map->flags;
|
||||
map.handle = r_list->map->handle;
|
||||
map.mtrr = r_list->map->mtrr;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user((drm_map_t *)arg, &map, sizeof(map))) return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getclient)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_client_t client;
|
||||
drm_file_t *pt;
|
||||
int idx;
|
||||
int i;
|
||||
|
||||
if (copy_from_user(&client, (drm_client_t *)arg, sizeof(client)))
|
||||
return -EFAULT;
|
||||
idx = client.idx;
|
||||
down(&dev->struct_sem);
|
||||
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next)
|
||||
;
|
||||
|
||||
if (!pt) {
|
||||
up(&dev->struct_sem);
|
||||
return -EINVAL;
|
||||
}
|
||||
client.auth = pt->authenticated;
|
||||
client.pid = pt->pid;
|
||||
client.uid = pt->uid;
|
||||
client.magic = pt->magic;
|
||||
client.iocs = pt->ioctl_count;
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user((drm_client_t *)arg, &client, sizeof(client)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(getstats)( struct inode *inode, struct file *filp,
|
||||
unsigned int cmd, unsigned long arg )
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_stats_t stats;
|
||||
int i;
|
||||
|
||||
memset(&stats, 0, sizeof(stats));
|
||||
|
||||
down(&dev->struct_sem);
|
||||
|
||||
for (i = 0; i < dev->counters; i++) {
|
||||
if (dev->types[i] == _DRM_STAT_LOCK)
|
||||
stats.data[i].value
|
||||
= (dev->lock.hw_lock
|
||||
? dev->lock.hw_lock->lock : 0);
|
||||
else
|
||||
stats.data[i].value = atomic_read(&dev->counts[i]);
|
||||
stats.data[i].type = dev->types[i];
|
||||
}
|
||||
|
||||
stats.count = dev->counters;
|
||||
|
||||
up(&dev->struct_sem);
|
||||
|
||||
if (copy_to_user((drm_stats_t *)arg, &stats, sizeof(stats)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
630
linux/drm_proc.h
Normal file
@ -0,0 +1,630 @@
/* drm_proc.h -- /proc support for DRM -*- linux-c -*-
|
||||
* Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*
|
||||
* Acknowledgements:
|
||||
* Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
|
||||
* the problem with the proc files not outputting all their information.
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
static int DRM(name_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(vm_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(clients_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(queues_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
static int DRM(bufs_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
#if DRM_DEBUG_CODE
|
||||
static int DRM(vma_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
#endif
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
static int DRM(histo_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data);
|
||||
#endif
|
||||
|
||||
struct drm_proc_list {
|
||||
const char *name;
|
||||
int (*f)(char *, char **, off_t, int, int *, void *);
|
||||
} DRM(proc_list)[] = {
|
||||
{ "name", DRM(name_info) },
|
||||
{ "mem", DRM(mem_info) },
|
||||
{ "vm", DRM(vm_info) },
|
||||
{ "clients", DRM(clients_info) },
|
||||
{ "queues", DRM(queues_info) },
|
||||
{ "bufs", DRM(bufs_info) },
|
||||
#if DRM_DEBUG_CODE
|
||||
{ "vma", DRM(vma_info) },
|
||||
#endif
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
{ "histo", DRM(histo_info) },
|
||||
#endif
|
||||
};
|
||||
#define DRM_PROC_ENTRIES (sizeof(DRM(proc_list))/sizeof(DRM(proc_list)[0]))
|
||||
|
||||
struct proc_dir_entry *DRM(proc_init)(drm_device_t *dev, int minor,
|
||||
struct proc_dir_entry *root,
|
||||
struct proc_dir_entry **dev_root)
|
||||
{
|
||||
struct proc_dir_entry *ent;
|
||||
int i, j;
|
||||
char name[64];
|
||||
|
||||
if (!minor) root = create_proc_entry("dri", S_IFDIR, NULL);
|
||||
if (!root) {
|
||||
DRM_ERROR("Cannot create /proc/dri\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sprintf(name, "%d", minor);
|
||||
*dev_root = create_proc_entry(name, S_IFDIR, root);
|
||||
if (!*dev_root) {
|
||||
DRM_ERROR("Cannot create /proc/%s\n", name);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
|
||||
ent = create_proc_entry(DRM(proc_list)[i].name,
|
||||
S_IFREG|S_IRUGO, *dev_root);
|
||||
if (!ent) {
|
||||
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
|
||||
name, DRM(proc_list)[i].name);
|
||||
			for (j = 0; j < i; j++)
				remove_proc_entry(DRM(proc_list)[j].name,
						  *dev_root);
|
||||
remove_proc_entry(name, root);
|
||||
if (!minor) remove_proc_entry("dri", NULL);
|
||||
return NULL;
|
||||
}
|
||||
ent->read_proc = DRM(proc_list)[i].f;
|
||||
ent->data = dev;
|
||||
}
|
||||
|
||||
return root;
|
||||
}
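
/* For illustration: on a device registered as minor 0 this creates
 * /proc/dri/0/ with one file per DRM(proc_list) entry, e.g.
 * /proc/dri/0/name, /proc/dri/0/mem, /proc/dri/0/vm, and so on.
 */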
|
||||
|
||||
|
||||
int DRM(proc_cleanup)(int minor, struct proc_dir_entry *root,
|
||||
struct proc_dir_entry *dev_root)
|
||||
{
|
||||
int i;
|
||||
char name[64];
|
||||
|
||||
if (!root || !dev_root) return 0;
|
||||
|
||||
for (i = 0; i < DRM_PROC_ENTRIES; i++)
|
||||
remove_proc_entry(DRM(proc_list)[i].name, dev_root);
|
||||
sprintf(name, "%d", minor);
|
||||
remove_proc_entry(name, root);
|
||||
if (!minor) remove_proc_entry("dri", NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int DRM(name_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
if (dev->unique) {
|
||||
DRM_PROC_PRINT("%s 0x%x %s\n",
|
||||
dev->name, dev->device, dev->unique);
|
||||
} else {
|
||||
DRM_PROC_PRINT("%s 0x%x\n", dev->name, dev->device);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(_vm_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
|
||||
				/* Hardcoded from _DRM_FRAME_BUFFER, _DRM_REGISTERS,
				   _DRM_SHM, and _DRM_AGP. */
|
||||
const char *types[] = { "FB", "REG", "SHM", "AGP" };
|
||||
const char *type;
|
||||
int i;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("slot offset size type flags "
|
||||
"address mtrr\n\n");
|
||||
i = 0;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
map = r_list->map;
|
||||
if(!map) continue;
|
||||
if (map->type < 0 || map->type > 3) type = "??";
|
||||
else type = types[map->type];
|
||||
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
|
||||
i,
|
||||
map->offset,
|
||||
map->size,
|
||||
type,
|
||||
map->flags,
|
||||
(unsigned long)map->handle);
|
||||
if (map->mtrr < 0) {
|
||||
DRM_PROC_PRINT("none\n");
|
||||
} else {
|
||||
DRM_PROC_PRINT("%4d\n", map->mtrr);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(vm_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_vm_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int DRM(_queues_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
int i;
|
||||
drm_queue_t *q;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT(" ctx/flags use fin"
|
||||
" blk/rw/rwf wait flushed queued"
|
||||
" locks\n\n");
|
||||
for (i = 0; i < dev->queue_count; i++) {
|
||||
q = dev->queuelist[i];
|
||||
atomic_inc(&q->use_count);
|
||||
DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
|
||||
"%5d/0x%03x %5d %5d"
|
||||
" %5d/%c%c/%c%c%c %5Zd\n",
|
||||
i,
|
||||
q->flags,
|
||||
atomic_read(&q->use_count),
|
||||
atomic_read(&q->finalization),
|
||||
atomic_read(&q->block_count),
|
||||
atomic_read(&q->block_read) ? 'r' : '-',
|
||||
atomic_read(&q->block_write) ? 'w' : '-',
|
||||
waitqueue_active(&q->read_queue) ? 'r':'-',
|
||||
waitqueue_active(&q->write_queue) ? 'w':'-',
|
||||
waitqueue_active(&q->flush_queue) ? 'f':'-',
|
||||
DRM_BUFCOUNT(&q->waitlist));
|
||||
atomic_dec(&q->use_count);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(queues_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_queues_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* drm_bufs_info is called whenever a process reads /proc/dri/<dev>/bufs. */
|
||||
|
||||
static int DRM(_bufs_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
int i;
|
||||
|
||||
if (!dma || offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
|
||||
for (i = 0; i <= DRM_MAX_ORDER; i++) {
|
||||
if (dma->bufs[i].buf_count)
|
||||
DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
|
||||
i,
|
||||
dma->bufs[i].buf_size,
|
||||
dma->bufs[i].buf_count,
|
||||
atomic_read(&dma->bufs[i]
|
||||
.freelist.count),
|
||||
dma->bufs[i].seg_count,
|
||||
dma->bufs[i].seg_count
|
||||
*(1 << dma->bufs[i].page_order),
|
||||
(dma->bufs[i].seg_count
|
||||
* (1 << dma->bufs[i].page_order))
|
||||
* PAGE_SIZE / 1024);
|
||||
}
|
||||
DRM_PROC_PRINT("\n");
|
||||
for (i = 0; i < dma->buf_count; i++) {
|
||||
if (i && !(i%32)) DRM_PROC_PRINT("\n");
|
||||
DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
|
||||
}
|
||||
DRM_PROC_PRINT("\n");
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(bufs_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_bufs_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int DRM(_clients_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_file_t *priv;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
|
||||
for (priv = dev->file_first; priv; priv = priv->next) {
|
||||
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
|
||||
priv->authenticated ? 'y' : 'n',
|
||||
priv->minor,
|
||||
priv->pid,
|
||||
priv->uid,
|
||||
priv->magic,
|
||||
priv->ioctl_count);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(clients_info)(char *buf, char **start, off_t offset,
|
||||
int request, int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_clients_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if DRM_DEBUG_CODE
|
||||
|
||||
#define DRM_VMA_VERBOSE 0
|
||||
|
||||
static int DRM(_vma_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_vma_entry_t *pt;
|
||||
struct vm_area_struct *vma;
|
||||
#if DRM_VMA_VERBOSE
|
||||
unsigned long i;
|
||||
unsigned long address;
|
||||
pgd_t *pgd;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
#endif
|
||||
#if defined(__i386__)
|
||||
unsigned int pgprot;
|
||||
#endif
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
|
||||
atomic_read(&dev->vma_count),
|
||||
high_memory, virt_to_phys(high_memory));
|
||||
for (pt = dev->vmalist; pt; pt = pt->next) {
|
||||
if (!(vma = pt->vma)) continue;
|
||||
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
|
||||
pt->pid,
|
||||
vma->vm_start,
|
||||
vma->vm_end,
|
||||
vma->vm_flags & VM_READ ? 'r' : '-',
|
||||
vma->vm_flags & VM_WRITE ? 'w' : '-',
|
||||
vma->vm_flags & VM_EXEC ? 'x' : '-',
|
||||
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
|
||||
vma->vm_flags & VM_LOCKED ? 'l' : '-',
|
||||
vma->vm_flags & VM_IO ? 'i' : '-',
|
||||
VM_OFFSET(vma));
|
||||
|
||||
#if defined(__i386__)
|
||||
pgprot = pgprot_val(vma->vm_page_prot);
|
||||
DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
|
||||
pgprot & _PAGE_PRESENT ? 'p' : '-',
|
||||
pgprot & _PAGE_RW ? 'w' : 'r',
|
||||
pgprot & _PAGE_USER ? 'u' : 's',
|
||||
pgprot & _PAGE_PWT ? 't' : 'b',
|
||||
pgprot & _PAGE_PCD ? 'u' : 'c',
|
||||
pgprot & _PAGE_ACCESSED ? 'a' : '-',
|
||||
pgprot & _PAGE_DIRTY ? 'd' : '-',
|
||||
pgprot & _PAGE_PSE ? 'm' : 'k',
|
||||
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
|
||||
#endif
|
||||
DRM_PROC_PRINT("\n");
|
||||
#if 0
|
||||
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
|
||||
pgd = pgd_offset(vma->vm_mm, i);
|
||||
pmd = pmd_offset(pgd, i);
|
||||
pte = pte_offset(pmd, i);
|
||||
if (pte_present(*pte)) {
|
||||
address = __pa(pte_page(*pte))
|
||||
+ (i & (PAGE_SIZE-1));
|
||||
DRM_PROC_PRINT(" 0x%08lx -> 0x%08lx"
|
||||
" %c%c%c%c%c\n",
|
||||
i,
|
||||
address,
|
||||
pte_read(*pte) ? 'r' : '-',
|
||||
pte_write(*pte) ? 'w' : '-',
|
||||
pte_exec(*pte) ? 'x' : '-',
|
||||
pte_dirty(*pte) ? 'd' : '-',
|
||||
pte_young(*pte) ? 'a' : '-' );
|
||||
} else {
|
||||
DRM_PROC_PRINT(" 0x%08lx\n", i);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(vma_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_vma_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#if __HAVE_DMA_HISTOGRAM
|
||||
static int DRM(_histo_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int len = 0;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
int i;
|
||||
unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
|
||||
unsigned long prev_value = 0;
|
||||
drm_buf_t *buffer;
|
||||
|
||||
if (offset > DRM_PROC_LIMIT) {
|
||||
*eof = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
*start = &buf[offset];
|
||||
*eof = 0;
|
||||
|
||||
DRM_PROC_PRINT("general statistics:\n");
|
||||
DRM_PROC_PRINT("total %10u\n", atomic_read(&dev->histo.total));
|
||||
DRM_PROC_PRINT("open %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_OPENS]));
|
||||
DRM_PROC_PRINT("close %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_CLOSES]));
|
||||
DRM_PROC_PRINT("ioctl %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_IOCTLS]));
|
||||
|
||||
DRM_PROC_PRINT("\nlock statistics:\n");
|
||||
DRM_PROC_PRINT("locks %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_LOCKS]));
|
||||
DRM_PROC_PRINT("unlocks %10u\n",
|
||||
atomic_read(&dev->counts[_DRM_STAT_UNLOCKS]));
|
||||
|
||||
if (dma) {
|
||||
#if 0
|
||||
DRM_PROC_PRINT("\ndma statistics:\n");
|
||||
DRM_PROC_PRINT("prio %10u\n",
|
||||
atomic_read(&dma->total_prio));
|
||||
DRM_PROC_PRINT("bytes %10u\n",
|
||||
atomic_read(&dma->total_bytes));
|
||||
DRM_PROC_PRINT("dmas %10u\n",
|
||||
atomic_read(&dma->total_dmas));
|
||||
DRM_PROC_PRINT("missed:\n");
|
||||
DRM_PROC_PRINT(" dma %10u\n",
|
||||
atomic_read(&dma->total_missed_dma));
|
||||
DRM_PROC_PRINT(" lock %10u\n",
|
||||
atomic_read(&dma->total_missed_lock));
|
||||
DRM_PROC_PRINT(" free %10u\n",
|
||||
atomic_read(&dma->total_missed_free));
|
||||
DRM_PROC_PRINT(" sched %10u\n",
|
||||
atomic_read(&dma->total_missed_sched));
|
||||
DRM_PROC_PRINT("tried %10u\n",
|
||||
atomic_read(&dma->total_tried));
|
||||
DRM_PROC_PRINT("hit %10u\n",
|
||||
atomic_read(&dma->total_hit));
|
||||
DRM_PROC_PRINT("lost %10u\n",
|
||||
atomic_read(&dma->total_lost));
|
||||
#endif
|
||||
|
||||
buffer = dma->next_buffer;
|
||||
if (buffer) {
|
||||
DRM_PROC_PRINT("next_buffer %7d\n", buffer->idx);
|
||||
} else {
|
||||
DRM_PROC_PRINT("next_buffer none\n");
|
||||
}
|
||||
buffer = dma->this_buffer;
|
||||
if (buffer) {
|
||||
DRM_PROC_PRINT("this_buffer %7d\n", buffer->idx);
|
||||
} else {
|
||||
DRM_PROC_PRINT("this_buffer none\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
DRM_PROC_PRINT("\nvalues:\n");
|
||||
if (dev->lock.hw_lock) {
|
||||
DRM_PROC_PRINT("lock 0x%08x\n",
|
||||
dev->lock.hw_lock->lock);
|
||||
} else {
|
||||
DRM_PROC_PRINT("lock none\n");
|
||||
}
|
||||
DRM_PROC_PRINT("context_flag 0x%08lx\n", dev->context_flag);
|
||||
DRM_PROC_PRINT("interrupt_flag 0x%08lx\n", dev->interrupt_flag);
|
||||
DRM_PROC_PRINT("dma_flag 0x%08lx\n", dev->dma_flag);
|
||||
|
||||
DRM_PROC_PRINT("queue_count %10d\n", dev->queue_count);
|
||||
DRM_PROC_PRINT("last_context %10d\n", dev->last_context);
|
||||
DRM_PROC_PRINT("last_switch %10lu\n", dev->last_switch);
|
||||
DRM_PROC_PRINT("last_checked %10d\n", dev->last_checked);
|
||||
|
||||
|
||||
DRM_PROC_PRINT("\n q2d d2c c2f"
|
||||
" q2c q2f dma sch"
|
||||
" ctx lacq lhld\n\n");
|
||||
for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
|
||||
DRM_PROC_PRINT("%s %10lu %10u %10u %10u %10u %10u"
|
||||
" %10u %10u %10u %10u %10u\n",
|
||||
i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
|
||||
i == DRM_DMA_HISTOGRAM_SLOTS - 1
|
||||
? prev_value : slot_value ,
|
||||
|
||||
atomic_read(&dev->histo
|
||||
.queued_to_dispatched[i]),
|
||||
atomic_read(&dev->histo
|
||||
.dispatched_to_completed[i]),
|
||||
atomic_read(&dev->histo
|
||||
.completed_to_freed[i]),
|
||||
|
||||
atomic_read(&dev->histo
|
||||
.queued_to_completed[i]),
|
||||
atomic_read(&dev->histo
|
||||
.queued_to_freed[i]),
|
||||
atomic_read(&dev->histo.dma[i]),
|
||||
atomic_read(&dev->histo.schedule[i]),
|
||||
atomic_read(&dev->histo.ctx[i]),
|
||||
atomic_read(&dev->histo.lacq[i]),
|
||||
atomic_read(&dev->histo.lhld[i]));
|
||||
prev_value = slot_value;
|
||||
slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
|
||||
}
|
||||
|
||||
if (len > request + offset) return request;
|
||||
*eof = 1;
|
||||
return len - offset;
|
||||
}
|
||||
|
||||
static int DRM(histo_info)(char *buf, char **start, off_t offset, int request,
|
||||
int *eof, void *data)
|
||||
{
|
||||
drm_device_t *dev = (drm_device_t *)data;
|
||||
int ret;
|
||||
|
||||
down(&dev->struct_sem);
|
||||
ret = DRM(_histo_info)(buf, start, offset, request, eof, data);
|
||||
up(&dev->struct_sem);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
416
linux/drm_vm.h
Normal file
@ -0,0 +1,416 @@
/* drm_vm.h -- Memory mapping for DRM -*- linux-c -*-
|
||||
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
#define __NO_VERSION__
|
||||
#include "drmP.h"
|
||||
|
||||
struct vm_operations_struct drm_vm_ops = {
|
||||
nopage: DRM(vm_nopage),
|
||||
open: DRM(vm_open),
|
||||
close: DRM(vm_close),
|
||||
};
|
||||
|
||||
struct vm_operations_struct drm_vm_shm_ops = {
|
||||
nopage: DRM(vm_shm_nopage),
|
||||
open: DRM(vm_open),
|
||||
close: DRM(vm_shm_close),
|
||||
};
|
||||
|
||||
struct vm_operations_struct drm_vm_dma_ops = {
|
||||
nopage: DRM(vm_dma_nopage),
|
||||
open: DRM(vm_open),
|
||||
close: DRM(vm_close),
|
||||
};
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
unsigned long DRM(vm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#else
|
||||
/* Return type changed in 2.3.23 */
|
||||
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#endif
|
||||
{
|
||||
return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
unsigned long DRM(vm_shm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#else
|
||||
/* Return type changed in 2.3.23 */
|
||||
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#endif
|
||||
{
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
drm_map_t *map = (drm_map_t *)vma->vm_private_data;
|
||||
#else
|
||||
drm_map_t *map = (drm_map_t *)vma->vm_pte;
|
||||
#endif
|
||||
unsigned long physical;
|
||||
unsigned long offset;
|
||||
unsigned long i;
|
||||
pgd_t *pgd;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!map) return NOPAGE_OOM; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start;
|
||||
i = (unsigned long)map->handle + offset;
|
||||
/* We have to walk page tables here because we need large SAREA's, and
|
||||
* they need to be virtually contigious in kernel space.
|
||||
*/
|
||||
pgd = pgd_offset_k( i );
|
||||
if( !pgd_present( *pgd ) ) return NOPAGE_OOM;
|
||||
pmd = pmd_offset( pgd, i );
|
||||
if( !pmd_present( *pmd ) ) return NOPAGE_OOM;
|
||||
pte = pte_offset( pmd, i );
|
||||
if( !pte_present( *pte ) ) return NOPAGE_OOM;
|
||||
physical = (unsigned long)pte_page( *pte )->virtual;
|
||||
atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
|
||||
|
||||
DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
return physical;
|
||||
#else
|
||||
return virt_to_page(physical);
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Special close routine which deletes map information if we are the last
|
||||
* person to close a mapping and its not in the global maplist.
|
||||
*/
|
||||
|
||||
void DRM(vm_shm_close)(struct vm_area_struct *vma)
|
||||
{
|
||||
drm_file_t *priv = vma->vm_file->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_vma_entry_t *pt, *prev;
|
||||
drm_map_t *map;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
int found_maps = 0;
|
||||
|
||||
DRM_DEBUG("0x%08lx,0x%08lx\n",
|
||||
vma->vm_start, vma->vm_end - vma->vm_start);
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
atomic_dec(&dev->vma_count);
|
||||
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
map = vma->vm_private_data;
|
||||
#else
|
||||
map = vma->vm_pte;
|
||||
#endif
|
||||
|
||||
down(&dev->struct_sem);
|
||||
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
if (pt->vma->vm_private_data == map) found_maps++;
|
||||
#else
|
||||
if (pt->vma->vm_pte == map) found_maps++;
|
||||
#endif
|
||||
if (pt->vma == vma) {
|
||||
if (prev) {
|
||||
prev->next = pt->next;
|
||||
} else {
|
||||
dev->vmalist = pt->next;
|
||||
}
|
||||
DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
|
||||
}
|
||||
}
|
||||
/* We were the only map that was found */
|
||||
if(found_maps == 1 &&
|
||||
map->flags & _DRM_REMOVABLE) {
|
||||
/* Check to see if we are in the maplist, if we are not, then
|
||||
* we delete this mappings information.
|
||||
*/
|
||||
found_maps = 0;
|
||||
list = &dev->maplist->head;
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *) list;
|
||||
if (r_list->map == map) found_maps++;
|
||||
}
|
||||
|
||||
if(!found_maps) {
|
||||
switch (map->type) {
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_FRAME_BUFFER:
|
||||
#if __REALLY_HAVE_MTRR
|
||||
if (map->mtrr >= 0) {
|
||||
int retcode;
|
||||
retcode = mtrr_del(map->mtrr,
|
||||
map->offset,
|
||||
map->size);
|
||||
DRM_DEBUG("mtrr_del = %d\n", retcode);
|
||||
}
|
||||
#endif
|
||||
DRM(ioremapfree)(map->handle, map->size);
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vfree(map->handle);
|
||||
break;
|
||||
case _DRM_AGP:
|
||||
break;
|
||||
}
|
||||
DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
|
||||
}
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
}
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
unsigned long DRM(vm_dma_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#else
|
||||
/* Return type changed in 2.3.23 */
|
||||
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
|
||||
unsigned long address,
|
||||
int write_access)
|
||||
#endif
|
||||
{
|
||||
drm_file_t *priv = vma->vm_file->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_device_dma_t *dma = dev->dma;
|
||||
unsigned long physical;
|
||||
unsigned long offset;
|
||||
unsigned long page;
|
||||
|
||||
if (!dma) return NOPAGE_SIGBUS; /* Error */
|
||||
if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
|
||||
if (!dma->pagelist) return NOPAGE_OOM ; /* Nothing allocated */
|
||||
|
||||
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
|
||||
page = offset >> PAGE_SHIFT;
|
||||
physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
|
||||
atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */
|
||||
|
||||
DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
|
||||
#if LINUX_VERSION_CODE < 0x020317
|
||||
return physical;
|
||||
#else
|
||||
return virt_to_page(physical);
|
||||
#endif
|
||||
}
|
||||
|
||||
void DRM(vm_open)(struct vm_area_struct *vma)
|
||||
{
|
||||
drm_file_t *priv = vma->vm_file->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_vma_entry_t *vma_entry;
|
||||
|
||||
DRM_DEBUG("0x%08lx,0x%08lx\n",
|
||||
vma->vm_start, vma->vm_end - vma->vm_start);
|
||||
atomic_inc(&dev->vma_count);
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
/* The map can exist after the fd is closed. */
|
||||
MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
|
||||
vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
|
||||
if (vma_entry) {
|
||||
down(&dev->struct_sem);
|
||||
vma_entry->vma = vma;
|
||||
vma_entry->next = dev->vmalist;
|
||||
vma_entry->pid = current->pid;
|
||||
dev->vmalist = vma_entry;
|
||||
up(&dev->struct_sem);
|
||||
}
|
||||
}
|
||||
|
||||
void DRM(vm_close)(struct vm_area_struct *vma)
|
||||
{
|
||||
drm_file_t *priv = vma->vm_file->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_vma_entry_t *pt, *prev;
|
||||
|
||||
DRM_DEBUG("0x%08lx,0x%08lx\n",
|
||||
vma->vm_start, vma->vm_end - vma->vm_start);
|
||||
#if LINUX_VERSION_CODE < 0x020333
|
||||
MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
|
||||
#endif
|
||||
atomic_dec(&dev->vma_count);
|
||||
|
||||
down(&dev->struct_sem);
|
||||
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
|
||||
if (pt->vma == vma) {
|
||||
if (prev) {
|
||||
prev->next = pt->next;
|
||||
} else {
|
||||
dev->vmalist = pt->next;
|
||||
}
|
||||
DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
|
||||
break;
|
||||
}
|
||||
}
|
||||
up(&dev->struct_sem);
|
||||
}
|
||||
|
||||
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev;
|
||||
drm_device_dma_t *dma;
|
||||
unsigned long length = vma->vm_end - vma->vm_start;
|
||||
|
||||
lock_kernel();
|
||||
dev = priv->dev;
|
||||
dma = dev->dma;
|
||||
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
|
||||
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
|
||||
|
||||
/* Length must match exact page count */
|
||||
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
|
||||
unlock_kernel();
|
||||
return -EINVAL;
|
||||
}
|
||||
unlock_kernel();
|
||||
|
||||
vma->vm_ops = &drm_vm_dma_ops;
|
||||
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
|
||||
/* In Linux 2.2.3 and above, this is
|
||||
handled in do_mmap() in mm/mmap.c. */
|
||||
++filp->f_count;
|
||||
#endif
|
||||
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
||||
DRM(vm_open)(vma);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
|
||||
{
|
||||
drm_file_t *priv = filp->private_data;
|
||||
drm_device_t *dev = priv->dev;
|
||||
drm_map_t *map = NULL;
|
||||
drm_map_list_t *r_list;
|
||||
struct list_head *list;
|
||||
|
||||
DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
|
||||
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
|
||||
|
||||
if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);
|
||||
|
||||
/* A sequential search of a linked list is
|
||||
fine here because: 1) there will only be
|
||||
about 5-10 entries in the list and, 2) a
|
||||
DRI client only has to do this mapping
|
||||
once, so it doesn't have to be optimized
|
||||
for performance, even if the list was a
|
||||
bit longer. */
|
||||
list_for_each(list, &dev->maplist->head) {
|
||||
r_list = (drm_map_list_t *)list;
|
||||
map = r_list->map;
|
||||
if (!map) continue;
|
||||
if (map->offset == VM_OFFSET(vma)) break;
|
||||
}
|
||||
|
||||
if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
|
||||
return -EPERM;
|
||||
|
||||
/* Check for valid size. */
|
||||
if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
|
||||
vma->vm_flags &= VM_MAYWRITE;
|
||||
#if defined(__i386__)
|
||||
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
|
||||
#else
|
||||
/* Ye gads this is ugly. With more thought
|
||||
we could move this up higher and use
|
||||
`protection_map' instead. */
|
||||
vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
|
||||
__pte(pgprot_val(vma->vm_page_prot)))));
|
||||
#endif
|
||||
}
|
||||
|
||||
switch (map->type) {
|
||||
case _DRM_FRAME_BUFFER:
|
||||
case _DRM_REGISTERS:
|
||||
case _DRM_AGP:
|
||||
if (VM_OFFSET(vma) >= __pa(high_memory)) {
|
||||
#if defined(__i386__)
|
||||
if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
|
||||
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
|
||||
pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
|
||||
}
|
||||
#elif defined(__ia64__)
|
||||
if (map->type != _DRM_AGP)
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
#endif
|
||||
vma->vm_flags |= VM_IO; /* not in core dump */
|
||||
}
|
||||
if (remap_page_range(vma->vm_start,
|
||||
VM_OFFSET(vma),
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot))
|
||||
return -EAGAIN;
|
||||
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
|
||||
" offset = 0x%lx\n",
|
||||
map->type,
|
||||
vma->vm_start, vma->vm_end, VM_OFFSET(vma));
|
||||
vma->vm_ops = &drm_vm_ops;
|
||||
break;
|
||||
case _DRM_SHM:
|
||||
vma->vm_ops = &drm_vm_shm_ops;
|
||||
#if LINUX_VERSION_CODE >= 0x020300
|
||||
vma->vm_private_data = (void *)map;
|
||||
#else
|
||||
vma->vm_pte = (unsigned long)map;
|
||||
#endif
|
||||
/* Don't let this area swap. Change when
|
||||
DRM_KERNEL advisory is supported. */
|
||||
vma->vm_flags |= VM_LOCKED;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL; /* This should never happen. */
|
||||
}
|
||||
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
|
||||
|
||||
#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
|
||||
/* In Linux 2.2.3 and above, this is
|
||||
handled in do_mmap() in mm/mmap.c. */
|
||||
++filp->f_count;
|
||||
#endif
|
||||
vma->vm_file = filp; /* Needed for drm_vm_open() */
|
||||
DRM(vm_open)(vma);
|
||||
return 0;
|
||||
}
|
||||
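DRM(mmap) above resolves the mmap offset against dev->maplist and installs the matching vm_operations; offset 0 is reserved and routed to DRM(mmap_dma) for the DMA buffer mapping, and the requested length must equal the map's full size. From user space the flow is simply to hand the region's offset token back to mmap(2). A hedged sketch follows; the helper name map_drm_region and its arguments are illustrative, not part of this tree.

#include <stddef.h>
#include <sys/mman.h>

/* Map a DRM region by the offset token the kernel associated with it.
 * `size' must be the region's full size, or DRM(mmap) returns -EINVAL. */
void *map_drm_region(int fd, unsigned long offset, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, offset);

	return ptr == MAP_FAILED ? NULL : ptr;
}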
93
linux/gamma.h
Normal file

@@ -0,0 +1,93 @@
/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
 * Created: Mon Jan  4 08:58:31 1999 by gareth@valinux.com
 *
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#ifndef __GAMMA_H__
#define __GAMMA_H__

/* This remains constant for all DRM template files.
 */
#define DRM(x) gamma_##x

/* General customization:
 */
#define __HAVE_MTRR			1

/* DMA customization:
 */
#define __HAVE_DMA			1
#define __HAVE_OLD_DMA			1
#define __HAVE_PCI_DMA			1

#define __HAVE_MULTIPLE_DMA_QUEUES	1
#define __HAVE_DMA_WAITQUEUE		1

#define __HAVE_DMA_WAITLIST		1
#define __HAVE_DMA_FREELIST		1

#define __HAVE_DMA_FLUSH		1
#define __HAVE_DMA_SCHEDULE		1

#define __HAVE_DMA_READY		1
#define DRIVER_DMA_READY() do {					\
	gamma_dma_ready(dev);					\
} while (0)

#define __HAVE_DMA_QUIESCENT		1
#define DRIVER_DMA_QUIESCENT() do {				\
	/* FIXME ! */						\
	gamma_dma_quiescent_dual(dev);				\
	return 0;						\
} while (0)

#define __HAVE_DMA_IRQ			1
#define __HAVE_DMA_IRQ_BH		1
#define DRIVER_PREINSTALL() do {				\
	drm_gamma_private_t *dev_priv =				\
		(drm_gamma_private_t *)dev->dev_private;	\
	GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000000 );		\
	GAMMA_WRITE( GAMMA_GDMACONTROL,  0x00000000 );		\
} while (0)

#define DRIVER_POSTINSTALL() do {				\
	drm_gamma_private_t *dev_priv =				\
		(drm_gamma_private_t *)dev->dev_private;	\
	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00002001 );	\
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000008 );	\
	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00039090 );	\
} while (0)

#define DRIVER_UNINSTALL() do {					\
	drm_gamma_private_t *dev_priv =				\
		(drm_gamma_private_t *)dev->dev_private;	\
	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00000000 );	\
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000000 );	\
	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00000000 );	\
} while (0)

#endif /* __GAMMA_H__ */
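gamma.h above, like i810.h below, customizes the shared DRM template sources entirely through the preprocessor: #define DRM(x) gamma_##x renames every core symbol with a driver prefix, while the __HAVE_* feature flags and DRIVER_* hooks select optional code paths at compile time. A minimal illustration of the renaming mechanism, using a hypothetical driver prefix (foo_) and function (init) rather than anything from this tree:

/* Hypothetical driver header, following the same template convention: */
#define DRM(x) foo_##x

/* In the shared template sources, every definition is written once ... */
int DRM(init)(void);		/* expands to: int foo_init(void); */

/* ... so each driver that provides its own DRM(x) gets a private copy of
 * the core, compiled with that driver's __HAVE_* feature selection. */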
112
linux/i810.h
Normal file

@@ -0,0 +1,112 @@
/* i810.h -- Intel i810/i815 DRM template customization -*- linux-c -*-
 * Created: Thu Feb 15 00:01:12 2001 by gareth@valinux.com
 *
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#ifndef __I810_H__
#define __I810_H__

/* This remains constant for all DRM template files.
 */
#define DRM(x) i810_##x

/* General customization:
 */
#define __HAVE_AGP		1
#define __MUST_HAVE_AGP		1
#define __HAVE_MTRR		1
#define __HAVE_CTX_BITMAP	1

/* Driver customization:
 */
#define __HAVE_RELEASE		1
#define DRIVER_RELEASE() do {					\
	i810_reclaim_buffers( dev, priv->pid );			\
} while (0)

/* DMA customization:
 */
#define __HAVE_DMA		1
#define __HAVE_DMA_QUEUE	1
#define __HAVE_DMA_WAITLIST	1
#define __HAVE_DMA_RECLAIM	1

#define __HAVE_DMA_QUIESCENT	1
#define DRIVER_DMA_QUIESCENT() do {				\
	i810_dma_quiescent( dev );				\
} while (0)

#define __HAVE_DMA_IRQ		1
#define __HAVE_DMA_IRQ_BH	1
#define DRIVER_PREINSTALL() do {				\
	drm_i810_private_t *dev_priv =				\
		(drm_i810_private_t *)dev->dev_private;		\
	u16 tmp;						\
	tmp = I810_READ16( I810REG_HWSTAM );			\
	tmp = tmp & 0x6000;					\
	I810_WRITE16( I810REG_HWSTAM, tmp );			\
								\
	tmp = I810_READ16( I810REG_INT_MASK_R );		\
	tmp = tmp & 0x6000;	/* Unmask interrupts */		\
	I810_WRITE16( I810REG_INT_MASK_R, tmp );		\
	tmp = I810_READ16( I810REG_INT_ENABLE_R );		\
	tmp = tmp & 0x6000;	/* Disable all interrupts */	\
	I810_WRITE16( I810REG_INT_ENABLE_R, tmp );		\
} while (0)

#define DRIVER_POSTINSTALL() do {				\
	drm_i810_private_t *dev_priv =				\
		(drm_i810_private_t *)dev->dev_private;		\
	u16 tmp;						\
	tmp = I810_READ16( I810REG_INT_ENABLE_R );		\
	tmp = tmp & 0x6000;					\
	tmp = tmp | 0x0003;	/* Enable bp & user interrupts */ \
	I810_WRITE16( I810REG_INT_ENABLE_R, tmp );		\
} while (0)

#define DRIVER_UNINSTALL() do {					\
	drm_i810_private_t *dev_priv =				\
		(drm_i810_private_t *)dev->dev_private;		\
	u16 tmp;						\
	tmp = I810_READ16( I810REG_INT_IDENTITY_R );		\
	tmp = tmp & ~(0x6000);	/* Clear all interrupts */	\
	if ( tmp != 0 ) I810_WRITE16( I810REG_INT_IDENTITY_R, tmp ); \
								\
	tmp = I810_READ16( I810REG_INT_ENABLE_R );		\
	tmp = tmp & 0x6000;	/* Disable all interrupts */	\
	I810_WRITE16( I810REG_INT_ENABLE_R, tmp );		\
} while (0)

/* Buffer customization:
 */

#define DRIVER_BUF_PRIV_T	drm_i810_buf_priv_t

#define DRIVER_AGP_BUFFERS_MAP( dev )				\
	((drm_i810_private_t *)((dev)->dev_private))->buffer_map

#endif
@@ -621,6 +621,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev )
static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
{
	drm_radeon_private_t *dev_priv;
	drm_radeon_depth_clear_t *depth_clear;
	int i;

	dev_priv = drm_alloc( sizeof(drm_radeon_private_t), DRM_MEM_DRIVER );

@@ -705,32 +706,31 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
	 * all values to prevent unwanted 3D state from slipping through
	 * and screwing with the clear operation.
	 */
	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
					   RADEON_Z_ENABLE |
					   (dev_priv->color_fmt << 10) |
					   RADEON_ZBLOCK16);
	depth_clear = &dev_priv->depth_clear;

	dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt |
						   RADEON_Z_TEST_ALWAYS |
						   RADEON_STENCIL_TEST_ALWAYS |
						   RADEON_STENCIL_S_FAIL_KEEP |
						   RADEON_STENCIL_ZPASS_KEEP |
						   RADEON_STENCIL_ZFAIL_KEEP |
						   RADEON_Z_WRITE_ENABLE);
	depth_clear->rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
				  (dev_priv->color_fmt << 10) |
				  RADEON_ZBLOCK16);

	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
					 RADEON_BFACE_SOLID |
					 RADEON_FFACE_SOLID |
					 RADEON_FLAT_SHADE_VTX_LAST |
	depth_clear->rb3d_zstencilcntl = (dev_priv->depth_fmt |
					  RADEON_Z_TEST_ALWAYS |
					  RADEON_STENCIL_TEST_ALWAYS |
					  RADEON_STENCIL_FAIL_REPLACE |
					  RADEON_STENCIL_ZPASS_REPLACE |
					  RADEON_STENCIL_ZFAIL_REPLACE |
					  RADEON_Z_WRITE_ENABLE);

					 RADEON_DIFFUSE_SHADE_FLAT |
					 RADEON_ALPHA_SHADE_FLAT |
					 RADEON_SPECULAR_SHADE_FLAT |
					 RADEON_FOG_SHADE_FLAT |

					 RADEON_VTX_PIX_CENTER_OGL |
					 RADEON_ROUND_MODE_TRUNC |
					 RADEON_ROUND_PREC_8TH_PIX);
	depth_clear->se_cntl = (RADEON_FFACE_CULL_CW |
				RADEON_BFACE_SOLID |
				RADEON_FFACE_SOLID |
				RADEON_FLAT_SHADE_VTX_LAST |
				RADEON_DIFFUSE_SHADE_FLAT |
				RADEON_ALPHA_SHADE_FLAT |
				RADEON_SPECULAR_SHADE_FLAT |
				RADEON_FOG_SHADE_FLAT |
				RADEON_VTX_PIX_CENTER_OGL |
				RADEON_ROUND_MODE_TRUNC |
				RADEON_ROUND_PREC_8TH_PIX);

	/* FIXME: We want multiple shared areas, including one shared
	 * only by the X Server and kernel module.

@@ -57,7 +57,7 @@
#define RADEON_FRONT		0x1
#define RADEON_BACK		0x2
#define RADEON_DEPTH		0x4
#define RADEON_STENCIL		0x8

/* Vertex/indirect buffer size
 */

@@ -141,6 +141,8 @@ typedef struct {
	unsigned int se_vport_yoffset;
	unsigned int se_vport_zscale;
	unsigned int se_vport_zoffset;
	unsigned int se_zbias_factor;
	unsigned int se_zbias_constant;
} drm_radeon_viewport_state_t;

/* Setup state */

@@ -212,12 +214,12 @@ typedef struct {
/* Space is crucial; there is some redunancy here:
 */
typedef struct {
	unsigned int start;
	unsigned int finish;
	unsigned int start;
	unsigned int finish;
	unsigned int prim:8;
	unsigned int stateidx:8;
	unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
	unsigned int vc_format;
	unsigned int vc_format;
} drm_radeon_prim_t;

/* Really need a bigger area to hold the primitive and state data:

@@ -304,6 +306,7 @@ typedef struct drm_radeon_clear {
	unsigned int clear_color;
	unsigned int clear_colormask;
	unsigned int clear_depth;
	unsigned int clear_stencil;
	union {
		float f[5];
		unsigned int ui[5];

@@ -326,9 +326,9 @@ extern int radeon_context_switch_complete(drm_device_t *dev, int new);
#	define RADEON_Z_TEST_MASK		(7 << 4)
#	define RADEON_Z_TEST_ALWAYS		(7 << 4)
#	define RADEON_STENCIL_TEST_ALWAYS	(7 << 12)
#	define RADEON_STENCIL_S_FAIL_KEEP	(0 << 16)
#	define RADEON_STENCIL_ZPASS_KEEP	(0 << 20)
#	define RADEON_STENCIL_ZFAIL_KEEP	(0 << 20)
#	define RADEON_STENCIL_FAIL_REPLACE	(2 << 16)
#	define RADEON_STENCIL_ZPASS_REPLACE	(2 << 20)
#	define RADEON_STENCIL_ZFAIL_REPLACE	(2 << 24)
#	define RADEON_Z_WRITE_ENABLE		(1 << 30)
#define RADEON_RBBM_SOFT_RESET		0x00f0
#	define RADEON_SOFT_RESET_CP		(1 << 0)

@@ -705,6 +705,11 @@ do { \
	write &= mask;						\
} while (0)

#define OUT_RING_REG( reg, val ) do {				\
	OUT_RING( CP_PACKET0( reg, 0 ) );			\
	OUT_RING( val );					\
} while (0)

#define RADEON_PERFORMANCE_BOXES	0

#endif /* __RADEON_DRV_H__ */

@@ -23,9 +23,8 @@
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Kevin E. Martin <martin@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 *    Kevin E. Martin <martin@valinux.com>
 */

#define __NO_VERSION__

@@ -141,15 +140,17 @@ static inline void radeon_emit_viewport( drm_radeon_private_t *dev_priv,
	RING_LOCALS;
	DRM_DEBUG( "    %s\n", __FUNCTION__ );

	BEGIN_RING( 7 );
	BEGIN_RING( 9 );

	OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
	OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 7 ) );
	OUT_RING( viewport->se_vport_xscale );
	OUT_RING( viewport->se_vport_xoffset );
	OUT_RING( viewport->se_vport_yscale );
	OUT_RING( viewport->se_vport_yoffset );
	OUT_RING( viewport->se_vport_zscale );
	OUT_RING( viewport->se_vport_zoffset );
	OUT_RING( viewport->se_zbias_factor );
	OUT_RING( viewport->se_zbias_constant );

	ADVANCE_RING();
}

@@ -440,12 +441,14 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );
	DRM_DEBUG( __FUNCTION__": flags = 0x%x\n", flags );

	radeon_update_ring_snapshot( dev_priv );

@@ -457,6 +460,32 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
		if ( tmp & RADEON_BACK )  flags |= RADEON_FRONT;
	}

	/* We have to clear the depth and/or stencil buffers by
	 * rendering a quad into just those buffers.  Thus, we have to
	 * make sure the 3D engine is configured correctly.
	 */
	if ( flags & (RADEON_DEPTH | RADEON_STENCIL) ) {
		if ( sarea_priv->state[0].dirty ) {
			radeon_emit_state( dev_priv,
					   &sarea_priv->state[0] );
		}
		rb3d_cntl = depth_clear->rb3d_cntl;

		if ( flags & RADEON_DEPTH ) {
			rb3d_cntl |= RADEON_Z_ENABLE;
		} else {
			rb3d_cntl &= ~RADEON_Z_ENABLE;
		}

		if ( flags & RADEON_STENCIL ) {
			rb3d_cntl |= RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = clear->clear_stencil;
		} else {
			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = 0x00000000;
		}
	}

	for ( i = 0 ; i < nbox ; i++ ) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;

@@ -522,34 +551,28 @@ static void radeon_cp_dispatch_clear( drm_device_t *dev,
			OUT_RING( (w << 16) | h );

			ADVANCE_RING();

		}

		if ( flags & RADEON_DEPTH ) {
			drm_radeon_depth_clear_t *depth_clear =
			   &dev_priv->depth_clear;

			if ( sarea_priv->state[0].dirty ) {
				radeon_emit_state( dev_priv,
						   &sarea_priv->state[0] );
			}

			/* FIXME: Render a rectangle to clear the depth
			 * buffer.  So much for those "fast Z clears"...
		if ( flags & (RADEON_DEPTH | RADEON_STENCIL) ) {
			/* FIXME: Emit cliprect...
			 */
			BEGIN_RING( 23 );

			BEGIN_RING( 25 );

			RADEON_WAIT_UNTIL_2D_IDLE();

			OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
			OUT_RING( 0x00000000 );
			OUT_RING( depth_clear->rb3d_cntl );
			OUT_RING( CP_PACKET0( RADEON_RB3D_ZSTENCILCNTL, 0 ) );
			OUT_RING( depth_clear->rb3d_zstencilcntl );
			OUT_RING( CP_PACKET0( RADEON_RB3D_PLANEMASK, 0 ) );
			OUT_RING( 0x00000000 );
			OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
			OUT_RING( depth_clear->se_cntl );
			OUT_RING( rb3d_cntl );

			OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
				      depth_clear->rb3d_zstencilcntl );
			OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
				      rb3d_stencilrefmask );
			OUT_RING_REG( RADEON_RB3D_PLANEMASK,
				      0x00000000 );
			OUT_RING_REG( RADEON_SE_CNTL,
				      depth_clear->se_cntl );

			OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 10 ) );
			OUT_RING( RADEON_VTX_Z_PRESENT );

@@ -722,7 +745,7 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG( "%s: nbox=%d %d..%d prim %x nvert %d\n",
	DRM_DEBUG( "%s: nbox=%d %d..%d prim %x nvert %d\n",
		   __FUNCTION__, sarea_priv->nbox,
		   prim->start, prim->finish, prim->prim,
		   numverts);

@@ -730,7 +753,7 @@ static void radeon_cp_dispatch_vertex( drm_device_t *dev,
	radeon_update_ring_snapshot( dev_priv );

	buf_priv->dispatched = 1;

	do {
		/* Emit the next set of up to three cliprects */
		if ( i < sarea_priv->nbox ) {

@@ -791,7 +814,7 @@ static void radeon_cp_dispatch_indirect( drm_device_t *dev,
		   buf->idx, start, end );

	radeon_update_ring_snapshot( dev_priv );

	if ( start != end ) {
		int offset = (dev_priv->agp_buffers_offset
			      + buf->offset + start);

@@ -842,20 +865,20 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
/* 		   offset); */

	radeon_update_ring_snapshot( dev_priv );

	if ( start < prim->finish ) {
		buf_priv->dispatched = 1;

		dwords = (prim->finish - prim->start + 3) / sizeof(u32);

		data = (u32 *)((char *)dev_priv->buffers->handle +
		data = (u32 *)((char *)dev_priv->buffers->handle +
			       elt_buf->offset + prim->start);

		data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
		data[1] = offset;
		data[2] = RADEON_MAX_VB_VERTS;
		data[3] = prim->vc_format;
		data[4] = (prim->prim |
		data[4] = (prim->prim |
			   RADEON_PRIM_WALK_IND |
			   RADEON_COLOR_ORDER_RGBA |
			   RADEON_VTX_FMT_RADEON_MODE |

@@ -872,7 +895,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
/* 	printk("data[3]: %x\n", data[3]); */
/* 	printk("data[4]: %x\n", data[4]); */
/* 	for (i = 0 ; i < 10 ; i++) */
/* 		printk("%d: %d\n", i, ((short *)(data+5))[i]); */
/* 		printk("%d: %d\n", i, ((short *)(data+5))[i]); */
/* 	printk("...\n"); */

	if (1)

@@ -883,8 +906,8 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
					      &sarea_priv->boxes[i] );
		}

		radeon_cp_dispatch_indirect( dev, elt_buf,
					     prim->start,
		radeon_cp_dispatch_indirect( dev, elt_buf,
					     prim->start,
					     prim->finish );

		i++;

@@ -1157,11 +1180,11 @@ int radeon_cp_vertex( struct inode *inode, struct file *filp,
		}

		if ( prim->stateidx != 0xff ) {
			radeon_emit_state( dev_priv,
			radeon_emit_state( dev_priv,
					   &sarea_priv->state[prim->stateidx] );
		}

		if ( prim->finish <= prim->start )
		if ( prim->finish <= prim->start )
			continue;

		if ( prim->start & 0x7 ) {

@@ -1174,7 +1197,7 @@ int radeon_cp_vertex( struct inode *inode, struct file *filp,
		} else {
			radeon_cp_dispatch_vertex( dev, buf, prim );
		}

	}

	if (vertex.discard) {
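The radeon_cp_dispatch_clear() hunks above carry the substance of this change: rb3d_cntl and rb3d_stencilrefmask are now derived from the clear flags, so the quad rendered for the clear touches exactly the requested depth and/or stencil planes. The helper below is an illustrative restatement of that flag handling, not a function from this tree; it only reuses the register bit names that appear in the hunks.

/* Illustrative only: mirrors the flag handling shown in the hunks above. */
static u32 build_rb3d_cntl(u32 base_cntl, unsigned int flags,
			   unsigned int clear_stencil, u32 *refmask)
{
	u32 cntl = base_cntl;		/* depth_clear->rb3d_cntl template */

	if (flags & RADEON_DEPTH)
		cntl |= RADEON_Z_ENABLE;
	else
		cntl &= ~RADEON_Z_ENABLE;

	if (flags & RADEON_STENCIL) {
		cntl |= RADEON_STENCIL_ENABLE;
		*refmask = clear_stencil;	/* packed value from userspace */
	} else {
		cntl &= ~RADEON_STENCIL_ENABLE;
		*refmask = 0x00000000;
	}
	return cntl;
}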