Port the Linux mga driver to BSD. This has been lightly tested with one
G400 card in an Intel BX motherboard and requires the AGP driver which
I just committed to FreeBSD-current.

Doug Rabson 2000-06-09 17:16:10 +00:00
parent 91c45d3e8c
commit 85dba8ea02
9 changed files with 4406 additions and 0 deletions

bsd-core/mga/Makefile (new file, 16 lines)

@@ -0,0 +1,16 @@
# $FreeBSD$
KMOD = mga
SRCS = mga_drv.c mga_context.c mga_state.c mga_bufs.c mga_dma.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I.. -DDRM_AGP
KERN = /usr/src/sys
KMODDEPS = drm
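
# The @ and machine targets below provide the kernel-source symlinks that
# bsd.kmod.mk expects; KMODDEPS records this module's dependency on drm.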
@:
	ln -sf /sys @

machine:
	ln -sf /sys/i386/include machine

.include <bsd.kmod.mk>

bsd/mga/Makefile (new file, 16 lines)

@@ -0,0 +1,16 @@
# $FreeBSD$
KMOD = mga
SRCS = mga_drv.c mga_context.c mga_state.c mga_bufs.c mga_dma.c
SRCS += device_if.h bus_if.h pci_if.h
CFLAGS += ${DEBUG_FLAGS} -I.. -DDRM_AGP
KERN = /usr/src/sys
KMODDEPS = drm
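
# The @ and machine targets below provide the kernel-source symlinks that
# bsd.kmod.mk expects; KMODDEPS records this module's dependency on drm.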
@:
	ln -sf /sys @

machine:
	ln -sf /sys/i386/include machine

.include <bsd.kmod.mk>

bsd/mga/mga_bufs.c (new file, 604 lines)

@@ -0,0 +1,604 @@
/* mga_bufs.c -- IOCTLs to manage buffers
* Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86$
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
#include <sys/mman.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
static int
mga_addbufs_agp(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
agp_offset = request.agp_start;
alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) :size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
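/*
 * Worked example, assuming 4KB pages (PAGE_SHIFT == 12): a request size of
 * 64KB gives order 16, so size == 1 << 16; page_order is then 4 and
 * total == PAGE_SIZE << 4 == 64KB, i.e. 16 pages per allocation unit.
 */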
byte_count = 0;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
DRM_DEBUG("byte_count: %d\n", byte_count);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
if (dev->queue_count) return EBUSY; /* Not while in use */
simple_lock(&dev->count_lock);
if (dev->buf_use) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
atomic_inc(&dev->buf_alloc);
simple_unlock(&dev->count_lock);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
entry = &dma->bufs[order];
if (entry->buf_count) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while(entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
DRM_DEBUG("offset : %ld\n", offset);
buf->offset = offset; /* Hrm */
buf->bus_address = dev->agp->base + agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->agp->base);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->dma_wait = 0;
buf->pid = 0;
buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS);
buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);
#if DRM_DMA_HISTOGRAM
timespecclear(&buf->time_queued);
timespecclear(&buf->time_dispatched);
timespecclear(&buf->time_completed);
timespecclear(&buf->time_freed);
#endif
offset = offset + alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
dma->byte_count += byte_count;
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
request.count = entry->buf_count;
request.size = size;
*(drm_buf_desc_t *) data = request;
atomic_dec(&dev->buf_alloc);
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
DRM_DEBUG("byte_count: %d\n", byte_count);
dma->flags = _DRM_DMA_USE_AGP;
DRM_DEBUG("dma->flags : %x\n", dma->flags);
return 0;
}
static int
mga_addbufs_pci(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int count;
int order;
int size;
int total;
int page_order;
drm_buf_entry_t *entry;
unsigned long page;
drm_buf_t *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
count = request.count;
order = drm_order(request.size);
size = 1 << order;
DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
request.count, request.size, size, order, dev->queue_count);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
if (dev->queue_count) return EBUSY; /* Not while in use */
alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) :size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
simple_lock(&dev->count_lock);
if (dev->buf_use) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
atomic_inc(&dev->buf_alloc);
simple_unlock(&dev->count_lock);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
entry = &dma->bufs[order];
if (entry->buf_count) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM; /* May only call once for each order */
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
DRM_MEM_SEGS);
if (!entry->seglist) {
drm_free(entry->buflist,
count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
atomic_dec(&dev->buf_alloc);
return ENOMEM;
}
memset(entry->seglist, 0, count * sizeof(*entry->seglist));
dma->pagelist = drm_realloc(dma->pagelist,
dma->page_count * sizeof(*dma->pagelist),
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES);
DRM_DEBUG("pagelist: %d entries\n",
dma->page_count + (count << page_order));
entry->buf_size = size;
entry->page_order = page_order;
byte_count = 0;
page_count = 0;
while (entry->buf_count < count) {
if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
entry->seglist[entry->seg_count++] = page;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
page + PAGE_SIZE * i);
dma->pagelist[dma->page_count + page_count++]
= page + PAGE_SIZE * i;
}
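/*
 * Carve fixed-size buffers out of this page allocation; each buffer
 * starts at a multiple of 'alignment' within the block.
 */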
for (offset = 0;
offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
buf->address = (void *)(page + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
buf->dma_wait = 0;
buf->pid = 0;
#if DRM_DMA_HISTOGRAM
timespecclear(&buf->time_queued);
timespecclear(&buf->time_dispatched);
timespecclear(&buf->time_completed);
timespecclear(&buf->time_freed);
#endif
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
dma->buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist),
DRM_MEM_BUFS);
for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
dma->buflist[i] = &entry->buflist[i - dma->buf_count];
dma->buf_count += entry->buf_count;
dma->seg_count += entry->seg_count;
dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
drm_freelist_create(&entry->freelist, entry->buf_count);
for (i = 0; i < entry->buf_count; i++) {
drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
request.count = entry->buf_count;
request.size = size;
*(drm_buf_desc_t *) data = request;
atomic_dec(&dev->buf_alloc);
return 0;
}
int
mga_addbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_buf_desc_t request;
request = *(drm_buf_desc_t *) data;
if(request.flags & _DRM_AGP_BUFFER)
return mga_addbufs_agp(kdev, cmd, data, flags, p);
else
return mga_addbufs_pci(kdev, cmd, data, flags, p);
}
int
mga_infobufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
int i;
int count;
int error;
if (!dma) return EINVAL;
simple_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
simple_unlock(&dev->count_lock);
return EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
simple_unlock(&dev->count_lock);
request = *(drm_buf_info_t *) data;
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) ++count;
}
DRM_DEBUG("count = %d\n", count);
if (request.count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
if (dma->bufs[i].buf_count) {
error = copyout(&dma->bufs[i].buf_count,
&request.list[count].count,
sizeof(dma->bufs[0]
.buf_count));
if (error) return error;
error = copyout(&dma->bufs[i].buf_size,
&request.list[count].size,
sizeof(dma->bufs[0]
.buf_size));
if (error) return error;
error = copyout(&dma->bufs[i]
.freelist.low_mark,
&request.list[count].low_mark,
sizeof(dma->bufs[0]
.freelist.low_mark));
if (error) return error;
error = copyout(&dma->bufs[i]
.freelist.high_mark,
&request.list[count].high_mark,
sizeof(dma->bufs[0]
.freelist.high_mark));
if (error) return error;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].freelist.low_mark,
dma->bufs[i].freelist.high_mark);
++count;
}
}
}
request.count = count;
*(drm_buf_info_t *) data = request;
return 0;
}
int
mga_markbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int order;
drm_buf_entry_t *entry;
if (!dma) return EINVAL;
request = *(drm_buf_desc_t *) data;
DRM_DEBUG("%d, %d, %d\n",
request.size, request.low_mark, request.high_mark);
order = drm_order(request.size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
entry = &dma->bufs[order];
if (request.low_mark < 0 || request.low_mark > entry->buf_count)
return EINVAL;
if (request.high_mark < 0 || request.high_mark > entry->buf_count)
return EINVAL;
entry->freelist.low_mark = request.low_mark;
entry->freelist.high_mark = request.high_mark;
return 0;
}
int
mga_freebufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
drm_buf_free_t request;
int i;
int error;
int idx;
drm_buf_t *buf;
if (!dma) return EINVAL;
request = *(drm_buf_free_t *) data;
DRM_DEBUG("%d\n", request.count);
for (i = 0; i < request.count; i++) {
error = copyin(&request.list[i],
&idx,
sizeof(idx));
if (error) return error;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
return EINVAL;
}
buf = dma->buflist[idx];
if (buf->pid != p->p_pid) {
DRM_ERROR("Process %d freeing buffer owned by %d\n",
p->p_pid, buf->pid);
return EINVAL;
}
drm_free_buffer(dev, buf);
}
return 0;
}
int
mga_mapbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
vm_offset_t virtual;
vm_offset_t address;
drm_buf_map_t request;
int i;
if (!dma) return EINVAL;
DRM_DEBUG("\n");
simple_lock(&dev->count_lock);
if (atomic_read(&dev->buf_alloc)) {
simple_unlock(&dev->count_lock);
DRM_DEBUG("Busy\n");
return EBUSY;
}
++dev->buf_use; /* Can't allocate more after this call */
simple_unlock(&dev->count_lock);
request = *(drm_buf_map_t *) data;
DRM_DEBUG("mga_mapbufs\n");
DRM_DEBUG("dma->flags : %x\n", dma->flags);
if (request.count >= dma->buf_count) {
if(dma->flags & _DRM_DMA_USE_AGP) {
drm_mga_private_t *dev_priv = dev->dev_private;
drm_map_t *map = NULL;
map = dev->maplist[dev_priv->buffer_map_idx];
if (!map) {
DRM_DEBUG("map is null\n");
retcode = EINVAL;
goto done;
}
DRM_DEBUG("map->offset : %lx\n", map->offset);
DRM_DEBUG("map->size : %lx\n", map->size);
DRM_DEBUG("map->type : %d\n", map->type);
DRM_DEBUG("map->flags : %x\n", map->flags);
DRM_DEBUG("map->handle : %p\n", map->handle);
DRM_DEBUG("map->mtrr : %d\n", map->mtrr);
virtual = 0;
retcode = vm_mmap(&p->p_vmspace->vm_map,
&virtual,
map->size,
PROT_READ|PROT_WRITE, VM_PROT_ALL,
MAP_SHARED,
SLIST_FIRST(&kdev->si_hlist),
map->offset);
} else {
virtual = 0;
retcode = vm_mmap(&p->p_vmspace->vm_map,
&virtual,
round_page(dma->byte_count),
PROT_READ|PROT_WRITE, VM_PROT_ALL,
MAP_SHARED,
SLIST_FIRST(&kdev->si_hlist),
0);
}
if (retcode) {
/* Real error */
DRM_DEBUG("mmap error\n");
goto done;
}
request.virtual = (void *)virtual;
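/*
 * Hand the mapping back to user space: for each buffer, copy out its
 * index and size, clear the 'used' count and compute its user address
 * as the mmap base plus the buffer's offset.
 */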
for (i = 0; i < dma->buf_count; i++) {
retcode = copyout(&dma->buflist[i]->idx,
&request.list[i].idx,
sizeof(request.list[0].idx));
if (retcode) goto done;
retcode = copyout(&dma->buflist[i]->total,
&request.list[i].total,
sizeof(request.list[0].total));
if (retcode) goto done;
retcode = copyout(&zero,
&request.list[i].used,
sizeof(request.list[0].used));
if (retcode) goto done;
address = virtual + dma->buflist[i]->offset;
retcode = copyout(&address,
&request.list[i].address,
sizeof(address));
if (retcode) goto done;
}
}
done:
request.count = dma->buf_count;
DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
*(drm_buf_map_t *) data = request;
DRM_DEBUG("retcode : %d\n", retcode);
return retcode;
}

bsd/mga/mga_context.c (new file, 196 lines)

@@ -0,0 +1,196 @@
/* mga_context.c -- IOCTLs for mga contexts
* Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Rickard E. (Rik) Faith <faith@precisioninsight.com>
*
* $XFree86$
*
*/
#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"
static int mga_alloc_queue(drm_device_t *dev)
{
int temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("mga_alloc_queue: %d\n", temp);
return temp;
}
int mga_context_switch(drm_device_t *dev, int old, int new)
{
char buf[64];
atomic_inc(&dev->total_ctx);
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return EBUSY;
}
#if DRM_DMA_HISTOGRAM
getnanotime(&dev->ctx_start);
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
if (drm_flags & DRM_FLAG_NOCTX) {
mga_context_switch_complete(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
drm_write_string(dev, buf);
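/*
 * The "C <old> <new>" string is queued for the X server to read from the
 * drm device; the server is expected to perform the hardware switch and
 * then report completion via the NEW_CTX ioctl, which ends up in
 * mga_context_switch_complete() below.
 */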
}
return 0;
}
int mga_context_switch_complete(drm_device_t *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = ticks;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
#if DRM_DMA_HISTOGRAM
{
struct timespec ts;
getnanotime(&ts);
timespecsub(&ts, &dev->lck_start);
atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
}
#endif
clear_bit(0, &dev->context_flag);
wakeup(&dev->context_wait);
return 0;
}
int
mga_resctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i, error;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
res = *(drm_ctx_res_t *) data;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
error = copyout(&i, &res.contexts[i], sizeof(i));
if (error) return error;
}
}
res.count = DRM_RESERVED_CONTEXTS;
*(drm_ctx_res_t *) data = res;
return 0;
}
int
mga_addctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx.handle = mga_alloc_queue(dev);
}
if (ctx.handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return EBUSY instead? */
return ENOMEM;
}
DRM_DEBUG("%d\n", ctx.handle);
*(drm_ctx_t *) data = ctx;
return 0;
}
int
mga_modctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
/* This does nothing for the mga */
return 0;
}
int mga_getctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
*(drm_ctx_t *) data = ctx;
return 0;
}
int mga_switchctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
return mga_context_switch(dev, dev->last_context, ctx.handle);
}
int mga_newctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
mga_context_switch_complete(dev, ctx.handle);
return 0;
}
int mga_rmctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_ctx_t ctx;
ctx = *(drm_ctx_t *) data;
DRM_DEBUG("%d\n", ctx.handle);
if(ctx.handle != DRM_KERNEL_CONTEXT) {
drm_ctxbitmap_free(dev, ctx.handle);
}
return 0;
}

bsd/mga/mga_dma.c (new file, 1109 lines)

File diff suppressed because it is too large.

bsd/mga/mga_drm.h (new file, 269 lines)

@@ -0,0 +1,269 @@
/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
* Keith Whitwell <keithw@precisioninsight.com>
*
* $XFree86$
*/
#ifndef _MGA_DRM_H_
#define _MGA_DRM_H_
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmMga.h)
*/
#ifndef _MGA_DEFINES_
#define _MGA_DEFINES_
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_WARP_TGZ 0
#define MGA_WARP_TGZF (MGA_F)
#define MGA_WARP_TGZA (MGA_A)
#define MGA_WARP_TGZAF (MGA_F|MGA_A)
#define MGA_WARP_TGZS (MGA_S)
#define MGA_WARP_TGZSF (MGA_S|MGA_F)
#define MGA_WARP_TGZSA (MGA_S|MGA_A)
#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
#define MGA_WARP_T2GZ (MGA_T2)
#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
#define MGA_FRONT 0x1
#define MGA_BACK 0x2
#define MGA_DEPTH 0x4
/* 3d state excluding texture units:
*/
#define MGA_CTXREG_DSTORG 0 /* validated */
#define MGA_CTXREG_MACCESS 1
#define MGA_CTXREG_PLNWT 2
#define MGA_CTXREG_DWGCTL 3
#define MGA_CTXREG_ALPHACTRL 4
#define MGA_CTXREG_FOGCOLOR 5
#define MGA_CTXREG_WFLAG 6
#define MGA_CTXREG_TDUAL0 7
#define MGA_CTXREG_TDUAL1 8
#define MGA_CTXREG_FCOL 9
#define MGA_CTX_SETUP_SIZE 10
/* 2d state
*/
#define MGA_2DREG_PITCH 0
#define MGA_2D_SETUP_SIZE 1
/* Each texture unit has a state:
*/
#define MGA_TEXREG_CTL 0
#define MGA_TEXREG_CTL2 1
#define MGA_TEXREG_FILTER 2
#define MGA_TEXREG_BORDERCOL 3
#define MGA_TEXREG_ORG 4 /* validated */
#define MGA_TEXREG_ORG1 5
#define MGA_TEXREG_ORG2 6
#define MGA_TEXREG_ORG3 7
#define MGA_TEXREG_ORG4 8
#define MGA_TEXREG_WIDTH 9
#define MGA_TEXREG_HEIGHT 10
#define MGA_TEX_SETUP_SIZE 11
/* What needs to be changed for the current vertex dma buffer?
*/
#define MGA_UPLOAD_CTX 0x1
#define MGA_UPLOAD_TEX0 0x2
#define MGA_UPLOAD_TEX1 0x4
#define MGA_UPLOAD_PIPE 0x8
#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define MGA_UPLOAD_2D 0x40
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
/* 32 buffers of 64k each, total 2 meg.
*/
#define MGA_DMA_BUF_ORDER 16
#define MGA_DMA_BUF_SZ (1<<MGA_DMA_BUF_ORDER)
#define MGA_DMA_BUF_NR 31
/* Keep these small for testing.
*/
#define MGA_NR_SAREA_CLIPRECTS 8
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define MGA_CARD_HEAP 0
#define MGA_AGP_HEAP 1
#define MGA_NR_TEX_HEAPS 2
#define MGA_NR_TEX_REGIONS 16
#define MGA_LOG_MIN_TEX_REGION_SIZE 16
#endif
typedef struct _drm_mga_warp_index {
int installed;
unsigned long phys_addr;
int size;
} drm_mga_warp_index_t;
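/* Argument block for the DRM_IOCTL_MGA_INIT ioctl (handled by mga_dma_init). */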
typedef struct drm_mga_init {
enum {
MGA_INIT_DMA = 0x01,
MGA_CLEANUP_DMA = 0x02
} func;
int reserved_map_agpstart;
int reserved_map_idx;
int buffer_map_idx;
int sarea_priv_offset;
int primary_size;
int warp_ucode_size;
unsigned int frontOffset;
unsigned int backOffset;
unsigned int depthOffset;
unsigned int textureOffset;
unsigned int textureSize;
unsigned int agpTextureOffset;
unsigned int agpTextureSize;
unsigned int cpp;
unsigned int stride;
int sgram;
int chipset;
drm_mga_warp_index_t WarpIndex[MGA_MAX_WARP_PIPES];
unsigned int mAccess;
} drm_mga_init_t;
/* Warning: if you change the sarea structure, you must change the Xserver
* structures as well */
typedef struct _drm_mga_tex_region {
unsigned char next, prev;
unsigned char in_use;
unsigned int age;
} drm_mga_tex_region_t;
typedef struct _drm_mga_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
unsigned int ContextState[MGA_CTX_SETUP_SIZE];
unsigned int ServerState[MGA_2D_SETUP_SIZE];
unsigned int TexState[2][MGA_TEX_SETUP_SIZE];
unsigned int WarpPipe;
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
/* Information about the most recently used 3d drawable. The
* client fills in the req_* fields, the server fills in the
* exported_ fields and puts the cliprects into boxes, above.
*
* The client clears the exported_drawable field before
* clobbering the boxes data.
*/
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
unsigned int exported_drawable;
unsigned int exported_index;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
unsigned int last_enqueue; /* last time a buffer was enqueued */
unsigned int last_dispatch; /* age of the most recently dispatched buffer */
unsigned int last_quiescent; /* */
/* LRU lists for texture memory in agp space and on the card
*/
drm_mga_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS+1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_mga_sarea_t;
/* Device specific ioctls:
*/
typedef struct _drm_mga_clear {
unsigned int clear_color;
unsigned int clear_depth;
unsigned int flags;
} drm_mga_clear_t;
typedef struct _drm_mga_swap {
int dummy;
} drm_mga_swap_t;
typedef struct _drm_mga_iload {
int idx;
int length;
unsigned int destOrg;
} drm_mga_iload_t;
typedef struct _drm_mga_vertex {
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
} drm_mga_vertex_t;
typedef struct _drm_mga_indices {
int idx; /* buffer to queue */
unsigned int start;
unsigned int end;
int discard; /* client finished with buffer? */
} drm_mga_indices_t;
#endif

bsd/mga/mga_drv.c (new file, 713 lines)

@@ -0,0 +1,713 @@
/* mga_drv.c -- Matrox g200/g400 driver
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86$
*
*/
#include "drmP.h"
#include "mga_drv.h"
#include <pci/pcivar.h>
MODULE_DEPEND(mga, drm, 1, 1, 1);
MODULE_DEPEND(mga, agp, 1, 1, 1);
#define MGA_NAME "mga"
#define MGA_DESC "Matrox g200/g400"
#define MGA_DATE "19991213"
#define MGA_MAJOR 0
#define MGA_MINOR 0
#define MGA_PATCHLEVEL 1
drm_ctx_t mga_res_ctx;
static int mga_probe(device_t dev)
{
const char *s = 0;
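/* pci_get_devid() packs (device id << 16) | vendor id; 0x102b is Matrox. */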
switch (pci_get_devid(dev)) {
case 0x0525102b:
s = "Matrox MGA G400 AGP graphics accelerator";
break;
case 0x0521102b:
s = "Matrox MGA G200 AGP graphics accelerator";
break;
}
if (s) {
device_set_desc(dev, s);
return 0;
}
return ENXIO;
}
static int mga_attach(device_t dev)
{
return mga_init(dev);
}
static int mga_detach(device_t dev)
{
mga_cleanup(dev);
return 0;
}
static device_method_t mga_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mga_probe),
DEVMETHOD(device_attach, mga_attach),
DEVMETHOD(device_detach, mga_detach),
{ 0, 0 }
};
static driver_t mga_driver = {
"drm",
mga_methods,
sizeof(drm_device_t),
};
static devclass_t mga_devclass;
#define MGA_SOFTC(unit) \
((drm_device_t *) devclass_get_softc(mga_devclass, unit))
DRIVER_MODULE(if_mga, pci, mga_driver, mga_devclass, 0, 0);
#define CDEV_MAJOR 145
/* mga_drv.c */
static struct cdevsw mga_cdevsw = {
/* open */ mga_open,
/* close */ mga_close,
/* read */ drm_read,
/* write */ drm_write,
/* ioctl */ mga_ioctl,
/* poll */ drm_poll,
/* mmap */ drm_mmap,
/* strategy */ nostrategy,
/* name */ "mga",
/* maj */ CDEV_MAJOR,
/* dump */ nodump,
/* psize */ nopsize,
/* flags */ D_TTY | D_TRACKCLOSE,
/* bmaj */ -1
};
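/*
 * Ioctl dispatch table.  Each entry is { handler, auth_needed, root_only };
 * mga_ioctl() below checks these two flags before calling the handler.
 */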
static drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mga_version, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mga_control, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mga_addbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mga_markbufs, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mga_infobufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mga_lock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mga_unlock, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_swap_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_clear_bufs, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 },
};
#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
static int mga_setup(drm_device_t *dev)
{
int i;
device_busy(dev->device);
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
drm_dma_setup(dev);
atomic_set(&dev->total_open, 0);
atomic_set(&dev->total_close, 0);
atomic_set(&dev->total_ioctl, 0);
atomic_set(&dev->total_irq, 0);
atomic_set(&dev->total_ctx, 0);
atomic_set(&dev->total_locks, 0);
atomic_set(&dev->total_unlocks, 0);
atomic_set(&dev->total_contends, 0);
atomic_set(&dev->total_sleeps, 0);
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->maplist = NULL;
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL;
dev->lock.lock_queue = 0;
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
callout_init(&dev->timer);
dev->context_wait = 0;
timespecclear(&dev->ctx_start);
timespecclear(&dev->lck_start);
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
bzero(&dev->buf_sel, sizeof dev->buf_sel);
dev->buf_sigio = NULL;
dev->buf_readers = 0;
dev->buf_writers = 0;
dev->buf_selecting = 0;
DRM_DEBUG("\n");
/* The kernel's context could be created here, but is now created
in drm_dma_enqueue. This is more resource-efficient for
hardware that does not do DMA, but may mean that
drm_select_queue fails between the time the interrupt is
initialized and the time the queues are initialized. */
return 0;
}
static int mga_takedown(drm_device_t *dev)
{
int i;
drm_magic_entry_t *pt, *next;
drm_map_t *map;
drm_vma_entry_t *vma, *vma_next;
DRM_DEBUG("\n");
if (dev->irq) mga_irq_uninstall(dev);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
callout_stop(&dev->timer);
if (dev->devname) {
drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
dev->devname = NULL;
}
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp
intact until cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if (entry->bound) drm_unbind_agp(entry->handle);
drm_free_agp(entry->handle, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
if (dev->agp->acquired)
agp_release(dev->agp->agpdev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
/* Clear map area and mtrr information */
if (dev->maplist) {
for (i = 0; i < dev->map_count; i++) {
map = dev->maplist[i];
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
if (map->mtrr >= 0) {
int retcode;
retcode = mtrr_del(map->mtrr,
map->offset,
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
#endif
drm_ioremapfree(map->handle, map->size);
break;
case _DRM_SHM:
drm_free_pages((unsigned long)map->handle,
drm_order(map->size)
- PAGE_SHIFT,
DRM_MEM_SAREA);
break;
case _DRM_AGP:
break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
drm_free(dev->maplist,
dev->map_count * sizeof(*dev->maplist),
DRM_MEM_MAPS);
dev->maplist = NULL;
dev->map_count = 0;
}
if (dev->queuelist) {
for (i = 0; i < dev->queue_count; i++) {
drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
if (dev->queuelist[i]) {
drm_free(dev->queuelist[i],
sizeof(*dev->queuelist[0]),
DRM_MEM_QUEUES);
dev->queuelist[i] = NULL;
}
}
drm_free(dev->queuelist,
dev->queue_slots * sizeof(*dev->queuelist),
DRM_MEM_QUEUES);
dev->queuelist = NULL;
}
drm_dma_takedown(dev);
dev->queue_count = 0;
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0;
wakeup(&dev->lock.lock_queue);
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
device_unbusy(dev->device);
return 0;
}
/* mga_init is called via mga_attach at module load time. */
int
mga_init(device_t nbdev)
{
int retcode;
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
memset((void *)dev, 0, sizeof(*dev));
simple_lock_init(&dev->count_lock);
lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
#if 0
drm_parse_options(mga);
#endif
dev->device = nbdev;
dev->devnode = make_dev(&mga_cdevsw,
device_get_unit(nbdev),
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
MGA_NAME);
dev->name = MGA_NAME;
DRM_DEBUG("doing mem init\n");
drm_mem_init();
DRM_DEBUG("doing proc init\n");
drm_sysctl_init(dev);
TAILQ_INIT(&dev->files);
DRM_DEBUG("doing agp init\n");
dev->agp = drm_agp_init();
if(dev->agp == NULL) {
DRM_DEBUG("The mga drm module requires the agp module"
" to function correctly\nPlease load the agp"
" module before you load the mga module\n");
drm_sysctl_cleanup(dev);
mga_takedown(dev);
return ENOMEM;
}
#if 0
dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size * 1024 * 1024,
MTRR_TYPE_WRCOMB,
1);
#endif
DRM_DEBUG("doing ctxbitmap init\n");
if((retcode = drm_ctxbitmap_init(dev))) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
drm_sysctl_cleanup(dev);
mga_takedown(dev);
return retcode;
}
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
MGA_NAME,
MGA_MAJOR,
MGA_MINOR,
MGA_PATCHLEVEL,
MGA_DATE,
device_get_unit(nbdev));
return 0;
}
/* mga_cleanup is called via mga_detach at module unload time. */
void mga_cleanup(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
DRM_DEBUG("\n");
drm_sysctl_cleanup(dev);
destroy_dev(dev->devnode);
DRM_INFO("Module unloaded\n");
drm_ctxbitmap_cleanup(dev);
mga_dma_cleanup(dev);
#if 0
if(dev->agp && dev->agp->agp_mtrr) {
int retval;
retval = mtrr_del(dev->agp->agp_mtrr,
dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size * 1024*1024);
DRM_DEBUG("mtrr_del = %d\n", retval);
}
#endif
device_busy(dev->device);
mga_takedown(dev);
if (dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
}
int
mga_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_version_t version;
int len;
version = *(drm_version_t *) data;
#define DRM_COPY(name,value) \
len = strlen(value); \
if (len > name##_len) len = name##_len; \
name##_len = strlen(value); \
if (len && name) { \
int error = copyout(value, name, len); \
if (error) return error; \
}
version.version_major = MGA_MAJOR;
version.version_minor = MGA_MINOR;
version.version_patchlevel = MGA_PATCHLEVEL;
DRM_COPY(version.name, MGA_NAME);
DRM_COPY(version.date, MGA_DATE);
DRM_COPY(version.desc, MGA_DESC);
*(drm_version_t *) data = version;
return 0;
}
int
mga_open(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = MGA_SOFTC(minor(kdev));
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
device_busy(dev->device);
if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
atomic_inc(&dev->total_open);
simple_lock(&dev->count_lock);
if (!dev->open_count++) {
simple_unlock(&dev->count_lock);
return mga_setup(dev);
}
simple_unlock(&dev->count_lock);
}
device_unbusy(dev->device);
return retcode;
}
int
mga_close(dev_t kdev, int flags, int fmt, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
int retcode = 0;
DRM_DEBUG("pid = %d, open_count = %d\n",
p->p_pid, dev->open_count);
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find authenticator\n");
return EINVAL;
}
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.pid == p->p_pid) {
mga_reclaim_buffers(dev, priv->pid);
DRM_ERROR("Process %d dead, freeing lock for context %d\n",
p->p_pid,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(dev,
&dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
} else if (dev->lock.hw_lock) {
/* The lock is required to reclaim buffers */
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
retcode = EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.pid = p->p_pid;
dev->lock.lock_time = ticks;
atomic_inc(&dev->total_locks);
break; /* Got lock */
}
/* Contention */
atomic_inc(&dev->total_sleeps);
retcode = tsleep(&dev->lock.lock_queue,
PZERO|PCATCH,
"drmlk2",
0);
if (retcode)
break;
}
if(!retcode) {
mga_reclaim_buffers(dev, priv->pid);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
}
funsetown(dev->buf_sigio);
lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
priv = drm_find_file_by_proc(dev, p);
if (priv) {
priv->refs--;
if (!priv->refs) {
TAILQ_REMOVE(&dev->files, priv, link);
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
}
}
lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
atomic_inc(&dev->total_close);
simple_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count) || dev->blocked) {
DRM_ERROR("Device busy: %d %d\n",
atomic_read(&dev->ioctl_count),
dev->blocked);
simple_unlock(&dev->count_lock);
return EBUSY;
}
simple_unlock(&dev->count_lock);
return mga_takedown(dev);
}
simple_unlock(&dev->count_lock);
return retcode;
}
/* mga_ioctl is called whenever a process performs an ioctl on /dev/drm. */
int
mga_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
int nr = DRM_IOCTL_NR(cmd);
drm_device_t *dev = kdev->si_drv1;
drm_file_t *priv;
int retcode = 0;
drm_ioctl_desc_t *ioctl;
d_ioctl_t *func;
DRM_DEBUG("dev=%p\n", dev);
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
DRM_DEBUG("can't find authenticator\n");
return EINVAL;
}
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->total_ioctl);
++priv->ioctl_count;
DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
p->p_pid, cmd, nr, priv->authenticated);
switch (cmd) {
case FIONBIO:
atomic_dec(&dev->ioctl_count);
return 0;
case FIOASYNC:
atomic_dec(&dev->ioctl_count);
dev->flags |= FASYNC;
return 0;
case FIOSETOWN:
atomic_dec(&dev->ioctl_count);
return fsetown(*(int *)data, &dev->buf_sigio);
case FIOGETOWN:
atomic_dec(&dev->ioctl_count);
*(int *) data = fgetown(dev->buf_sigio);
return 0;
}
if (nr >= MGA_IOCTL_COUNT) {
retcode = EINVAL;
} else {
ioctl = &mga_ioctls[nr];
func = ioctl->func;
if (!func) {
DRM_DEBUG("no function\n");
retcode = EINVAL;
} else if ((ioctl->root_only && suser(p))
|| (ioctl->auth_needed && !priv->authenticated)) {
retcode = EACCES;
} else {
retcode = (func)(kdev, cmd, data, flags, p);
}
}
atomic_dec(&dev->ioctl_count);
return retcode;
}
int
mga_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
drm_device_t *dev = kdev->si_drv1;
drm_lock_t lock;
lock = *(drm_lock_t *) data;
if (lock.context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
p->p_pid, lock.context);
return EINVAL;
}
DRM_DEBUG("%d frees lock (%d holds)\n",
lock.context,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
atomic_inc(&dev->total_unlocks);
if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
atomic_inc(&dev->total_contends);
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
mga_dma_schedule(dev, 1);
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
return 0;
}

bsd/mga/mga_drv.h (new file, 420 lines)

@@ -0,0 +1,420 @@
/* mga_drv.h -- Private header for the Matrox g200/g400 driver
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com>
* Jeff Hartmann <jhartmann@precisioninsight.com>
*
* $XFree86$
*/
#ifndef _MGA_DRV_H_
#define _MGA_DRV_H_
#define MGA_BUF_IN_USE 0
#define MGA_BUF_SWAP_PENDING 1
#define MGA_BUF_FORCE_FIRE 2
#define MGA_BUF_NEEDS_OVERFLOW 3
typedef struct {
u_int32_t buffer_status;
unsigned int num_dwords;
unsigned int max_dwords;
u_int32_t *current_dma_ptr;
u_int32_t *head;
u_int32_t phys_head;
unsigned int prim_age;
int sec_used;
int idx;
} drm_mga_prim_buf_t;
typedef struct _drm_mga_freelist {
unsigned int age;
drm_buf_t *buf;
struct _drm_mga_freelist *next;
struct _drm_mga_freelist *prev;
} drm_mga_freelist_t;
#define MGA_IN_DISPATCH 0
#define MGA_IN_FLUSH 1
#define MGA_IN_WAIT 2
#define MGA_IN_GETBUF 3
typedef struct _drm_mga_private {
u_int32_t dispatch_status;
unsigned int next_prim_age;
__volatile__ unsigned int last_prim_age;
int reserved_map_idx;
int buffer_map_idx;
drm_mga_sarea_t *sarea_priv;
int primary_size;
int warp_ucode_size;
int chipset;
unsigned int frontOffset;
unsigned int backOffset;
unsigned int depthOffset;
unsigned int textureOffset;
unsigned int textureSize;
int cpp;
unsigned int stride;
int sgram;
int use_agp;
drm_mga_warp_index_t WarpIndex[MGA_MAX_G400_PIPES];
unsigned int WarpPipe;
atomic_t pending_bufs;
void *status_page;
unsigned long real_status_page;
u_int8_t *ioremap;
drm_mga_prim_buf_t **prim_bufs;
drm_mga_prim_buf_t *next_prim;
drm_mga_prim_buf_t *last_prim;
drm_mga_prim_buf_t *current_prim;
int current_prim_idx;
drm_mga_freelist_t *head;
drm_mga_freelist_t *tail;
int flush_queue; /* Processes waiting until flush */
int wait_queue; /* Processes waiting until interrupt */
int buf_queue; /* Processes waiting for a free buf */
/* Some validated register values:
*/
u_int32_t mAccess;
} drm_mga_private_t;
/* mga_drv.c */
extern int mga_init(device_t);
extern void mga_cleanup(device_t);
extern d_ioctl_t mga_version;
extern d_open_t mga_open;
extern d_close_t mga_close;
extern d_ioctl_t mga_ioctl;
extern d_ioctl_t mga_unlock;
/* mga_dma.c */
extern int mga_dma_schedule(drm_device_t *dev, int locked);
extern int mga_irq_install(drm_device_t *dev, int irq);
extern int mga_irq_uninstall(drm_device_t *dev);
extern d_ioctl_t mga_dma;
extern d_ioctl_t mga_control;
extern d_ioctl_t mga_lock;
/* mga_dma_init does init and release */
extern int mga_dma_cleanup(drm_device_t *dev);
extern d_ioctl_t mga_dma_init;
extern d_ioctl_t mga_flush_ioctl;
extern void mga_flush_write_combine(void);
extern unsigned int mga_create_sync_tag(drm_device_t *dev);
extern drm_buf_t *mga_freelist_get(drm_device_t *dev);
extern int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf);
extern int mga_advance_primary(drm_device_t *dev);
extern void mga_reclaim_buffers(drm_device_t *dev, pid_t pid);
/* mga_bufs.c */
extern d_ioctl_t mga_addbufs;
extern d_ioctl_t mga_infobufs;
extern d_ioctl_t mga_markbufs;
extern d_ioctl_t mga_freebufs;
extern d_ioctl_t mga_mapbufs;
extern d_ioctl_t mga_addmap;
/* mga_state.c */
extern d_ioctl_t mga_clear_bufs;
extern d_ioctl_t mga_swap_bufs;
extern d_ioctl_t mga_iload;
extern d_ioctl_t mga_vertex;
extern d_ioctl_t mga_indices;
/* mga_context.c */
extern d_ioctl_t mga_resctx;
extern d_ioctl_t mga_addctx;
extern d_ioctl_t mga_modctx;
extern d_ioctl_t mga_getctx;
extern d_ioctl_t mga_switchctx;
extern d_ioctl_t mga_newctx;
extern d_ioctl_t mga_rmctx;
extern int mga_context_switch(drm_device_t *dev, int old, int new);
extern int mga_context_switch_complete(drm_device_t *dev, int new);
typedef enum {
TT_GENERAL,
TT_BLIT,
TT_VECTOR,
TT_VERTEX
} transferType_t;
typedef struct {
drm_mga_freelist_t *my_freelist;
int discard;
int dispatched;
} drm_mga_buf_priv_t;
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
#define DWGREG1 0x2c00
#define DWGREG1_END 0x2dff
#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END)
#define ADRINDEX0(r) (u_int8_t)((r - DWGREG0) >> 2)
#define ADRINDEX1(r) (u_int8_t)(((r - DWGREG1) >> 2) | 0x80)
#define ADRINDEX(r) (ISREG0(r) ? ADRINDEX0(r) : ADRINDEX1(r))
#define MGA_VERBOSE 0
#define MGA_NUM_PRIM_BUFS 8
#define PRIMLOCALS u_int8_t tempIndex[4]; u_int32_t *dma_ptr; u_int32_t phys_head; \
int outcount, num_dwords
#define PRIM_OVERFLOW(dev, dev_priv, length) do { \
drm_mga_prim_buf_t *tmp_buf = \
dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
if( test_bit(MGA_BUF_NEEDS_OVERFLOW, \
&tmp_buf->buffer_status)) { \
mga_advance_primary(dev); \
mga_dma_schedule(dev, 1); \
} else if( tmp_buf->max_dwords - tmp_buf->num_dwords < length ||\
tmp_buf->sec_used > MGA_DMA_BUF_NR/2) { \
set_bit(MGA_BUF_FORCE_FIRE, &tmp_buf->buffer_status); \
mga_advance_primary(dev); \
mga_dma_schedule(dev, 1); \
} \
} while(0)
#define PRIMGETPTR(dev_priv) do { \
drm_mga_prim_buf_t *tmp_buf = \
dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
if(MGA_VERBOSE) \
DRM_DEBUG("PRIMGETPTR in %s\n", __FUNCTION__); \
dma_ptr = tmp_buf->current_dma_ptr; \
num_dwords = tmp_buf->num_dwords; \
phys_head = tmp_buf->phys_head; \
outcount = 0; \
} while(0)
#define PRIMPTR(prim_buf) do { \
if(MGA_VERBOSE) \
DRM_DEBUG("PRIMPTR in %s\n", __FUNCTION__); \
dma_ptr = prim_buf->current_dma_ptr; \
num_dwords = prim_buf->num_dwords; \
phys_head = prim_buf->phys_head; \
outcount = 0; \
} while(0)
#define PRIMFINISH(prim_buf) do { \
if (MGA_VERBOSE) { \
DRM_DEBUG( "PRIMFINISH in %s\n", __FUNCTION__); \
if (outcount & 3) \
DRM_DEBUG(" --- truncation\n"); \
} \
prim_buf->num_dwords = num_dwords; \
prim_buf->current_dma_ptr = dma_ptr; \
} while(0)
#define PRIMADVANCE(dev_priv) do { \
drm_mga_prim_buf_t *tmp_buf = \
dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
if (MGA_VERBOSE) { \
DRM_DEBUG("PRIMADVANCE in %s\n", __FUNCTION__); \
if (outcount & 3) \
DRM_DEBUG(" --- truncation\n"); \
} \
tmp_buf->num_dwords = num_dwords; \
tmp_buf->current_dma_ptr = dma_ptr; \
} while (0)
#define PRIMUPDATE(dev_priv) do { \
drm_mga_prim_buf_t *tmp_buf = \
dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
tmp_buf->sec_used++; \
} while (0)
#define AGEBUF(dev_priv, buf_priv) do { \
drm_mga_prim_buf_t *tmp_buf = \
dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
buf_priv->my_freelist->age = tmp_buf->prim_age; \
} while (0)
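/*
 * PRIMOUTREG queues a register write into the primary DMA buffer.  Output
 * is grouped five dwords at a time: once four registers have been
 * collected, a dword of packed register indices is stored followed by the
 * four data values.
 */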
#define PRIMOUTREG(reg, val) do { \
tempIndex[outcount]=ADRINDEX(reg); \
dma_ptr[1+outcount] = val; \
if (MGA_VERBOSE) \
DRM_DEBUG(" PRIMOUT %d: 0x%x -- 0x%x\n", \
num_dwords + 1 + outcount, ADRINDEX(reg), val); \
if( ++outcount == 4) { \
outcount = 0; \
dma_ptr[0] = *(u_int32_t *)tempIndex; \
dma_ptr+=5; \
num_dwords += 5; \
} \
}while (0)
/* A reduced set of the mga registers.
*/
#define MGAREG_MGA_EXEC 0x0100
#define MGAREG_ALPHACTRL 0x2c7c
#define MGAREG_AR0 0x1c60
#define MGAREG_AR1 0x1c64
#define MGAREG_AR2 0x1c68
#define MGAREG_AR3 0x1c6c
#define MGAREG_AR4 0x1c70
#define MGAREG_AR5 0x1c74
#define MGAREG_AR6 0x1c78
#define MGAREG_CXBNDRY 0x1c80
#define MGAREG_CXLEFT 0x1ca0
#define MGAREG_CXRIGHT 0x1ca4
#define MGAREG_DMAPAD 0x1c54
#define MGAREG_DSTORG 0x2cb8
#define MGAREG_DWGCTL 0x1c00
#define MGAREG_DWGSYNC 0x2c4c
#define MGAREG_FCOL 0x1c24
#define MGAREG_FIFOSTATUS 0x1e10
#define MGAREG_FOGCOL 0x1cf4
#define MGAREG_FXBNDRY 0x1c84
#define MGAREG_FXLEFT 0x1ca8
#define MGAREG_FXRIGHT 0x1cac
#define MGAREG_ICLEAR 0x1e18
#define MGAREG_IEN 0x1e1c
#define MGAREG_LEN 0x1c5c
#define MGAREG_MACCESS 0x1c04
#define MGAREG_PITCH 0x1c8c
#define MGAREG_PLNWT 0x1c1c
#define MGAREG_PRIMADDRESS 0x1e58
#define MGAREG_PRIMEND 0x1e5c
#define MGAREG_PRIMPTR 0x1e50
#define MGAREG_SECADDRESS 0x2c40
#define MGAREG_SECEND 0x2c44
#define MGAREG_SETUPADDRESS 0x2cd0
#define MGAREG_SETUPEND 0x2cd4
#define MGAREG_SOFTRAP 0x2c48
#define MGAREG_SRCORG 0x2cb4
#define MGAREG_STATUS 0x1e14
#define MGAREG_STENCIL 0x2cc8
#define MGAREG_STENCILCTL 0x2ccc
#define MGAREG_TDUALSTAGE0 0x2cf8
#define MGAREG_TDUALSTAGE1 0x2cfc
#define MGAREG_TEXBORDERCOL 0x2c5c
#define MGAREG_TEXCTL 0x2c30
#define MGAREG_TEXCTL2 0x2c3c
#define MGAREG_TEXFILTER 0x2c58
#define MGAREG_TEXHEIGHT 0x2c2c
#define MGAREG_TEXORG 0x2c24
#define MGAREG_TEXORG1 0x2ca4
#define MGAREG_TEXORG2 0x2ca8
#define MGAREG_TEXORG3 0x2cac
#define MGAREG_TEXORG4 0x2cb0
#define MGAREG_TEXTRANS 0x2c34
#define MGAREG_TEXTRANSHIGH 0x2c38
#define MGAREG_TEXWIDTH 0x2c28
#define MGAREG_WACCEPTSEQ 0x1dd4
#define MGAREG_WCODEADDR 0x1e6c
#define MGAREG_WFLAG 0x1dc4
#define MGAREG_WFLAG1 0x1de0
#define MGAREG_WFLAGNB 0x1e64
#define MGAREG_WFLAGNB1 0x1e08
#define MGAREG_WGETMSB 0x1dc8
#define MGAREG_WIADDR 0x1dc0
#define MGAREG_WIADDR2 0x1dd8
#define MGAREG_WMISC 0x1e70
#define MGAREG_WVRTXSZ 0x1dcc
#define MGAREG_YBOT 0x1c9c
#define MGAREG_YDST 0x1c90
#define MGAREG_YDSTLEN 0x1c88
#define MGAREG_YDSTORG 0x1c94
#define MGAREG_YTOP 0x1c98
#define MGAREG_ZORG 0x1c0c
#define PDEA_pagpxfer_enable 0x2
#define WIA_wmode_suspend 0x0
#define WIA_wmode_start 0x3
#define WIA_wagp_agp 0x4
#define DC_opcod_line_open 0x0
#define DC_opcod_autoline_open 0x1
#define DC_opcod_line_close 0x2
#define DC_opcod_autoline_close 0x3
#define DC_opcod_trap 0x4
#define DC_opcod_texture_trap 0x6
#define DC_opcod_bitblt 0x8
#define DC_opcod_iload 0x9
#define DC_atype_rpl 0x0
#define DC_atype_rstr 0x10
#define DC_atype_zi 0x30
#define DC_atype_blk 0x40
#define DC_atype_i 0x70
#define DC_linear_xy 0x0
#define DC_linear_linear 0x80
#define DC_zmode_nozcmp 0x0
#define DC_zmode_ze 0x200
#define DC_zmode_zne 0x300
#define DC_zmode_zlt 0x400
#define DC_zmode_zlte 0x500
#define DC_zmode_zgt 0x600
#define DC_zmode_zgte 0x700
#define DC_solid_disable 0x0
#define DC_solid_enable 0x800
#define DC_arzero_disable 0x0
#define DC_arzero_enable 0x1000
#define DC_sgnzero_disable 0x0
#define DC_sgnzero_enable 0x2000
#define DC_shftzero_disable 0x0
#define DC_shftzero_enable 0x4000
#define DC_bop_SHIFT 16
#define DC_trans_SHIFT 20
#define DC_bltmod_bmonolef 0x0
#define DC_bltmod_bmonowf 0x8000000
#define DC_bltmod_bplan 0x2000000
#define DC_bltmod_bfcol 0x4000000
#define DC_bltmod_bu32bgr 0x6000000
#define DC_bltmod_bu32rgb 0xe000000
#define DC_bltmod_bu24bgr 0x16000000
#define DC_bltmod_bu24rgb 0x1e000000
#define DC_pattern_disable 0x0
#define DC_pattern_enable 0x20000000
#define DC_transc_disable 0x0
#define DC_transc_enable 0x40000000
#define DC_clipdis_disable 0x0
#define DC_clipdis_enable 0x80000000
#define SETADD_mode_vertlist 0x0
#define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable | \
DC_sgnzero_enable | DC_shftzero_enable | \
(0xC << DC_bop_SHIFT) | DC_clipdis_enable | \
DC_solid_enable | DC_transc_enable)
#define MGA_COPY_CMD (DC_opcod_bitblt | DC_atype_rpl | DC_linear_xy | \
DC_solid_disable | DC_arzero_disable | \
DC_sgnzero_enable | DC_shftzero_enable | \
(0xC << DC_bop_SHIFT) | DC_bltmod_bfcol | \
DC_pattern_disable | DC_transc_disable | \
DC_clipdis_enable)
#define MGA_FLUSH_CMD (DC_opcod_texture_trap | (0xF << DC_trans_SHIFT) |\
DC_arzero_enable | DC_sgnzero_enable | \
DC_atype_i)
#endif

bsd/mga/mga_state.c (new file, 1063 lines)

File diff suppressed because it is too large.