Merge branch 'master' of ssh+git://git.freedesktop.org/git/mesa/drm into xgi-0-0-2

This commit is contained in:
Ian Romanick 2007-08-31 10:54:55 -07:00
commit fee49e2071
69 changed files with 5779 additions and 1625 deletions

4
.gitignore vendored
View file

@ -51,9 +51,13 @@ sis.kld
stamp-h1 stamp-h1
tdfx.kld tdfx.kld
via.kld via.kld
tests/auth
tests/dristat tests/dristat
tests/drmstat tests/drmstat
tests/getclient tests/getclient
tests/getstats
tests/getversion tests/getversion
tests/lock
tests/openclose tests/openclose
tests/setversion
tests/updatedraw tests/updatedraw

View file

@ -1,6 +1,3 @@
/* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
* Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
*/
/*- /*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved. * All Rights Reserved.
@ -29,6 +26,11 @@
* *
*/ */
/** @file ati_pcigart.c
* Implementation of ATI's PCIGART, which provides an aperture in card virtual
* address space with addresses remapped to system memory.
*/
#include "drmP.h" #include "drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */ #define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */

View file

@ -377,6 +377,7 @@ do { \
} while (0) } while (0)
#if defined(__FreeBSD__) && __FreeBSD_version > 500000 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \ for ( ret = 0 ; !ret && !(condition) ; ) { \
DRM_UNLOCK(); \ DRM_UNLOCK(); \
@ -388,11 +389,12 @@ for ( ret = 0 ; !ret && !(condition) ; ) { \
DRM_LOCK(); \ DRM_LOCK(); \
} }
#else #else
/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \ for ( ret = 0 ; !ret && !(condition) ; ) { \
int s = spldrm(); \ int s = spldrm(); \
if (!(condition)) \ if (!(condition)) \
ret = tsleep( &(queue), PZERO | PCATCH, \ ret = -tsleep( &(queue), PZERO | PCATCH, \
"drmwtq", (timeout) ); \ "drmwtq", (timeout) ); \
splx(s); \ splx(s); \
} }

View file

@ -1,6 +1,3 @@
/* drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
* Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
*/
/*- /*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
* *
*/ */
/** @file drm_agpsupport.c
* Support code for tying the kernel AGP support to DRM drivers and
* the DRM's AGP ioctls.
*/
#include "drmP.h" #include "drmP.h"
#ifdef __FreeBSD__ #ifdef __FreeBSD__
@ -182,7 +184,6 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
dev->agp->mode = mode.mode; dev->agp->mode = mode.mode;
agp_enable(dev->agp->agpdev, mode.mode); agp_enable(dev->agp->agpdev, mode.mode);
dev->agp->base = dev->agp->info.ai_aperture_base;
dev->agp->enabled = 1; dev->agp->enabled = 1;
return 0; return 0;
} }
@ -403,6 +404,7 @@ drm_agp_head_t *drm_agp_init(void)
return NULL; return NULL;
head->agpdev = agpdev; head->agpdev = agpdev;
agp_get_info(agpdev, &head->info); agp_get_info(agpdev, &head->info);
head->base = head->info.ai_aperture_base;
head->memory = NULL; head->memory = NULL;
DRM_INFO("AGP at 0x%08lx %dMB\n", DRM_INFO("AGP at 0x%08lx %dMB\n",
(long)head->info.ai_aperture_base, (long)head->info.ai_aperture_base,

View file

@ -1,6 +1,3 @@
/* drm_auth.h -- IOCTLs for authentication -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*- /*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
* *
*/ */
/** @file drm_auth.c
* Implementation of the get/authmagic ioctls implementing the authentication
* scheme between the master and clients.
*/
#include "drmP.h" #include "drmP.h"
static int drm_hash_magic(drm_magic_t magic) static int drm_hash_magic(drm_magic_t magic)
@ -38,11 +40,16 @@ static int drm_hash_magic(drm_magic_t magic)
return magic & (DRM_HASH_SIZE-1); return magic & (DRM_HASH_SIZE-1);
} }
/**
* Returns the file private associated with the given magic number.
*/
static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic) static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
{ {
drm_magic_entry_t *pt; drm_magic_entry_t *pt;
int hash = drm_hash_magic(magic); int hash = drm_hash_magic(magic);
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) { for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
if (pt->magic == magic) { if (pt->magic == magic) {
return pt->priv; return pt->priv;
@ -52,6 +59,10 @@ static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
return NULL; return NULL;
} }
/**
* Inserts the given magic number into the hash table of used magic number
* lists.
*/
static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic) static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
{ {
int hash; int hash;
@ -59,6 +70,8 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
DRM_DEBUG("%d\n", magic); DRM_DEBUG("%d\n", magic);
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
hash = drm_hash_magic(magic); hash = drm_hash_magic(magic);
entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT); entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT);
if (!entry) return ENOMEM; if (!entry) return ENOMEM;
@ -79,16 +92,21 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
return 0; return 0;
} }
/**
* Removes the given magic number from the hash table of used magic number
* lists.
*/
static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic) static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
{ {
drm_magic_entry_t *prev = NULL; drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt; drm_magic_entry_t *pt;
int hash; int hash;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
DRM_DEBUG("%d\n", magic); DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic); hash = drm_hash_magic(magic);
DRM_LOCK();
for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) { for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) { if (pt->magic == magic) {
if (dev->magiclist[hash].head == pt) { if (dev->magiclist[hash].head == pt) {
@ -100,16 +118,22 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
if (prev) { if (prev) {
prev->next = pt->next; prev->next = pt->next;
} }
DRM_UNLOCK();
return 0; return 0;
} }
} }
DRM_UNLOCK();
free(pt, M_DRM); free(pt, M_DRM);
return EINVAL; return EINVAL;
} }
/**
* Called by the client, this returns a unique magic number to be authorized
* by the master.
*
* The master may use its own knowledge of the client (such as the X
* connection that the magic is passed over) to determine if the magic number
* should be authenticated.
*/
int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv)
{ {
static drm_magic_t sequence = 0; static drm_magic_t sequence = 0;
@ -122,15 +146,15 @@ int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv)
DRM_LOCK(); DRM_LOCK();
do { do {
int old = sequence; int old = sequence;
auth->magic = old+1; auth->magic = old+1;
if (!atomic_cmpset_int(&sequence, old, auth->magic)) if (!atomic_cmpset_int(&sequence, old, auth->magic))
continue; continue;
} while (drm_find_file(dev, auth->magic)); } while (drm_find_file(dev, auth->magic));
file_priv->magic = auth->magic; file_priv->magic = auth->magic;
DRM_UNLOCK();
drm_add_magic(dev, file_priv, auth->magic); drm_add_magic(dev, file_priv, auth->magic);
DRM_UNLOCK();
} }
DRM_DEBUG("%u\n", auth->magic); DRM_DEBUG("%u\n", auth->magic);
@ -138,6 +162,9 @@ int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv)
return 0; return 0;
} }
/**
* Marks the client associated with the given magic number as authenticated.
*/
int drm_authmagic(drm_device_t *dev, void *data, struct drm_file *file_priv) int drm_authmagic(drm_device_t *dev, void *data, struct drm_file *file_priv)
{ {
drm_auth_t *auth = data; drm_auth_t *auth = data;

View file

@ -1,6 +1,3 @@
/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*- /*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,10 @@
* *
*/ */
/** @file drm_bufs.c
* Implementation of the ioctls for setup of DRM mappings and DMA buffers.
*/
#include "dev/pci/pcireg.h" #include "dev/pci/pcireg.h"
#include "drmP.h" #include "drmP.h"
@ -190,7 +191,17 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
break; break;
case _DRM_AGP: case _DRM_AGP:
/*valid = 0;*/ /*valid = 0;*/
map->offset += dev->agp->base; /* In some cases (i810 driver), user space may have already
* added the AGP base itself, because dev->agp->base previously
* only got set during AGP enable. So, only add the base
* address if the map's offset isn't already within the
* aperture.
*/
if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->info.ai_aperture_size - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->mtrr; /* for getmap */ map->mtrr = dev->agp->mtrr; /* for getmap */
/*for (entry = dev->agp->memory; entry; entry = entry->next) { /*for (entry = dev->agp->memory; entry; entry = entry->next) {
if ((map->offset >= entry->bound) && if ((map->offset >= entry->bound) &&

View file

@ -1,6 +1,3 @@
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
*/
/*- /*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,10 @@
* *
*/ */
/** @file drm_context.c
* Implementation of the context management ioctls.
*/
#include "drmP.h" #include "drmP.h"
/* ================================================================ /* ================================================================

View file

@ -1,6 +1,3 @@
/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*/
/*- /*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,14 @@
* *
*/ */
/** @file drm_dma.c
* Support code for DMA buffer management.
*
* The implementation used to be significantly more complicated, but the
* complexity has been moved into the drivers as different buffer management
* schemes evolved.
*/
#include "drmP.h" #include "drmP.h"
int drm_dma_setup(drm_device_t *dev) int drm_dma_setup(drm_device_t *dev)
@ -121,6 +126,7 @@ int drm_dma(drm_device_t *dev, void *data, struct drm_file *file_priv)
{ {
if (dev->driver.dma_ioctl) { if (dev->driver.dma_ioctl) {
/* shared code returns -errno */
return -dev->driver.dma_ioctl(dev, data, file_priv); return -dev->driver.dma_ioctl(dev, data, file_priv);
} else { } else {
DRM_DEBUG("DMA ioctl on driver with no dma handler\n"); DRM_DEBUG("DMA ioctl on driver with no dma handler\n");

View file

@ -1,6 +1,3 @@
/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*- /*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
* *
*/ */
/** @file drm_drawable.c
* This file implements ioctls to store information along with DRM drawables,
* such as the current set of cliprects for vblank-synced buffer swaps.
*/
#include "drmP.h" #include "drmP.h"
struct bsd_drm_drawable_info { struct bsd_drm_drawable_info {

View file

@ -1,6 +1,3 @@
/* drm_drv.h -- Generic driver template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*- /*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,12 @@
* *
*/ */
/** @file drm_drv.c
* The catch-all file for DRM device support, including module setup/teardown,
* open/close, and ioctl dispatch.
*/
#include <sys/limits.h> #include <sys/limits.h>
#include "drmP.h" #include "drmP.h"
#include "drm.h" #include "drm.h"
@ -818,14 +821,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags, int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_STRUCTPROC *p) DRM_STRUCTPROC *p)
{ {
#ifdef __FreeBSD__ drm_device_t *dev = drm_get_device_from_kdev(kdev);
drm_device_t *dev = kdev->si_drv1;
#elif defined(__NetBSD__)
drm_device_t *dev = device_lookup(&drm_cd, minor(kdev));
#else
drm_device_t *dev = device_lookup(&drm_cd,
minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)];
#endif
int retcode = 0; int retcode = 0;
drm_ioctl_desc_t *ioctl; drm_ioctl_desc_t *ioctl;
int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv); int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv);
@ -912,15 +908,13 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
((ioctl->flags & DRM_MASTER) && !file_priv->master)) ((ioctl->flags & DRM_MASTER) && !file_priv->master))
return EACCES; return EACCES;
if (is_driver_ioctl)
DRM_LOCK();
retcode = func(dev, data, file_priv);
if (is_driver_ioctl) { if (is_driver_ioctl) {
DRM_LOCK();
/* shared code returns -errno */
retcode = -func(dev, data, file_priv);
DRM_UNLOCK(); DRM_UNLOCK();
/* Driver ioctls in shared code follow the linux convention of } else {
* returning -errno instead of errno. retcode = func(dev, data, file_priv);
*/
retcode = -retcode;
} }
if (retcode != 0) if (retcode != 0)

View file

@ -1,6 +1,3 @@
/* drm_fops.h -- File operations for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*/
/*- /*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -32,6 +29,11 @@
* *
*/ */
/** @file drm_fops.c
* Support code for dealing with the file privates associated with each
* open of the DRM device.
*/
#include "drmP.h" #include "drmP.h"
drm_file_t *drm_find_file_by_proc(drm_device_t *dev, DRM_STRUCTPROC *p) drm_file_t *drm_find_file_by_proc(drm_device_t *dev, DRM_STRUCTPROC *p)
@ -93,6 +95,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
priv->authenticated = DRM_SUSER(p); priv->authenticated = DRM_SUSER(p);
if (dev->driver.open) { if (dev->driver.open) {
/* shared code returns -errno */
retcode = -dev->driver.open(dev, priv); retcode = -dev->driver.open(dev, priv);
if (retcode != 0) { if (retcode != 0) {
free(priv, M_DRM); free(priv, M_DRM);

View file

@ -1,6 +1,3 @@
/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
*/
/*- /*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,11 @@
* *
*/ */
/** @file drm_ioctl.c
* Varios minor DRM ioctls not applicable to other files, such as versioning
* information and reporting DRM information to userland.
*/
#include "drmP.h" #include "drmP.h"
/* /*
@ -203,7 +205,7 @@ int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv)
drm_stats_t *stats = data; drm_stats_t *stats = data;
int i; int i;
memset(&stats, 0, sizeof(stats)); memset(stats, 0, sizeof(drm_stats_t));
DRM_LOCK(); DRM_LOCK();
@ -230,23 +232,27 @@ int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv)
int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv) int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv)
{ {
drm_set_version_t *sv = data; drm_set_version_t *sv = data;
drm_set_version_t retv; drm_set_version_t ver;
int if_version; int if_version;
retv.drm_di_major = DRM_IF_MAJOR; /* Save the incoming data, and set the response before continuing
retv.drm_di_minor = DRM_IF_MINOR; * any further.
retv.drm_dd_major = dev->driver.major; */
retv.drm_dd_minor = dev->driver.minor; ver = *sv;
sv->drm_di_major = DRM_IF_MAJOR;
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver.major;
sv->drm_dd_minor = dev->driver.minor;
if (sv->drm_di_major != -1) { if (ver.drm_di_major != -1) {
if (sv->drm_di_major != DRM_IF_MAJOR || if (ver.drm_di_major != DRM_IF_MAJOR ||
sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
return EINVAL; return EINVAL;
} }
if_version = DRM_IF_VERSION(sv->drm_di_major, if_version = DRM_IF_VERSION(ver.drm_di_major,
sv->drm_dd_minor); ver.drm_dd_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version); dev->if_version = DRM_MAX(if_version, dev->if_version);
if (sv->drm_di_minor >= 1) { if (ver.drm_di_minor >= 1) {
/* /*
* Version 1.1 includes tying of DRM to specific device * Version 1.1 includes tying of DRM to specific device
*/ */
@ -254,10 +260,10 @@ int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv)
} }
} }
if (sv->drm_dd_major != -1) { if (ver.drm_dd_major != -1) {
if (sv->drm_dd_major != dev->driver.major || if (ver.drm_dd_major != dev->driver.major ||
sv->drm_dd_minor < 0 || ver.drm_dd_minor < 0 ||
sv->drm_dd_minor > dev->driver.minor) ver.drm_dd_minor > dev->driver.minor)
{ {
return EINVAL; return EINVAL;
} }

View file

@ -1,6 +1,3 @@
/* drm_irq.c -- IRQ IOCTL and function support
* Created: Fri Oct 18 2003 by anholt@FreeBSD.org
*/
/*- /*-
* Copyright 2003 Eric Anholt * Copyright 2003 Eric Anholt
* All Rights Reserved. * All Rights Reserved.
@ -28,6 +25,11 @@
* *
*/ */
/** @file drm_irq.c
* Support code for handling setup/teardown of interrupt handlers and
* handing interrupt handlers off to the drivers.
*/
#include "drmP.h" #include "drmP.h"
#include "drm.h" #include "drm.h"
@ -241,6 +243,7 @@ int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv)
ret = EINVAL; ret = EINVAL;
} else { } else {
DRM_LOCK(); DRM_LOCK();
/* shared code returns -errno */
ret = -dev->driver.vblank_wait(dev, ret = -dev->driver.vblank_wait(dev,
&vblwait->request.sequence); &vblwait->request.sequence);
DRM_UNLOCK(); DRM_UNLOCK();

View file

@ -1,6 +1,3 @@
/* lock.c -- IOCTLs for locking -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*- /*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,25 @@
* *
*/ */
/** @file drm_lock.c
* Implementation of the ioctls and other support code for dealing with the
* hardware lock.
*
* The DRM hardware lock is a shared structure between the kernel and userland.
*
* On uncontended access where the new context was the last context, the
* client may take the lock without dropping down into the kernel, using atomic
* compare-and-set.
*
* If the client finds during compare-and-set that it was not the last owner
* of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
* lock, and may have side-effects of kernel-managed context switching.
*
* When the client releases the lock, if the lock is marked as being contended
* by another client, then the DRM unlock ioctl is called so that the
* contending client may be woken up.
*/
#include "drmP.h" #include "drmP.h"
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context) int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
@ -157,6 +173,12 @@ int drm_unlock(drm_device_t *dev, void *data, struct drm_file *file_priv)
DRM_CURRENTPID, lock->context); DRM_CURRENTPID, lock->context);
return EINVAL; return EINVAL;
} }
/* Check that the context unlock being requested actually matches
* who currently holds the lock.
*/
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
return EINVAL;
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

View file

@ -1,6 +1,3 @@
/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
*/
/*- /*-
*Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. *Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -31,6 +28,14 @@
* *
*/ */
/** @file drm_memory.c
* Wrappers for kernel memory allocation routines, and MTRR management support.
*
* This file previously implemented a memory consumption tracking system using
* the "area" argument for various different types of allocations, but that
* has been stripped out for now.
*/
#include "drmP.h" #include "drmP.h"
MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures"); MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");

View file

@ -1,10 +1,3 @@
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory functions.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
/*- /*-
* Copyright 2003 Eric Anholt. * Copyright 2003 Eric Anholt.
* All Rights Reserved. * All Rights Reserved.
@ -28,6 +21,13 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ */
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory allocation.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
#include "drmP.h" #include "drmP.h"
/**********************************************************************/ /**********************************************************************/

View file

@ -1,5 +1,3 @@
/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
* Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */
/*- /*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved. * All Rights Reserved.
@ -29,6 +27,13 @@
* *
*/ */
/** @file drm_scatter.c
* Allocation of memory for scatter-gather mappings by the graphics chip.
*
* The memory allocated here is then made into an aperture in the card
* by drm_ati_pcigart_init().
*/
#include "drmP.h" #include "drmP.h"
#define DEBUG_SCATTER 0 #define DEBUG_SCATTER 0

View file

@ -21,6 +21,11 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ */
/** @file drm_sysctl.c
* Implementation of various sysctls for controlling DRM behavior and reporting
* debug information.
*/
#include "drmP.h" #include "drmP.h"
#include "drm.h" #include "drm.h"

View file

@ -21,6 +21,10 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ */
/** @file drm_vm.c
* Support code for mmaping of DRM maps.
*/
#include "drmP.h" #include "drmP.h"
#include "drm.h" #include "drm.h"

View file

@ -22,7 +22,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o i915_buffer.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \
nouveau_sgdma.o \ nouveau_sgdma.o nouveau_dma.o \
nv04_timer.o \ nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fb.o nv10_fb.o nv40_fb.o \

View file

@ -911,6 +911,8 @@ extern void drm_exit(struct drm_driver *driver);
extern void drm_cleanup_pci(struct pci_dev *pdev); extern void drm_cleanup_pci(struct pci_dev *pdev);
extern int drm_ioctl(struct inode *inode, struct file *filp, extern int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
extern long drm_unlocked_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp, extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg); unsigned int cmd, unsigned long arg);
@ -1073,6 +1075,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
extern int drm_control(struct drm_device *dev, void *data, extern int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
extern int drm_irq_install(struct drm_device *dev);
extern int drm_irq_uninstall(struct drm_device *dev); extern int drm_irq_uninstall(struct drm_device *dev);
extern void drm_driver_irq_preinstall(struct drm_device *dev); extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev); extern void drm_driver_irq_postinstall(struct drm_device *dev);

View file

@ -183,7 +183,6 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
#else #else
agp_enable(dev->agp->bridge, mode.mode); agp_enable(dev->agp->bridge, mode.mode);
#endif #endif
dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->enabled = 1; dev->agp->enabled = 1;
return 0; return 0;
} }
@ -441,6 +440,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
INIT_LIST_HEAD(&head->memory); INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture; head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask; head->page_mask = head->agp_info.page_mask;
head->base = head->agp_info.aper_base;
return head; return head;
} }

View file

@ -517,7 +517,7 @@ static void drm_bo_base_deref_locked(struct drm_file * file_priv,
drm_bo_usage_deref_locked(&bo); drm_bo_usage_deref_locked(&bo);
} }
static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
{ {
struct drm_buffer_object *tmp_bo = *bo; struct drm_buffer_object *tmp_bo = *bo;
struct drm_device *dev = tmp_bo->dev; struct drm_device *dev = tmp_bo->dev;
@ -530,6 +530,7 @@ static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
} }
} }
EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
/* /*
* Note. The caller has to register (if applicable) * Note. The caller has to register (if applicable)
@ -1672,10 +1673,10 @@ int drm_buffer_object_create(struct drm_device *dev,
drm_bo_usage_deref_unlocked(&bo); drm_bo_usage_deref_unlocked(&bo);
return ret; return ret;
} }
EXPORT_SYMBOL(drm_buffer_object_create);
static int drm_bo_add_user_object(struct drm_file *file_priv, int drm_bo_add_user_object(struct drm_file *file_priv,
struct drm_buffer_object *bo, struct drm_buffer_object *bo, int shareable)
int shareable)
{ {
struct drm_device *dev = file_priv->head->dev; struct drm_device *dev = file_priv->head->dev;
int ret; int ret;
@ -1694,6 +1695,7 @@ static int drm_bo_add_user_object(struct drm_file *file_priv,
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
} }
EXPORT_SYMBOL(drm_bo_add_user_object);
static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
{ {

View file

@ -128,6 +128,7 @@ int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
*virtual = addr; *virtual = addr;
return 0; return 0;
} }
EXPORT_SYMBOL(drm_mem_reg_ioremap);
/** /**
* \c Unmap mapping obtained using drm_bo_ioremap * \c Unmap mapping obtained using drm_bo_ioremap

View file

@ -223,11 +223,17 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
#ifdef __alpha__ #ifdef __alpha__
map->offset += dev->hose->mem_space->start; map->offset += dev->hose->mem_space->start;
#endif #endif
/* Note: dev->agp->base may actually be 0 when the DRM /* In some cases (i810 driver), user space may have already
* is not in control of AGP space. But if user space is * added the AGP base itself, because dev->agp->base previously
* it should already have added the AGP base itself. * only got set during AGP enable. So, only add the base
* address if the map's offset isn't already within the
* aperture.
*/ */
map->offset += dev->agp->base; if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->agp_mtrr; /* for getmap */ map->mtrr = dev->agp->agp_mtrr; /* for getmap */
/* This assumes the DRM is in total control of AGP space. /* This assumes the DRM is in total control of AGP space.

View file

@ -573,6 +573,12 @@ static int drm_version(struct drm_device *dev, void *data,
*/ */
int drm_ioctl(struct inode *inode, struct file *filp, int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
{
return drm_unlocked_ioctl(filp, cmd, arg);
}
EXPORT_SYMBOL(drm_ioctl);
long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{ {
struct drm_file *file_priv = filp->private_data; struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->head->dev; struct drm_device *dev = file_priv->head->dev;
@ -650,7 +656,7 @@ err_i1:
DRM_DEBUG("ret = %x\n", retcode); DRM_DEBUG("ret = %x\n", retcode);
return retcode; return retcode;
} }
EXPORT_SYMBOL(drm_ioctl); EXPORT_SYMBOL(drm_unlocked_ioctl);
drm_local_map_t *drm_getsarea(struct drm_device *dev) drm_local_map_t *drm_getsarea(struct drm_device *dev)
{ {

View file

@ -520,9 +520,10 @@ void drm_fence_manager_init(struct drm_device * dev)
struct drm_fence_class_manager *class; struct drm_fence_class_manager *class;
struct drm_fence_driver *fed = dev->driver->fence_driver; struct drm_fence_driver *fed = dev->driver->fence_driver;
int i; int i;
unsigned long flags;
rwlock_init(&fm->lock); rwlock_init(&fm->lock);
write_lock(&fm->lock); write_lock_irqsave(&fm->lock, flags);
fm->initialized = 0; fm->initialized = 0;
if (!fed) if (!fed)
goto out_unlock; goto out_unlock;
@ -541,7 +542,7 @@ void drm_fence_manager_init(struct drm_device * dev)
atomic_set(&fm->count, 0); atomic_set(&fm->count, 0);
out_unlock: out_unlock:
write_unlock(&fm->lock); write_unlock_irqrestore(&fm->lock, flags);
} }
void drm_fence_manager_takedown(struct drm_device * dev) void drm_fence_manager_takedown(struct drm_device * dev)
@ -597,7 +598,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
* usage > 0. No need to lock dev->struct_mutex; * usage > 0. No need to lock dev->struct_mutex;
*/ */
atomic_inc(&fence->usage);
arg->handle = fence->base.hash.key; arg->handle = fence->base.hash.key;
read_lock_irqsave(&fm->lock, flags); read_lock_irqsave(&fm->lock, flags);
@ -830,7 +830,7 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file
DRM_FENCE_FLAG_SHAREABLE); DRM_FENCE_FLAG_SHAREABLE);
if (ret) if (ret)
return ret; return ret;
atomic_inc(&fence->usage);
arg->handle = fence->base.hash.key; arg->handle = fence->base.hash.key;
read_lock_irqsave(&fm->lock, flags); read_lock_irqsave(&fm->lock, flags);

View file

@ -80,7 +80,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
* \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
* before and after the installation. * before and after the installation.
*/ */
static int drm_irq_install(struct drm_device * dev) int drm_irq_install(struct drm_device * dev)
{ {
int ret; int ret;
unsigned long sh_flags = 0; unsigned long sh_flags = 0;
@ -140,6 +140,7 @@ static int drm_irq_install(struct drm_device * dev)
return 0; return 0;
} }
EXPORT_SYMBOL(drm_irq_install);
/** /**
* Uninstall the IRQ handler. * Uninstall the IRQ handler.

View file

@ -483,6 +483,17 @@ extern int drm_bo_mem_space(struct drm_buffer_object * bo,
struct drm_bo_mem_reg * mem, int no_wait); struct drm_bo_mem_reg * mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
int no_wait, int move_unfenced); int no_wait, int move_unfenced);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
enum drm_bo_type type, uint64_t mask,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size);
extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
extern int drm_bo_add_user_object(struct drm_file *file_priv,
struct drm_buffer_object *bo, int sharable);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
/* /*
* Buffer object memory move helpers. * Buffer object memory move helpers.
@ -502,6 +513,11 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
uint32_t fence_flags, uint32_t fence_flags,
struct drm_bo_mem_reg * new_mem); struct drm_bo_mem_reg * new_mem);
extern int drm_mem_reg_ioremap(struct drm_device *dev,
struct drm_bo_mem_reg *mem, void **virtual);
extern void drm_mem_reg_iounmap(struct drm_device *dev,
struct drm_bo_mem_reg *mem, void *virtual);
#ifdef CONFIG_DEBUG_MUTEXES #ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex) \ #define DRM_ASSERT_LOCKED(_mutex) \
BUG_ON(!mutex_is_locked(_mutex) || \ BUG_ON(!mutex_is_locked(_mutex) || \

View file

@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func {
/* This is the init structure after v1.2 */ /* This is the init structure after v1.2 */
typedef struct _drm_i810_init { typedef struct _drm_i810_init {
drm_i810_init_func_t func; drm_i810_init_func_t func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int ring_map_idx;
int buffer_map_idx;
#else
unsigned int mmio_offset; unsigned int mmio_offset;
unsigned int buffers_offset; unsigned int buffers_offset;
#endif
int sarea_priv_offset; int sarea_priv_offset;
unsigned int ring_start; unsigned int ring_start;
unsigned int ring_end; unsigned int ring_end;

1
linux-core/nouveau_dma.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_dma.c

1
linux-core/nouveau_dma.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_dma.h

View file

@ -69,7 +69,7 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be)
if (nvbe->is_bound) if (nvbe->is_bound)
be->func->unbind(be); be->func->unbind(be);
for (d = 0; d < nvbe->pages_populated; d--) { for (d = 0; d < nvbe->pages_populated; d++) {
pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
NV_CTXDMA_PAGE_SIZE, NV_CTXDMA_PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL); PCI_DMA_BIDIRECTIONAL);
@ -211,7 +211,7 @@ nouveau_sgdma_init(struct drm_device *dev)
obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
} }
if ((ret = nouveau_gpuobj_new(dev, -1, obj_size, 16, if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
NVOBJ_FLAG_ALLOW_NO_REFS | NVOBJ_FLAG_ALLOW_NO_REFS |
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { NVOBJ_FLAG_ZERO_FREE, &gpuobj))) {
@ -316,3 +316,20 @@ nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
{ {
} }
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
int pte;
pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
if (dev_priv->card_type < NV_50) {
*page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
return 0;
}
DRM_ERROR("Unimplemented on NV50\n");
return -EINVAL;
}

View file

@ -89,24 +89,6 @@
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif #endif
#define XFREE86_VERSION(major,minor,patch,snap) \
((major << 16) | (minor << 8) | patch)
#ifndef CONFIG_XFREE86_VERSION
#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
#endif
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
#define DRM_PROC_DEVICES "/proc/devices"
#define DRM_PROC_MISC "/proc/misc"
#define DRM_PROC_DRM "/proc/drm"
#define DRM_DEV_DRM "/dev/drm"
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
#endif
#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
#ifdef __OpenBSD__ #ifdef __OpenBSD__
#define DRM_MAJOR 81 #define DRM_MAJOR 81
#endif #endif
@ -114,7 +96,7 @@
#define DRM_MAJOR 226 #define DRM_MAJOR 226
#endif #endif
#define DRM_MAX_MINOR 15 #define DRM_MAX_MINOR 15
#endif
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */ #define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */

View file

@ -482,9 +482,6 @@
0x10DE 0x009E NV40 "NVidia 0x009E" 0x10DE 0x009E NV40 "NVidia 0x009E"
[nouveau] [nouveau]
0x10de 0x0008 NV_03 "EDGE 3D"
0x10de 0x0009 NV_03 "EDGE 3D"
0x10de 0x0010 NV_03 "Mutara V08"
0x10de 0x0020 NV_04 "RIVA TNT" 0x10de 0x0020 NV_04 "RIVA TNT"
0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro" 0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro"
0x10de 0x0029 NV_04 "RIVA TNT2 Ultra" 0x10de 0x0029 NV_04 "RIVA TNT2 Ultra"
@ -510,8 +507,9 @@
0x10de 0x0091 NV_40 "GeForce 7800 GTX" 0x10de 0x0091 NV_40 "GeForce 7800 GTX"
0x10de 0x0092 NV_40 "GeForce 7800 GT" 0x10de 0x0092 NV_40 "GeForce 7800 GT"
0x10de 0x0093 NV_40 "GeForce 7800 GS" 0x10de 0x0093 NV_40 "GeForce 7800 GS"
0x10de 0x0095 NV_40 "GeForce 7800 SLI"
0x10de 0x0098 NV_40 "GeForce Go 7800" 0x10de 0x0098 NV_40 "GeForce Go 7800"
0x10de 0x0099 NV_40 "GE Force Go 7800 GTX" 0x10de 0x0099 NV_40 "GeForce Go 7800 GTX"
0x10de 0x009d NV_40 "Quadro FX4500" 0x10de 0x009d NV_40 "Quadro FX4500"
0x10de 0x00a0 NV_04 "Aladdin TNT2" 0x10de 0x00a0 NV_04 "Aladdin TNT2"
0x10de 0x00c0 NV_40 "GeForce 6800 GS" 0x10de 0x00c0 NV_40 "GeForce 6800 GS"
@ -547,13 +545,16 @@
0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go" 0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go"
0x10de 0x0140 NV_40 "GeForce 6600 GT" 0x10de 0x0140 NV_40 "GeForce 6600 GT"
0x10de 0x0141 NV_40 "GeForce 6600" 0x10de 0x0141 NV_40 "GeForce 6600"
0x10de 0x0142 NV_40 "GeForce 6600 PCIe" 0x10de 0x0142 NV_40 "GeForce 6600 LE"
0x10de 0x0143 NV_40 "GeForce 6600 VE"
0x10de 0x0144 NV_40 "GeForce Go 6600" 0x10de 0x0144 NV_40 "GeForce Go 6600"
0x10de 0x0145 NV_40 "GeForce 6610 XL" 0x10de 0x0145 NV_40 "GeForce 6610 XL"
0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE" 0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE"
0x10de 0x0147 NV_40 "GeForce 6700 XL"
0x10de 0x0148 NV_40 "GeForce Go 6600" 0x10de 0x0148 NV_40 "GeForce Go 6600"
0x10de 0x0149 NV_40 "GeForce Go 6600 GT" 0x10de 0x0149 NV_40 "GeForce Go 6600 GT"
0x10de 0x014a NV_40 "Quadro NVS 440" 0x10de 0x014a NV_40 "Quadro NVS 440"
0x10de 0x014c NV_40 "Quadro FX 550"
0x10de 0x014d NV_17 "Quadro FX 550" 0x10de 0x014d NV_17 "Quadro FX 550"
0x10de 0x014e NV_40 "Quadro FX 540" 0x10de 0x014e NV_40 "Quadro FX 540"
0x10de 0x014f NV_40 "GeForce 6200" 0x10de 0x014f NV_40 "GeForce 6200"
@ -561,6 +562,7 @@
0x10de 0x0151 NV_15 "GeForce2 Ti" 0x10de 0x0151 NV_15 "GeForce2 Ti"
0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner" 0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner"
0x10de 0x0153 NV_15 "Quadro2 Pro" 0x10de 0x0153 NV_15 "Quadro2 Pro"
0x10de 0x0160 NV_44 "GeForce 6500"
0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)" 0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)"
0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)" 0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)"
0x10de 0x0163 NV_44 "GeForce 6200 LE" 0x10de 0x0163 NV_44 "GeForce 6200 LE"
@ -569,6 +571,7 @@
0x10de 0x0166 NV_44 "GeForce Go 6400" 0x10de 0x0166 NV_44 "GeForce Go 6400"
0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache" 0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache"
0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache" 0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache"
0x10de 0x0169 NV_44 "GeForce 6250"
0x10de 0x0170 NV_17 "GeForce4 MX 460" 0x10de 0x0170 NV_17 "GeForce4 MX 460"
0x10de 0x0171 NV_17 "GeForce4 MX 440" 0x10de 0x0171 NV_17 "GeForce4 MX 440"
0x10de 0x0172 NV_17 "GeForce4 MX 420" 0x10de 0x0172 NV_17 "GeForce4 MX 420"
@ -601,11 +604,16 @@
0x10de 0x019e NV_50 "Quadro FX 4600" 0x10de 0x019e NV_50 "Quadro FX 4600"
0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics" 0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics"
0x10de 0x01d1 NV_44 "GeForce 7300 LE" 0x10de 0x01d1 NV_44 "GeForce 7300 LE"
0x10de 0x01d3 NV_44 "Geforce 7300 SE"
0x10de 0x01d6 NV_44 "GeForce Go 7200" 0x10de 0x01d6 NV_44 "GeForce Go 7200"
0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300" 0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300"
0x10de 0x01d8 NV_44 "GeForce Go 7400" 0x10de 0x01d8 NV_44 "GeForce Go 7400"
0x10de 0x01d9 NV_44 "GeForce Go 7400 GS"
0x10de 0x01da NV_44 "Quadro NVS 110M" 0x10de 0x01da NV_44 "Quadro NVS 110M"
0x10de 0x01db NV_44 "Quadro NVS 120M"
0x10de 0x01dc NV_44 "Quadro FX 350M" 0x10de 0x01dc NV_44 "Quadro FX 350M"
0x10de 0x01dd NV_44 "GeForce 7500 LE"
0x10de 0x01de NV_44 "Quadro FX 350"
0x10de 0x01df NV_44 "GeForce 7300 GS" 0x10de 0x01df NV_44 "GeForce 7300 GS"
0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU" 0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU"
0x10de 0x0200 NV_20 "GeForce3" 0x10de 0x0200 NV_20 "GeForce3"
@ -617,9 +625,12 @@
0x10de 0x0215 NV_40 "GeForce 6800 GT" 0x10de 0x0215 NV_40 "GeForce 6800 GT"
0x10de 0x0218 NV_40 "GeForce 6800 XT" 0x10de 0x0218 NV_40 "GeForce 6800 XT"
0x10de 0x0221 NV_44 "GeForce 6200" 0x10de 0x0221 NV_44 "GeForce 6200"
0x10de 0x0222 NV_44 "GeForce 6200 A-LE"
0x10de 0x0240 NV_44 "GeForce 6150" 0x10de 0x0240 NV_44 "GeForce 6150"
0x10de 0x0241 NV_44 "GeForce 6150 LE"
0x10de 0x0242 NV_44 "GeForce 6100" 0x10de 0x0242 NV_44 "GeForce 6100"
0x10de 0x0244 NV_44 "GeForce 6150 Go" 0x10de 0x0244 NV_44 "GeForce Go 6150"
0x10de 0x0247 NV_44 "GeForce Go 6100"
0x10de 0x0250 NV_25 "GeForce4 Ti 4600" 0x10de 0x0250 NV_25 "GeForce4 Ti 4600"
0x10de 0x0251 NV_25 "GeForce4 Ti 4400" 0x10de 0x0251 NV_25 "GeForce4 Ti 4400"
0x10de 0x0252 NV_25 "GeForce4 Ti" 0x10de 0x0252 NV_25 "GeForce4 Ti"
@ -700,7 +711,15 @@
0x10de 0x0391 NV_40 "GeForce 7600 GT" 0x10de 0x0391 NV_40 "GeForce 7600 GT"
0x10de 0x0392 NV_40 "GeForce 7600 GS" 0x10de 0x0392 NV_40 "GeForce 7600 GS"
0x10de 0x0393 NV_40 "GeForce 7300 GT" 0x10de 0x0393 NV_40 "GeForce 7300 GT"
0x10de 0x0394 NV_40 "GeForce 7600 LE"
0x10de 0x0395 NV_40 "GeForce 7300 GT"
0x10de 0x0397 NV_40 "GeForce Go 7700"
0x10de 0x0398 NV_40 "GeForce Go 7600" 0x10de 0x0398 NV_40 "GeForce Go 7600"
0x10de 0x0399 NV_40 "GeForce Go 7600 GT"
0x10de 0x039a NV_40 "Quadro NVS 300M"
0x10de 0x039b NV_40 "GeForce Go 7900 SE"
0x10de 0x039c NV_40 "Quadro FX 550M"
0x10de 0x039e NV_40 "Quadro FX 560"
0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430" 0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430"
0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405" 0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405"
0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400" 0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400"
@ -710,10 +729,6 @@
0x10de 0x0421 NV_50 "GeForce 8500 GT" 0x10de 0x0421 NV_50 "GeForce 8500 GT"
0x10de 0x0422 NV_50 "GeForce 8400 GS" 0x10de 0x0422 NV_50 "GeForce 8400 GS"
0x10de 0x0423 NV_50 "GeForce 8300 GS" 0x10de 0x0423 NV_50 "GeForce 8300 GS"
0x12d2 0x0008 NV_03 "NV1"
0x12d2 0x0009 NV_03 "DAC64"
0x12d2 0x0018 NV_03 "Riva128"
0x12d2 0x0019 NV_03 "Riva128ZX"
0x12d2 0x0020 NV_04 "TNT" 0x12d2 0x0020 NV_04 "TNT"
0x12d2 0x0028 NV_04 "TNT2" 0x12d2 0x0028 NV_04 "TNT2"
0x12d2 0x0029 NV_04 "UTNT2" 0x12d2 0x0029 NV_04 "UTNT2"

View file

@ -546,9 +546,15 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
if (dev_priv->use_mi_batchbuffer_start) { if (dev_priv->use_mi_batchbuffer_start) {
BEGIN_LP_RING(2); BEGIN_LP_RING(2);
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); if (IS_I965G(dev)) {
OUT_RING(batch->start | MI_BATCH_NON_SECURE); OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
ADVANCE_LP_RING(); ADVANCE_LP_RING();
} else { } else {
BEGIN_LP_RING(4); BEGIN_LP_RING(4);
OUT_RING(MI_BATCH_BUFFER); OUT_RING(MI_BATCH_BUFFER);

View file

@ -272,12 +272,25 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_NO_WRITE_FLUSH (1 << 2) #define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_READ_FLUSH (1 << 0) #define MI_READ_FLUSH (1 << 0)
#define MI_EXE_FLUSH (1 << 1) #define MI_EXE_FLUSH (1 << 1)
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
/* Packet to load a register value from the ring/batch command stream:
*/
#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1)
#define BB1_START_ADDR_MASK (~0x7) #define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0) #define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0) #define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7) #define BB2_END_ADDR_MASK (~0x7)
/* Interrupt bits:
*/
#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)
#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
#define I915REG_HWSTAM 0x02098 #define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4 #define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8 #define I915REG_INT_MASK_R 0x020a8
@ -315,6 +328,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define NOPID 0x2094 #define NOPID 0x2094
#define LP_RING 0x2030 #define LP_RING 0x2030
#define HP_RING 0x2040 #define HP_RING 0x2040
/* The binner has its own ring buffer:
*/
#define HWB_RING 0x2400
#define RING_TAIL 0x00 #define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8 #define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04 #define RING_HEAD 0x04
@ -333,11 +350,105 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define RING_VALID 0x00000001 #define RING_VALID 0x00000001
#define RING_INVALID 0x00000000 #define RING_INVALID 0x00000000
/* Instruction parser error reg:
*/
#define IPEIR 0x2088
/* Scratch pad debug 0 reg:
*/
#define SCPD0 0x209c
/* Error status reg:
*/
#define ESR 0x20b8
/* Secondary DMA fetch address debug reg:
*/
#define DMA_FADD_S 0x20d4
/* Cache mode 0 reg.
* - Manipulating render cache behaviour is central
* to the concept of zone rendering, tuning this reg can help avoid
* unnecessary render cache reads and even writes (for z/stencil)
* at beginning and end of scene.
*
* - To change a bit, write to this reg with a mask bit set and the
* bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
*/
#define Cache_Mode_0 0x2120
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_DEPTH_EVICT_DISABLE (1<<4)
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
/* Graphics flush control. A CPU write flushes the GWB of all writes.
* The data is discarded.
*/
#define GFX_FLSH_CNTL 0x2170
/* Binner control. Defines the location of the bin pointer list:
*/
#define BINCTL 0x2420
#define BC_MASK (1 << 9)
/* Binned scene info.
*/
#define BINSCENE 0x2428
#define BS_OP_LOAD (1 << 8)
#define BS_MASK (1 << 22)
/* Bin command parser debug reg:
*/
#define BCPD 0x2480
/* Bin memory control debug reg:
*/
#define BMCD 0x2484
/* Bin data cache debug reg:
*/
#define BDCD 0x2488
/* Binner pointer cache debug reg:
*/
#define BPCD 0x248c
/* Binner scratch pad debug reg:
*/
#define BINSKPD 0x24f0
/* HWB scratch pad debug reg:
*/
#define HWBSKPD 0x24f4
/* Binner memory pool reg:
*/
#define BMP_BUFFER 0x2430
#define BMP_PAGE_SIZE_4K (0 << 10)
#define BMP_BUFFER_SIZE_SHIFT 1
#define BMP_ENABLE (1 << 0)
/* Get/put memory from the binner memory pool:
*/
#define BMP_GET 0x2438
#define BMP_PUT 0x2440
#define BMP_OFFSET_SHIFT 5
/* 3D state packets:
*/
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1) #define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0) #define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0) #define SC_ENABLE (0x1<<0)
#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16) #define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0) #define SCI_XMIN_MASK (0xffff<<0)
@ -364,6 +475,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_WAIT_FOR_EVENT ((0x3<<23)) #define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
@ -376,6 +489,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define DISPLAY_PLANE_A (0<<20) #define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20) #define DISPLAY_PLANE_B (1<<20)
/* Define the region of interest for the binner:
*/
#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define BREADCRUMB_BITS 31 #define BREADCRUMB_BITS 31

177
shared-core/nouveau_dma.c Normal file
View file

@ -0,0 +1,177 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#define SKIPS 8
int
nouveau_dma_channel_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_drm_channel *dchan = &dev_priv->channel;
struct nouveau_gpuobj *gpuobj = NULL;
struct mem_block *pushbuf;
int grclass, ret, i;
DRM_DEBUG("\n");
pushbuf = nouveau_mem_alloc(dev, 0, 0x8000,
NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED,
(struct drm_file *)-2);
if (!pushbuf) {
DRM_ERROR("Failed to allocate DMA push buffer\n");
return -ENOMEM;
}
/* Allocate channel */
ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2,
pushbuf, NvDmaFB, NvDmaTT);
if (ret) {
DRM_ERROR("Error allocating GPU channel: %d\n", ret);
return ret;
}
DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id);
/* Map push buffer */
drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev);
if (!dchan->chan->pushbuf_mem->map->handle) {
DRM_ERROR("Failed to ioremap push buffer\n");
return -EINVAL;
}
dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle;
/* Initialise DMA vars */
dchan->max = (dchan->chan->pushbuf_mem->size >> 2) - 2;
dchan->put = dchan->chan->pushbuf_base >> 2;
dchan->cur = dchan->put;
dchan->free = dchan->max - dchan->cur;
/* Insert NOPS for SKIPS */
dchan->free -= SKIPS;
dchan->push_free = SKIPS;
for (i=0; i<SKIPS; i++)
OUT_RING(0);
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */
if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1,
&dchan->notify0_offset))) {
DRM_ERROR("Error allocating NvNotify0: %d\n", ret);
return ret;
}
/* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT;
else grclass = NV50_MEMORY_TO_MEMORY_FORMAT;
if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) {
DRM_ERROR("Error creating NvM2MF: %d\n", ret);
return ret;
}
if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF,
gpuobj, NULL))) {
DRM_ERROR("Error referencing NvM2MF: %d\n", ret);
return ret;
}
dchan->m2mf_dma_source = NvDmaFB;
dchan->m2mf_dma_destin = NvDmaFB;
BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
OUT_RING (NvM2MF);
BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1);
OUT_RING (NvNotify0);
BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
OUT_RING (dchan->m2mf_dma_source);
OUT_RING (dchan->m2mf_dma_destin);
FIRE_RING();
return 0;
}
void
nouveau_dma_channel_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_drm_channel *dchan = &dev_priv->channel;
DRM_DEBUG("\n");
if (dchan->chan) {
nouveau_fifo_free(dchan->chan);
dchan->chan = NULL;
}
}
#define RING_SKIPS 8
#define READ_GET() ((NV_READ(NV03_FIFO_REGS_DMAGET(dchan->chan->id)) - \
dchan->chan->pushbuf_base) >> 2)
#define WRITE_PUT(val) do { \
NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id), \
((val) << 2) + dchan->chan->pushbuf_base); \
} while(0)
int
nouveau_dma_wait(struct drm_device *dev, int size)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_drm_channel *dchan = &dev_priv->channel;
uint32_t get;
while (dchan->free < size) {
get = READ_GET();
if (dchan->put >= get) {
dchan->free = dchan->max - dchan->cur;
if (dchan->free < size) {
dchan->push_free = 1;
OUT_RING(0x20000000|dchan->chan->pushbuf_base);
if (get <= RING_SKIPS) {
/*corner case - will be idle*/
if (dchan->put <= RING_SKIPS)
WRITE_PUT(RING_SKIPS + 1);
do {
get = READ_GET();
} while (get <= RING_SKIPS);
}
WRITE_PUT(RING_SKIPS);
dchan->cur = dchan->put = RING_SKIPS;
dchan->free = get - (RING_SKIPS + 1);
}
} else {
dchan->free = get - dchan->cur - 1;
}
}
return 0;
}

98
shared-core/nouveau_dma.h Normal file
View file

@ -0,0 +1,98 @@
/*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __NOUVEAU_DMA_H__
#define __NOUVEAU_DMA_H__
typedef enum {
NvSubM2MF = 0,
} nouveau_subchannel_id_t;
typedef enum {
NvM2MF = 0x80039001,
NvDmaFB = 0x8003d001,
NvDmaTT = 0x8003d002,
NvNotify0 = 0x8003d003
} nouveau_object_handle_t;
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY 0x00000180
#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE 0x00000184
#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
#define BEGIN_RING(subc, mthd, cnt) do { \
int push_size = (cnt) + 1; \
if (dchan->push_free) { \
DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free); \
break; \
} \
if (dchan->free < push_size) { \
if (nouveau_dma_wait(dev, push_size)) { \
DRM_ERROR("FIFO timeout\n"); \
break; \
} \
} \
dchan->free -= push_size; \
dchan->push_free = push_size; \
OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd); \
} while(0)
#define OUT_RING(data) do { \
if (dchan->push_free == 0) { \
DRM_ERROR("no space left in packet\n"); \
break; \
} \
dchan->pushbuf[dchan->cur++] = (data); \
dchan->push_free--; \
} while(0)
#define FIRE_RING() do { \
if (dchan->push_free) { \
DRM_ERROR("packet incomplete: %d\n", dchan->push_free); \
break; \
} \
if (dchan->cur != dchan->put) { \
DRM_MEMORYBARRIER(); \
dchan->put = dchan->cur; \
NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id), \
(dchan->put<<2)); \
} \
} while(0)
#endif

View file

@ -25,9 +25,9 @@
#ifndef __NOUVEAU_DRM_H__ #ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__ #define __NOUVEAU_DRM_H__
#define NOUVEAU_DRM_HEADER_PATCHLEVEL 9 #define NOUVEAU_DRM_HEADER_PATCHLEVEL 10
struct drm_nouveau_fifo_alloc { struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle; uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle; uint32_t tt_ctxdma_handle;
@ -44,6 +44,10 @@ struct drm_nouveau_fifo_alloc {
int notifier_size; int notifier_size;
}; };
struct drm_nouveau_channel_free {
int channel;
};
struct drm_nouveau_grobj_alloc { struct drm_nouveau_grobj_alloc {
int channel; int channel;
uint32_t handle; uint32_t handle;
@ -53,7 +57,7 @@ struct drm_nouveau_grobj_alloc {
#define NOUVEAU_MEM_ACCESS_RO 1 #define NOUVEAU_MEM_ACCESS_RO 1
#define NOUVEAU_MEM_ACCESS_WO 2 #define NOUVEAU_MEM_ACCESS_WO 2
#define NOUVEAU_MEM_ACCESS_RW 3 #define NOUVEAU_MEM_ACCESS_RW 3
struct drm_nouveau_notifier_alloc { struct drm_nouveau_notifierobj_alloc {
int channel; int channel;
uint32_t handle; uint32_t handle;
int count; int count;
@ -61,6 +65,11 @@ struct drm_nouveau_notifier_alloc {
uint32_t offset; uint32_t offset;
}; };
struct drm_nouveau_gpuobj_free {
int channel;
uint32_t handle;
};
#define NOUVEAU_MEM_FB 0x00000001 #define NOUVEAU_MEM_FB 0x00000001
#define NOUVEAU_MEM_AGP 0x00000002 #define NOUVEAU_MEM_AGP 0x00000002
#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 #define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004
@ -95,6 +104,7 @@ struct drm_nouveau_mem_free {
#define NOUVEAU_GETPARAM_FB_SIZE 8 #define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9 #define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 #define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
struct drm_nouveau_getparam { struct drm_nouveau_getparam {
uint64_t param; uint64_t param;
uint64_t value; uint64_t value;
@ -109,8 +119,6 @@ struct drm_nouveau_setparam {
enum nouveau_card_type { enum nouveau_card_type {
NV_UNKNOWN =0, NV_UNKNOWN =0,
NV_01 =1,
NV_03 =3,
NV_04 =4, NV_04 =4,
NV_05 =5, NV_05 =5,
NV_10 =10, NV_10 =10,
@ -141,13 +149,16 @@ struct drm_nouveau_sarea {
unsigned int nbox; unsigned int nbox;
}; };
#define DRM_NOUVEAU_FIFO_ALLOC 0x00 #define DRM_NOUVEAU_CARD_INIT 0x00
#define DRM_NOUVEAU_GROBJ_ALLOC 0x01 #define DRM_NOUVEAU_GETPARAM 0x01
#define DRM_NOUVEAU_NOTIFIER_ALLOC 0x02 #define DRM_NOUVEAU_SETPARAM 0x02
#define DRM_NOUVEAU_MEM_ALLOC 0x03 #define DRM_NOUVEAU_CHANNEL_ALLOC 0x03
#define DRM_NOUVEAU_MEM_FREE 0x04 #define DRM_NOUVEAU_CHANNEL_FREE 0x04
#define DRM_NOUVEAU_GETPARAM 0x05 #define DRM_NOUVEAU_GROBJ_ALLOC 0x05
#define DRM_NOUVEAU_SETPARAM 0x06 #define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06
#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
#define DRM_NOUVEAU_MEM_ALLOC 0x08
#define DRM_NOUVEAU_MEM_FREE 0x09
#endif /* __NOUVEAU_DRM_H__ */ #endif /* __NOUVEAU_DRM_H__ */

View file

@ -34,7 +34,7 @@
#define DRIVER_MAJOR 0 #define DRIVER_MAJOR 0
#define DRIVER_MINOR 0 #define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 9 #define DRIVER_PATCHLEVEL 10
#define NOUVEAU_FAMILY 0x0000FFFF #define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000 #define NOUVEAU_FLAGS 0xFFFF0000
@ -67,8 +67,7 @@ enum nouveau_flags {
#define NVOBJ_FLAG_ZERO_FREE (1 << 2) #define NVOBJ_FLAG_ZERO_FREE (1 << 2)
#define NVOBJ_FLAG_FAKE (1 << 3) #define NVOBJ_FLAG_FAKE (1 << 3)
struct nouveau_gpuobj { struct nouveau_gpuobj {
struct nouveau_gpuobj *next; struct list_head list;
struct nouveau_gpuobj *prev;
int im_channel; int im_channel;
struct mem_block *im_pramin; struct mem_block *im_pramin;
@ -80,10 +79,13 @@ struct nouveau_gpuobj {
uint32_t engine; uint32_t engine;
uint32_t class; uint32_t class;
void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
void *priv;
}; };
struct nouveau_gpuobj_ref { struct nouveau_gpuobj_ref {
struct nouveau_gpuobj_ref *next; struct list_head list;
struct nouveau_gpuobj *gpuobj; struct nouveau_gpuobj *gpuobj;
uint32_t instance; uint32_t instance;
@ -92,8 +94,11 @@ struct nouveau_gpuobj_ref {
int handle; int handle;
}; };
struct nouveau_fifo struct nouveau_channel
{ {
struct drm_device *dev;
int id;
/* owner of this fifo */ /* owner of this fifo */
struct drm_file *file_priv; struct drm_file *file_priv;
/* mapping of the fifo itself */ /* mapping of the fifo itself */
@ -126,7 +131,23 @@ struct nouveau_fifo
struct nouveau_gpuobj_ref *ramin; /* Private instmem */ struct nouveau_gpuobj_ref *ramin; /* Private instmem */
struct mem_block *ramin_heap; /* Private PRAMIN heap */ struct mem_block *ramin_heap; /* Private PRAMIN heap */
struct nouveau_gpuobj_ref *ramht; /* Hash table */ struct nouveau_gpuobj_ref *ramht; /* Hash table */
struct nouveau_gpuobj_ref *ramht_refs; /* Objects referenced by RAMHT */ struct list_head ramht_refs; /* Objects referenced by RAMHT */
};
struct nouveau_drm_channel {
struct nouveau_channel *chan;
/* DMA state */
int max, put, cur, free;
int push_free;
volatile uint32_t *pushbuf;
/* Notifiers */
uint32_t notify0_offset;
/* Buffer moves */
uint32_t m2mf_dma_source;
uint32_t m2mf_dma_destin;
}; };
struct nouveau_config { struct nouveau_config {
@ -136,57 +157,64 @@ struct nouveau_config {
} cmdbuf; } cmdbuf;
}; };
struct nouveau_engine_func { struct nouveau_instmem_engine {
struct { void *priv;
void *priv;
int (*init)(struct drm_device *dev); int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev); void (*takedown)(struct drm_device *dev);
int (*populate)(struct drm_device *, struct nouveau_gpuobj *, int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
uint32_t *size); uint32_t *size);
void (*clear)(struct drm_device *, struct nouveau_gpuobj *); void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
int (*bind)(struct drm_device *, struct nouveau_gpuobj *); int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
} instmem; };
struct { struct nouveau_mc_engine {
int (*init)(struct drm_device *dev); int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev); void (*takedown)(struct drm_device *dev);
} mc; };
struct { struct nouveau_timer_engine {
int (*init)(struct drm_device *dev); int (*init)(struct drm_device *dev);
uint64_t (*read)(struct drm_device *dev); void (*takedown)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev); uint64_t (*read)(struct drm_device *dev);
} timer; };
struct { struct nouveau_fb_engine {
int (*init)(struct drm_device *dev); int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev); void (*takedown)(struct drm_device *dev);
} fb; };
struct { struct nouveau_fifo_engine {
int (*init)(struct drm_device *); void *priv;
void (*takedown)(struct drm_device *);
int (*create_context)(struct drm_device *, int channel); int (*init)(struct drm_device *);
void (*destroy_context)(struct drm_device *, int channel); void (*takedown)(struct drm_device *);
int (*load_context)(struct drm_device *, int channel);
int (*save_context)(struct drm_device *, int channel);
} graph;
struct { int (*create_context)(struct nouveau_channel *);
void *priv; void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*save_context)(struct nouveau_channel *);
};
int (*init)(struct drm_device *); struct nouveau_pgraph_engine {
void (*takedown)(struct drm_device *); int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
int (*create_context)(struct drm_device *, int channel); int (*create_context)(struct nouveau_channel *);
void (*destroy_context)(struct drm_device *, int channel); void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct drm_device *, int channel); int (*load_context)(struct nouveau_channel *);
int (*save_context)(struct drm_device *, int channel); int (*save_context)(struct nouveau_channel *);
} fifo; };
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
struct nouveau_timer_engine timer;
struct nouveau_fb_engine fb;
struct nouveau_pgraph_engine graph;
struct nouveau_fifo_engine fifo;
}; };
struct drm_nouveau_private { struct drm_nouveau_private {
@ -207,9 +235,10 @@ struct drm_nouveau_private {
drm_local_map_t *ramin; /* NV40 onwards */ drm_local_map_t *ramin; /* NV40 onwards */
int fifo_alloc_count; int fifo_alloc_count;
struct nouveau_fifo *fifos[NV_MAX_FIFO_NUMBER]; struct nouveau_channel *fifos[NV_MAX_FIFO_NUMBER];
struct nouveau_engine_func Engine; struct nouveau_engine Engine;
struct nouveau_drm_channel channel;
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_gpuobj *ramht; struct nouveau_gpuobj *ramht;
@ -259,230 +288,277 @@ struct drm_nouveau_private {
struct nouveau_config config; struct nouveau_config config;
struct nouveau_gpuobj *gpuobj_all; struct list_head gpuobj_list;
}; };
#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
struct drm_nouveau_private *nv = dev->dev_private; \
if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
DRM_ERROR("called without init\n"); \
return -EINVAL; \
} \
} while(0)
#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \
struct drm_nouveau_private *nv = dev->dev_private; \
if (!nouveau_fifo_owner(dev, (cl), (id))) { \
DRM_ERROR("pid %d doesn't own channel %d\n", \
DRM_CURRENTPID, (id)); \
return -EPERM; \
} \
(ch) = nv->fifos[(id)]; \
} while(0)
/* nouveau_state.c */ /* nouveau_state.c */
extern void nouveau_preclose(struct drm_device * dev, extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
struct drm_file *file_priv); extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_load(struct drm_device *dev, unsigned long flags); extern int nouveau_firstopen(struct drm_device *);
extern int nouveau_firstopen(struct drm_device *dev); extern void nouveau_lastclose(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *dev); extern int nouveau_unload(struct drm_device *);
extern int nouveau_unload(struct drm_device *dev); extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
extern int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv); struct drm_file *);
extern int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
extern void nouveau_wait_for_idle(struct drm_device *dev); struct drm_file *);
extern int nouveau_ioctl_card_init(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
struct drm_file *);
/* nouveau_mem.c */ /* nouveau_mem.c */
extern int nouveau_mem_init_heap(struct mem_block **, extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
uint64_t start, uint64_t size); uint64_t size);
extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
uint64_t size, int align2, uint64_t size, int align2,
struct drm_file *file_priv); struct drm_file *);
extern void nouveau_mem_takedown(struct mem_block **heap); extern void nouveau_mem_takedown(struct mem_block **heap);
extern void nouveau_mem_free_block(struct mem_block *); extern void nouveau_mem_free_block(struct mem_block *);
extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
extern void nouveau_mem_release(struct drm_file *file_priv, extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
struct mem_block *heap); extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data,
extern int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); struct drm_file *);
extern int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int nouveau_ioctl_mem_free(struct drm_device *, void *data,
extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, struct drm_file *);
extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
int alignment, uint64_t size, int alignment, uint64_t size,
int flags, int flags, struct drm_file *);
struct drm_file *file_priv); extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);
extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*); extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init(struct drm_device *dev); extern void nouveau_mem_close(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *dev);
/* nouveau_notifier.c */ /* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct drm_device *, int channel, extern int nouveau_notifier_init_channel(struct nouveau_channel *);
struct drm_file *file_priv); extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern void nouveau_notifier_takedown_channel(struct drm_device *, int channel); extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
extern int nouveau_notifier_alloc(struct drm_device *, int channel, int cout, uint32_t *offset);
uint32_t handle, int cout, uint32_t *offset); extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
extern int nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); struct drm_file *);
extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
struct drm_file *);
/* nouveau_fifo.c */ /* nouveau_fifo.c */
extern int nouveau_fifo_init(struct drm_device *dev); extern int nouveau_fifo_init(struct drm_device *);
extern int nouveau_fifo_number(struct drm_device *dev); extern int nouveau_fifo_number(struct drm_device *);
extern int nouveau_fifo_ctx_size(struct drm_device *dev); extern int nouveau_fifo_ctx_size(struct drm_device *);
extern void nouveau_fifo_cleanup(struct drm_device *dev, extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *);
struct drm_file *file_priv); extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *,
extern int nouveau_fifo_owner(struct drm_device *dev, int channel);
struct drm_file *file_priv, int channel); extern int nouveau_fifo_alloc(struct drm_device *dev,
extern void nouveau_fifo_free(struct drm_device *dev, int channel); struct nouveau_channel **chan,
struct drm_file *file_priv,
struct mem_block *pushbuf,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
extern void nouveau_fifo_free(struct nouveau_channel *);
/* nouveau_object.c */ /* nouveau_object.c */
extern void nouveau_gpuobj_takedown(struct drm_device *dev); extern int nouveau_gpuobj_early_init(struct drm_device *);
extern int nouveau_gpuobj_channel_init(struct drm_device *, int channel, extern int nouveau_gpuobj_init(struct drm_device *);
extern void nouveau_gpuobj_takedown(struct drm_device *);
extern void nouveau_gpuobj_late_takedown(struct drm_device *);
extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
uint32_t vram_h, uint32_t tt_h); uint32_t vram_h, uint32_t tt_h);
extern void nouveau_gpuobj_channel_takedown(struct drm_device *, int channel); extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
extern int nouveau_gpuobj_new(struct drm_device *, int channel, int size, int align, extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
uint32_t flags, struct nouveau_gpuobj **); int size, int align, uint32_t flags,
struct nouveau_gpuobj **);
extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **); extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
extern int nouveau_gpuobj_ref_add(struct drm_device *, int channel, uint32_t handle, extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
struct nouveau_gpuobj *, uint32_t handle, struct nouveau_gpuobj *,
struct nouveau_gpuobj_ref **); struct nouveau_gpuobj_ref **);
extern int nouveau_gpuobj_ref_del(struct drm_device *, struct nouveau_gpuobj_ref **); extern int nouveau_gpuobj_ref_del(struct drm_device *,
extern int nouveau_gpuobj_new_ref(struct drm_device *, int chan_obj, int chan_ref, struct nouveau_gpuobj_ref **);
extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
struct nouveau_gpuobj_ref **ref_ret);
extern int nouveau_gpuobj_new_ref(struct drm_device *,
struct nouveau_channel *alloc_chan,
struct nouveau_channel *ref_chan,
uint32_t handle, int size, int align, uint32_t handle, int size, int align,
uint32_t flags, struct nouveau_gpuobj_ref **); uint32_t flags, struct nouveau_gpuobj_ref **);
extern int nouveau_gpuobj_new_fake(struct drm_device *, uint32_t offset, extern int nouveau_gpuobj_new_fake(struct drm_device *,
uint32_t p_offset, uint32_t b_offset,
uint32_t size, uint32_t flags, uint32_t size, uint32_t flags,
struct nouveau_gpuobj**, struct nouveau_gpuobj **,
struct nouveau_gpuobj_ref**); struct nouveau_gpuobj_ref**);
extern int nouveau_gpuobj_dma_new(struct drm_device *, int channel, int class, extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
uint64_t offset, uint64_t size, uint64_t offset, uint64_t size, int access,
int access, int target, int target, struct nouveau_gpuobj **);
struct nouveau_gpuobj **); extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
extern int nouveau_gpuobj_gart_dma_new(struct drm_device *, int channel,
uint64_t offset, uint64_t size, uint64_t offset, uint64_t size,
int access, struct nouveau_gpuobj **, int access, struct nouveau_gpuobj **,
uint32_t *o_ret); uint32_t *o_ret);
extern int nouveau_gpuobj_gr_new(struct drm_device *, int channel, int class, extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
struct nouveau_gpuobj **); struct nouveau_gpuobj **);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
struct drm_file *);
/* nouveau_irq.c */ /* nouveau_irq.c */
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
extern void nouveau_irq_preinstall(struct drm_device*); extern void nouveau_irq_preinstall(struct drm_device *);
extern void nouveau_irq_postinstall(struct drm_device*); extern void nouveau_irq_postinstall(struct drm_device *);
extern void nouveau_irq_uninstall(struct drm_device*); extern void nouveau_irq_uninstall(struct drm_device *);
/* nouveau_sgdma.c */ /* nouveau_sgdma.c */
extern int nouveau_sgdma_init(struct drm_device *); extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *); extern void nouveau_sgdma_takedown(struct drm_device *);
extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
uint32_t *page);
extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
extern int nouveau_sgdma_nottm_hack_init(struct drm_device *); extern int nouveau_sgdma_nottm_hack_init(struct drm_device *);
extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *); extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *);
/* nouveau_dma.c */
extern int nouveau_dma_channel_init(struct drm_device *);
extern void nouveau_dma_channel_takedown(struct drm_device *);
extern int nouveau_dma_wait(struct drm_device *, int size);
/* nv04_fb.c */ /* nv04_fb.c */
extern int nv04_fb_init(struct drm_device *dev); extern int nv04_fb_init(struct drm_device *);
extern void nv04_fb_takedown(struct drm_device *dev); extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */ /* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *dev); extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *dev); extern void nv10_fb_takedown(struct drm_device *);
/* nv40_fb.c */ /* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *dev); extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *dev); extern void nv40_fb_takedown(struct drm_device *);
/* nv04_fifo.c */ /* nv04_fifo.c */
extern int nv04_fifo_create_context(struct drm_device *dev, int channel); extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct drm_device *dev, int channel); extern void nv04_fifo_destroy_context(struct nouveau_channel *);
extern int nv04_fifo_load_context(struct drm_device *dev, int channel); extern int nv04_fifo_load_context(struct nouveau_channel *);
extern int nv04_fifo_save_context(struct drm_device *dev, int channel); extern int nv04_fifo_save_context(struct nouveau_channel *);
/* nv10_fifo.c */ /* nv10_fifo.c */
extern int nv10_fifo_create_context(struct drm_device *dev, int channel); extern int nv10_fifo_create_context(struct nouveau_channel *);
extern void nv10_fifo_destroy_context(struct drm_device *dev, int channel); extern void nv10_fifo_destroy_context(struct nouveau_channel *);
extern int nv10_fifo_load_context(struct drm_device *dev, int channel); extern int nv10_fifo_load_context(struct nouveau_channel *);
extern int nv10_fifo_save_context(struct drm_device *dev, int channel); extern int nv10_fifo_save_context(struct nouveau_channel *);
/* nv40_fifo.c */ /* nv40_fifo.c */
extern int nv40_fifo_create_context(struct drm_device *, int channel); extern int nv40_fifo_init(struct drm_device *);
extern void nv40_fifo_destroy_context(struct drm_device *, int channel); extern int nv40_fifo_create_context(struct nouveau_channel *);
extern int nv40_fifo_load_context(struct drm_device *, int channel); extern void nv40_fifo_destroy_context(struct nouveau_channel *);
extern int nv40_fifo_save_context(struct drm_device *, int channel); extern int nv40_fifo_load_context(struct nouveau_channel *);
extern int nv40_fifo_save_context(struct nouveau_channel *);
/* nv50_fifo.c */ /* nv50_fifo.c */
extern int nv50_fifo_init(struct drm_device *); extern int nv50_fifo_init(struct drm_device *);
extern void nv50_fifo_takedown(struct drm_device *); extern void nv50_fifo_takedown(struct drm_device *);
extern int nv50_fifo_create_context(struct drm_device *, int channel); extern int nv50_fifo_create_context(struct nouveau_channel *);
extern void nv50_fifo_destroy_context(struct drm_device *, int channel); extern void nv50_fifo_destroy_context(struct nouveau_channel *);
extern int nv50_fifo_load_context(struct drm_device *, int channel); extern int nv50_fifo_load_context(struct nouveau_channel *);
extern int nv50_fifo_save_context(struct drm_device *, int channel); extern int nv50_fifo_save_context(struct nouveau_channel *);
/* nv04_graph.c */ /* nv04_graph.c */
extern void nouveau_nv04_context_switch(struct drm_device *dev); extern void nouveau_nv04_context_switch(struct drm_device *);
extern int nv04_graph_init(struct drm_device *dev); extern int nv04_graph_init(struct drm_device *);
extern void nv04_graph_takedown(struct drm_device *dev); extern void nv04_graph_takedown(struct drm_device *);
extern int nv04_graph_create_context(struct drm_device *dev, int channel); extern int nv04_graph_create_context(struct nouveau_channel *);
extern void nv04_graph_destroy_context(struct drm_device *dev, int channel); extern void nv04_graph_destroy_context(struct nouveau_channel *);
extern int nv04_graph_load_context(struct drm_device *dev, int channel); extern int nv04_graph_load_context(struct nouveau_channel *);
extern int nv04_graph_save_context(struct drm_device *dev, int channel); extern int nv04_graph_save_context(struct nouveau_channel *);
/* nv10_graph.c */ /* nv10_graph.c */
extern void nouveau_nv10_context_switch(struct drm_device *dev); extern void nouveau_nv10_context_switch(struct drm_device *);
extern int nv10_graph_init(struct drm_device *dev); extern int nv10_graph_init(struct drm_device *);
extern void nv10_graph_takedown(struct drm_device *dev); extern void nv10_graph_takedown(struct drm_device *);
extern int nv10_graph_create_context(struct drm_device *dev, int channel); extern int nv10_graph_create_context(struct nouveau_channel *);
extern void nv10_graph_destroy_context(struct drm_device *dev, int channel); extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct drm_device *dev, int channel); extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_save_context(struct drm_device *dev, int channel); extern int nv10_graph_save_context(struct nouveau_channel *);
/* nv20_graph.c */ /* nv20_graph.c */
extern void nouveau_nv20_context_switch(struct drm_device *dev); extern void nouveau_nv20_context_switch(struct drm_device *);
extern int nv20_graph_init(struct drm_device *dev); extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *dev); extern void nv20_graph_takedown(struct drm_device *);
extern int nv20_graph_create_context(struct drm_device *dev, int channel); extern int nv20_graph_create_context(struct nouveau_channel *);
extern void nv20_graph_destroy_context(struct drm_device *dev, int channel); extern void nv20_graph_destroy_context(struct nouveau_channel *);
extern int nv20_graph_load_context(struct drm_device *dev, int channel); extern int nv20_graph_load_context(struct nouveau_channel *);
extern int nv20_graph_save_context(struct drm_device *dev, int channel); extern int nv20_graph_save_context(struct nouveau_channel *);
/* nv30_graph.c */ /* nv30_graph.c */
extern int nv30_graph_init(struct drm_device *dev); extern int nv30_graph_init(struct drm_device *);
extern void nv30_graph_takedown(struct drm_device *dev); extern void nv30_graph_takedown(struct drm_device *);
extern int nv30_graph_create_context(struct drm_device *, int channel); extern int nv30_graph_create_context(struct nouveau_channel *);
extern void nv30_graph_destroy_context(struct drm_device *, int channel); extern void nv30_graph_destroy_context(struct nouveau_channel *);
extern int nv30_graph_load_context(struct drm_device *, int channel); extern int nv30_graph_load_context(struct nouveau_channel *);
extern int nv30_graph_save_context(struct drm_device *, int channel); extern int nv30_graph_save_context(struct nouveau_channel *);
/* nv40_graph.c */ /* nv40_graph.c */
extern int nv40_graph_init(struct drm_device *); extern int nv40_graph_init(struct drm_device *);
extern void nv40_graph_takedown(struct drm_device *); extern void nv40_graph_takedown(struct drm_device *);
extern int nv40_graph_create_context(struct drm_device *, int channel); extern int nv40_graph_create_context(struct nouveau_channel *);
extern void nv40_graph_destroy_context(struct drm_device *, int channel); extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct drm_device *, int channel); extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_save_context(struct drm_device *, int channel); extern int nv40_graph_save_context(struct nouveau_channel *);
/* nv50_graph.c */ /* nv50_graph.c */
extern int nv50_graph_init(struct drm_device *); extern int nv50_graph_init(struct drm_device *);
extern void nv50_graph_takedown(struct drm_device *); extern void nv50_graph_takedown(struct drm_device *);
extern int nv50_graph_create_context(struct drm_device *, int channel); extern int nv50_graph_create_context(struct nouveau_channel *);
extern void nv50_graph_destroy_context(struct drm_device *, int channel); extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct drm_device *, int channel); extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_save_context(struct drm_device *, int channel); extern int nv50_graph_save_context(struct nouveau_channel *);
/* nv04_instmem.c */ /* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *dev); extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *dev); extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_populate(struct drm_device*, struct nouveau_gpuobj*, extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
uint32_t *size); uint32_t *size);
extern void nv04_instmem_clear(struct drm_device*, struct nouveau_gpuobj*); extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_bind(struct drm_device*, struct nouveau_gpuobj*); extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_unbind(struct drm_device*, struct nouveau_gpuobj*); extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
/* nv50_instmem.c */ /* nv50_instmem.c */
extern int nv50_instmem_init(struct drm_device *dev); extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *dev); extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_populate(struct drm_device*, struct nouveau_gpuobj*, extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
uint32_t *size); uint32_t *size);
extern void nv50_instmem_clear(struct drm_device*, struct nouveau_gpuobj*); extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_bind(struct drm_device*, struct nouveau_gpuobj*); extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_unbind(struct drm_device*, struct nouveau_gpuobj*); extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
/* nv04_mc.c */ /* nv04_mc.c */
extern int nv04_mc_init(struct drm_device *dev); extern int nv04_mc_init(struct drm_device *);
extern void nv04_mc_takedown(struct drm_device *dev); extern void nv04_mc_takedown(struct drm_device *);
/* nv40_mc.c */ /* nv40_mc.c */
extern int nv40_mc_init(struct drm_device *dev); extern int nv40_mc_init(struct drm_device *);
extern void nv40_mc_takedown(struct drm_device *dev); extern void nv40_mc_takedown(struct drm_device *);
/* nv50_mc.c */ /* nv50_mc.c */
extern int nv50_mc_init(struct drm_device *dev); extern int nv50_mc_init(struct drm_device *);
extern void nv50_mc_takedown(struct drm_device *dev); extern void nv50_mc_takedown(struct drm_device *);
/* nv04_timer.c */ /* nv04_timer.c */
extern int nv04_timer_init(struct drm_device *dev); extern int nv04_timer_init(struct drm_device *);
extern uint64_t nv04_timer_read(struct drm_device *dev); extern uint64_t nv04_timer_read(struct drm_device *);
extern void nv04_timer_takedown(struct drm_device *dev); extern void nv04_timer_takedown(struct drm_device *);
extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg); unsigned long arg);
#if defined(__powerpc__) #if defined(__powerpc__)
#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) ) #define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )

View file

@ -34,8 +34,6 @@ int nouveau_fifo_number(struct drm_device *dev)
struct drm_nouveau_private *dev_priv=dev->dev_private; struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type) switch(dev_priv->card_type)
{ {
case NV_03:
return 8;
case NV_04: case NV_04:
case NV_05: case NV_05:
return 16; return 16;
@ -84,9 +82,16 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev)
{ {
case NV_50: case NV_50:
case NV_40: case NV_40:
switch (dev_priv->chipset) {
case 0x47:
case 0x49:
case 0x4b:
NV_WRITE(0x2230, 1);
break;
default:
break;
}
NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
if((dev_priv->chipset == 0x49) || (dev_priv->chipset == 0x4b))
NV_WRITE(0x2230,0x00000001);
break; break;
case NV_44: case NV_44:
NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
@ -102,7 +107,6 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev)
case NV_11: case NV_11:
case NV_10: case NV_10:
case NV_04: case NV_04:
case NV_03:
NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8); NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
break; break;
} }
@ -120,6 +124,10 @@ int nouveau_fifo_init(struct drm_device *dev)
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PFIFO); NV_PMC_ENABLE_PFIFO);
/* Enable PFIFO error reporting */
NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
ret = nouveau_fifo_instmem_configure(dev); ret = nouveau_fifo_instmem_configure(dev);
@ -186,72 +194,47 @@ int nouveau_fifo_init(struct drm_device *dev)
} }
static int static int
nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct mem_block *pb = chan->pushbuf_mem;
struct nouveau_config *config = &dev_priv->config;
struct mem_block *cb;
int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
struct nouveau_gpuobj *pushbuf = NULL; struct nouveau_gpuobj *pushbuf = NULL;
int ret; int ret;
/* Defaults for unconfigured values */ if (pb->flags & NOUVEAU_MEM_AGP) {
if (!config->cmdbuf.location) ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
config->cmdbuf.location = NOUVEAU_MEM_FB;
if (!config->cmdbuf.size || config->cmdbuf.size < cb_min_size)
config->cmdbuf.size = cb_min_size;
cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
(struct drm_file *)-2);
if (!cb) {
DRM_ERROR("Couldn't allocate DMA command buffer.\n");
return -ENOMEM;
}
if (cb->flags & NOUVEAU_MEM_AGP) {
ret = nouveau_gpuobj_gart_dma_new(dev, channel,
cb->start, cb->size,
NV_DMA_ACCESS_RO, NV_DMA_ACCESS_RO,
&pushbuf, &pushbuf,
&chan->pushbuf_base); &chan->pushbuf_base);
} else } else
if (cb->flags & NOUVEAU_MEM_PCI) { if (pb->flags & NOUVEAU_MEM_PCI) {
ret = nouveau_gpuobj_dma_new(dev, channel, ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
NV_CLASS_DMA_IN_MEMORY, pb->start, pb->size,
cb->start, cb->size,
NV_DMA_ACCESS_RO, NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI_NONLINEAR, NV_DMA_TARGET_PCI_NONLINEAR,
&pushbuf); &pushbuf);
chan->pushbuf_base = 0; chan->pushbuf_base = 0;
} else if (dev_priv->card_type != NV_04) { } else if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
(dev, channel, NV_CLASS_DMA_IN_MEMORY, pb->start, pb->size,
cb->start, NV_DMA_ACCESS_RO,
cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, NV_DMA_TARGET_VIDMEM, &pushbuf);
&pushbuf);
chan->pushbuf_base = 0; chan->pushbuf_base = 0;
} else { } else {
/* NV04 cmdbuf hack, from original ddx.. not sure of it's /* NV04 cmdbuf hack, from original ddx.. not sure of it's
* exact reason for existing :) PCI access to cmdbuf in * exact reason for existing :) PCI access to cmdbuf in
* VRAM. * VRAM.
*/ */
ret = nouveau_gpuobj_dma_new ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
(dev, channel, NV_CLASS_DMA_IN_MEMORY, pb->start +
cb->start + drm_get_resource_start(dev, 1), drm_get_resource_start(dev, 1),
cb->size, NV_DMA_ACCESS_RO, pb->size, NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf); NV_DMA_TARGET_PCI, &pushbuf);
chan->pushbuf_base = 0; chan->pushbuf_base = 0;
} }
if (ret) { if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
nouveau_mem_free(dev, cb);
DRM_ERROR("Error creating push buffer ctxdma: %d\n", ret);
return ret;
}
if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf,
&chan->pushbuf))) { &chan->pushbuf))) {
DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret); DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
if (pushbuf != dev_priv->gart_info.sg_ctxdma) if (pushbuf != dev_priv->gart_info.sg_ctxdma)
@ -259,19 +242,42 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
return ret; return ret;
} }
chan->pushbuf_mem = cb;
return 0; return 0;
} }
static struct mem_block *
nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_config *config = &dev_priv->config;
struct mem_block *pb;
int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
/* Defaults for unconfigured values */
if (!config->cmdbuf.location)
config->cmdbuf.location = NOUVEAU_MEM_FB;
if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
config->cmdbuf.size = pb_min_size;
pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
(struct drm_file *)-2);
if (!pb)
DRM_ERROR("Couldn't allocate DMA push buffer.\n");
return pb;
}
/* allocates and initializes a fifo for user space consumption */ /* allocates and initializes a fifo for user space consumption */
int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret, int
struct drm_file *file_priv, nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
uint32_t vram_handle, uint32_t tt_handle) struct drm_file *file_priv, struct mem_block *pushbuf,
uint32_t vram_handle, uint32_t tt_handle)
{ {
int ret; int ret;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
struct nouveau_fifo *chan; struct nouveau_channel *chan;
int channel; int channel;
/* /*
@ -283,44 +289,44 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret,
* When there are no more contexts, you lost * When there are no more contexts, you lost
*/ */
for(channel=0; channel<nouveau_fifo_number(dev); channel++) { for(channel=0; channel<nouveau_fifo_number(dev); channel++) {
if ((dev_priv->card_type == NV_50) && (channel == 0))
continue;
if (dev_priv->fifos[channel] == NULL) if (dev_priv->fifos[channel] == NULL)
break; break;
} }
/* no more fifos. you lost. */ /* no more fifos. you lost. */
if (channel==nouveau_fifo_number(dev)) if (channel==nouveau_fifo_number(dev))
return -EINVAL; return -EINVAL;
(*chan_ret) = channel;
dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_fifo), dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
DRM_MEM_DRIVER); DRM_MEM_DRIVER);
if (!dev_priv->fifos[channel]) if (!dev_priv->fifos[channel])
return -ENOMEM; return -ENOMEM;
dev_priv->fifo_alloc_count++; dev_priv->fifo_alloc_count++;
chan = dev_priv->fifos[channel]; chan = dev_priv->fifos[channel];
chan->dev = dev;
chan->id = channel;
chan->file_priv = file_priv; chan->file_priv = file_priv;
chan->pushbuf_mem = pushbuf;
DRM_INFO("Allocating FIFO number %d\n", channel); DRM_INFO("Allocating FIFO number %d\n", channel);
/* Setup channel's default objects */
ret = nouveau_gpuobj_channel_init(dev, channel, vram_handle, tt_handle);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
/* allocate a command buffer, and create a dma object for the gpu */
ret = nouveau_fifo_cmdbuf_alloc(dev, channel);
if (ret) {
nouveau_fifo_free(dev, channel);
return ret;
}
/* Allocate space for per-channel fixed notifier memory */ /* Allocate space for per-channel fixed notifier memory */
ret = nouveau_notifier_init_channel(dev, channel, file_priv); ret = nouveau_notifier_init_channel(chan);
if (ret) { if (ret) {
nouveau_fifo_free(dev, channel); nouveau_fifo_free(chan);
return ret;
}
/* Setup channel's default objects */
ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
if (ret) {
nouveau_fifo_free(chan);
return ret;
}
/* Create a dma object for the push buffer */
ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
if (ret) {
nouveau_fifo_free(chan);
return ret; return ret;
} }
@ -333,16 +339,16 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret,
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000); NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
/* Create a graphics context for new channel */ /* Create a graphics context for new channel */
ret = engine->graph.create_context(dev, channel); ret = engine->graph.create_context(chan);
if (ret) { if (ret) {
nouveau_fifo_free(dev, channel); nouveau_fifo_free(chan);
return ret; return ret;
} }
/* Construct inital RAMFC for new channel */ /* Construct inital RAMFC for new channel */
ret = engine->fifo.create_context(dev, channel); ret = engine->fifo.create_context(chan);
if (ret) { if (ret) {
nouveau_fifo_free(dev, channel); nouveau_fifo_free(chan);
return ret; return ret;
} }
@ -359,30 +365,17 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret,
* other case, the GPU will handle this when it switches contexts. * other case, the GPU will handle this when it switches contexts.
*/ */
if (dev_priv->fifo_alloc_count == 1) { if (dev_priv->fifo_alloc_count == 1) {
ret = engine->fifo.load_context(dev, channel); ret = engine->fifo.load_context(chan);
if (ret) { if (ret) {
nouveau_fifo_free(dev, channel); nouveau_fifo_free(chan);
return ret; return ret;
} }
ret = engine->graph.load_context(dev, channel); ret = engine->graph.load_context(chan);
if (ret) { if (ret) {
nouveau_fifo_free(dev, channel); nouveau_fifo_free(chan);
return ret; return ret;
} }
/* Temporary hack, to avoid breaking Xv on cards where the
* initial context value for 0x400710 doesn't have these bits
* set. Proper fix would be to find which object+method is
* responsible for modifying this state.
*/
if (dev_priv->chipset >= 0x10 && dev_priv->chipset < 0x50) {
uint32_t tmp;
tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
}
} }
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
@ -395,32 +388,28 @@ int nouveau_fifo_alloc(struct drm_device *dev, int *chan_ret,
NV_WRITE(NV03_PFIFO_CACHES, 1); NV_WRITE(NV03_PFIFO_CACHES, 1);
DRM_INFO("%s: initialised FIFO %d\n", __func__, channel); DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
*chan_ret = chan;
return 0; return 0;
} }
/* stops a fifo */ /* stops a fifo */
void nouveau_fifo_free(struct drm_device *dev, int channel) void nouveau_fifo_free(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (!chan) { DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);
DRM_ERROR("Freeing non-existant channel %d\n", channel);
return;
}
DRM_INFO("%s: freeing fifo %d\n", __func__, channel);
/* disable the fifo caches */ /* disable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000000); NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
// FIXME XXX needs more code // FIXME XXX needs more code
engine->fifo.destroy_context(dev, channel); engine->fifo.destroy_context(chan);
/* Cleanup PGRAPH state */ /* Cleanup PGRAPH state */
engine->graph.destroy_context(dev, channel); engine->graph.destroy_context(chan);
/* reenable the fifo caches */ /* reenable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000001); NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
@ -432,12 +421,12 @@ void nouveau_fifo_free(struct drm_device *dev, int channel)
chan->pushbuf_mem = NULL; chan->pushbuf_mem = NULL;
} }
nouveau_notifier_takedown_channel(dev, channel);
/* Destroy objects belonging to the channel */ /* Destroy objects belonging to the channel */
nouveau_gpuobj_channel_takedown(dev, channel); nouveau_gpuobj_channel_takedown(chan);
dev_priv->fifos[channel] = NULL; nouveau_notifier_takedown_channel(chan);
dev_priv->fifos[chan->id] = NULL;
dev_priv->fifo_alloc_count--; dev_priv->fifo_alloc_count--;
drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER); drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
} }
@ -445,14 +434,16 @@ void nouveau_fifo_free(struct drm_device *dev, int channel)
/* cleanups all the fifos from file_priv */ /* cleanups all the fifos from file_priv */
void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv) void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{ {
int i;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
DRM_DEBUG("clearing FIFO enables from file_priv\n"); DRM_DEBUG("clearing FIFO enables from file_priv\n");
for(i=0;i<nouveau_fifo_number(dev);i++) for(i = 0; i < nouveau_fifo_number(dev); i++) {
if (dev_priv->fifos[i] && struct nouveau_channel *chan = dev_priv->fifos[i];
dev_priv->fifos[i]->file_priv==file_priv)
nouveau_fifo_free(dev,i); if (chan && chan->file_priv == file_priv)
nouveau_fifo_free(chan);
}
} }
int int
@ -472,24 +463,31 @@ nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
* ioctls wrapping the functions * ioctls wrapping the functions
***********************************/ ***********************************/
static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_fifo_alloc *init = data; struct drm_nouveau_channel_alloc *init = data;
struct drm_map_list *entry; struct drm_map_list *entry;
struct nouveau_fifo *chan; struct nouveau_channel *chan;
struct mem_block *pushbuf;
int res; int res;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
return -EINVAL; return -EINVAL;
res = nouveau_fifo_alloc(dev, &init->channel, file_priv, pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
if (!pushbuf)
return -ENOMEM;
res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
init->fb_ctxdma_handle, init->fb_ctxdma_handle,
init->tt_ctxdma_handle); init->tt_ctxdma_handle);
if (res) if (res)
return res; return res;
chan = dev_priv->fifos[init->channel]; init->channel = chan->id;
init->put_base = chan->pushbuf_base; init->put_base = chan->pushbuf_base;
/* make the fifo available to user space */ /* make the fifo available to user space */
@ -523,18 +521,34 @@ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct d
return 0; return 0;
} }
static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_channel_free *cfree = data;
struct nouveau_channel *chan;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
nouveau_fifo_free(chan);
return 0;
}
/*********************************** /***********************************
* finally, the ioctl table * finally, the ioctl table
***********************************/ ***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = { struct drm_ioctl_desc nouveau_ioctls[] = {
DRM_IOCTL_DEF(DRM_NOUVEAU_FIFO_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIER_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
}; };
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);

View file

@ -40,33 +40,6 @@ void nouveau_irq_preinstall(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
DRM_DEBUG("IRQ: preinst\n");
if (!dev_priv) {
DRM_ERROR("AIII, no dev_priv\n");
return;
}
if (!dev_priv->mmio) {
DRM_ERROR("AIII, no dev_priv->mmio\n");
return;
}
/* Disable/Clear PFIFO interrupts */
NV_WRITE(NV03_PFIFO_INTR_EN_0, 0);
NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
/* Disable/Clear PGRAPH interrupts */
if (dev_priv->card_type<NV_40)
NV_WRITE(NV03_PGRAPH_INTR_EN, 0);
else
NV_WRITE(NV40_PGRAPH_INTR_EN, 0);
NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
#if 0
/* Disable/Clear CRTC0/1 interrupts */
NV_WRITE(NV_CRTC0_INTEN, 0);
NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
NV_WRITE(NV_CRTC1_INTEN, 0);
NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
#endif
/* Master disable */ /* Master disable */
NV_WRITE(NV03_PMC_INTR_EN_0, 0); NV_WRITE(NV03_PMC_INTR_EN_0, 0);
} }
@ -75,34 +48,6 @@ void nouveau_irq_postinstall(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_ERROR("AIII, no dev_priv\n");
return;
}
if (!dev_priv->mmio) {
DRM_ERROR("AIII, no dev_priv->mmio\n");
return;
}
DRM_DEBUG("IRQ: postinst\n");
/* Enable PFIFO error reporting */
NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
/* Enable PGRAPH interrupts */
if (dev_priv->card_type<NV_40)
NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
else
NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
#if 0
/* Enable CRTC0/1 interrupts */
NV_WRITE(NV_CRTC0_INTEN, NV_CRTC_INTR_VBLANK);
NV_WRITE(NV_CRTC1_INTEN, NV_CRTC_INTR_VBLANK);
#endif
/* Master enable */ /* Master enable */
NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
} }
@ -111,29 +56,6 @@ void nouveau_irq_uninstall(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_ERROR("AIII, no dev_priv\n");
return;
}
if (!dev_priv->mmio) {
DRM_ERROR("AIII, no dev_priv->mmio\n");
return;
}
DRM_DEBUG("IRQ: uninst\n");
/* Disable PFIFO interrupts */
NV_WRITE(NV03_PFIFO_INTR_EN_0, 0);
/* Disable PGRAPH interrupts */
if (dev_priv->card_type<NV_40)
NV_WRITE(NV03_PGRAPH_INTR_EN, 0);
else
NV_WRITE(NV40_PGRAPH_INTR_EN, 0);
#if 0
/* Disable CRTC0/1 interrupts */
NV_WRITE(NV_CRTC0_INTEN, 0);
NV_WRITE(NV_CRTC1_INTEN, 0);
#endif
/* Master disable */ /* Master disable */
NV_WRITE(NV03_PMC_INTR_EN_0, 0); NV_WRITE(NV03_PMC_INTR_EN_0, 0);
} }
@ -150,12 +72,10 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev)
chstat = NV_READ(NV04_PFIFO_DMA); chstat = NV_READ(NV04_PFIFO_DMA);
channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
DRM_DEBUG("NV: PFIFO interrupt! Channel=%d, INTSTAT=0x%08x/MODE=0x%08x/PEND=0x%08x\n", channel, status, chmode, chstat);
if (status & NV_PFIFO_INTR_CACHE_ERROR) { if (status & NV_PFIFO_INTR_CACHE_ERROR) {
uint32_t c1get, c1method, c1data; uint32_t c1get, c1method, c1data;
DRM_ERROR("NV: PFIFO error interrupt\n"); DRM_ERROR("PFIFO error interrupt\n");
c1get = NV_READ(NV03_PFIFO_CACHE1_GET) >> 2; c1get = NV_READ(NV03_PFIFO_CACHE1_GET) >> 2;
if (dev_priv->card_type < NV_40) { if (dev_priv->card_type < NV_40) {
@ -167,17 +87,17 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev)
c1data = NV_READ(NV40_PFIFO_CACHE1_DATA(c1get)); c1data = NV_READ(NV40_PFIFO_CACHE1_DATA(c1get));
} }
DRM_ERROR("NV: Channel %d/%d - Method 0x%04x, Data 0x%08x\n", DRM_ERROR("Channel %d/%d - Method 0x%04x, Data 0x%08x\n",
channel, (c1method >> 13) & 7, channel, (c1method >> 13) & 7, c1method & 0x1ffc,
c1method & 0x1ffc, c1data c1data);
);
status &= ~NV_PFIFO_INTR_CACHE_ERROR; status &= ~NV_PFIFO_INTR_CACHE_ERROR;
NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
} }
if (status & NV_PFIFO_INTR_DMA_PUSHER) { if (status & NV_PFIFO_INTR_DMA_PUSHER) {
DRM_INFO("NV: PFIFO DMA pusher interrupt\n"); DRM_ERROR("PFIFO DMA pusher interrupt: ch%d, 0x%08x\n",
channel, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
status &= ~NV_PFIFO_INTR_DMA_PUSHER; status &= ~NV_PFIFO_INTR_DMA_PUSHER;
NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER); NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER);
@ -191,7 +111,7 @@ static void nouveau_fifo_irq_handler(struct drm_device *dev)
} }
if (status) { if (status) {
DRM_INFO("NV: unknown PFIFO interrupt. status=0x%08x\n", status); DRM_ERROR("Unhandled PFIFO interrupt: status=0x%08x\n", status);
NV_WRITE(NV03_PFIFO_INTR_0, status); NV_WRITE(NV03_PFIFO_INTR_0, status);
} }
@ -301,20 +221,86 @@ nouveau_print_bitfield_names(uint32_t value,
printk(" (unknown bits 0x%08x)", value); printk(" (unknown bits 0x%08x)", value);
} }
static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int channel;
if (dev_priv->card_type < NV_10) {
channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
} else if (dev_priv->card_type < NV_40) {
channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
} else
if (dev_priv->card_type < NV_50) {
uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 4;
/* 0x400704 *sometimes* contains a sensible channel ID, but
* mostly not.. for now lookup which channel owns the active
* PGRAPH context. Probably a better way, but this'll do
* for now.
*/
for (channel = 0; channel < 32; channel++) {
if (dev_priv->fifos[channel] == NULL)
continue;
if (cur_grctx ==
dev_priv->fifos[channel]->ramin_grctx->instance)
break;
}
if (channel == 32) {
DRM_ERROR("AIII, unable to determine active channel "
"from PGRAPH context 0x%08x\n", cur_grctx);
return -EINVAL;
}
} else {
uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 12;
for (channel = 0; channel < 128; channel++) {
if (dev_priv->fifos[channel] == NULL)
continue;
if (cur_grctx ==
dev_priv->fifos[channel]->ramin_grctx->instance)
break;
}
if (channel == 128) {
DRM_ERROR("AIII, unable to determine active channel "
"from PGRAPH context 0x%08x\n", cur_grctx);
return -EINVAL;
}
}
if (channel > nouveau_fifo_number(dev) ||
dev_priv->fifos[channel] == NULL) {
DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel);
return -EINVAL;
}
*channel_ret = channel;
return 0;
}
static void static void
nouveau_graph_dump_trap_info(struct drm_device *dev) nouveau_graph_dump_trap_info(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t address; uint32_t address;
uint32_t channel, class; uint32_t channel, class;
uint32_t method, subc, data; uint32_t method, subc, data, data2;
uint32_t nsource, nstatus; uint32_t nsource, nstatus;
address = NV_READ(0x400704); if (nouveau_graph_trapped_channel(dev, &channel))
channel = (address >> 20) & 0x1F; channel = -1;
subc = (address >> 16) & 0x7;
data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
method = address & 0x1FFC; method = address & 0x1FFC;
data = NV_READ(0x400708); if (dev_priv->card_type < NV_10) {
subc = (address >> 13) & 0x7;
data2= 0;
} else {
subc = (address >> 16) & 0x7;
data2= NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
}
nsource = NV_READ(NV03_PGRAPH_NSOURCE); nsource = NV_READ(NV03_PGRAPH_NSOURCE);
nstatus = NV_READ(NV03_PGRAPH_NSTATUS); nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
if (dev_priv->card_type < NV_50) { if (dev_priv->card_type < NV_50) {
@ -331,77 +317,31 @@ nouveau_graph_dump_trap_info(struct drm_device *dev)
ARRAY_SIZE(nouveau_nstatus_names)); ARRAY_SIZE(nouveau_nstatus_names));
printk("\n"); printk("\n");
DRM_ERROR("NV: Channel %d/%d (class 0x%04x) - " DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x:0x%08x\n",
"Method 0x%04x, Data 0x%08x\n", channel, subc, class, method, data2, data);
channel, subc, class, method, data
);
} }
static void nouveau_pgraph_irq_handler(struct drm_device *dev) static void nouveau_pgraph_irq_handler(struct drm_device *dev)
{ {
uint32_t status;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t status, nsource;
status = NV_READ(NV03_PGRAPH_INTR); status = NV_READ(NV03_PGRAPH_INTR);
if (!status) if (!status)
return; return;
nsource = NV_READ(NV03_PGRAPH_NSOURCE);
if (status & NV_PGRAPH_INTR_NOTIFY) { if (status & NV_PGRAPH_INTR_NOTIFY) {
uint32_t nsource, nstatus, instance, notify; DRM_DEBUG("PGRAPH notify interrupt\n");
DRM_DEBUG("NV: PGRAPH notify interrupt\n");
nstatus = NV_READ(NV03_PGRAPH_NSTATUS); nouveau_graph_dump_trap_info(dev);
nsource = NV_READ(NV03_PGRAPH_NSOURCE);
DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
/* if this wasn't NOTIFICATION_PENDING, dump extra trap info */
if (nsource & ~(1<<0)) {
nouveau_graph_dump_trap_info(dev);
} else {
instance = NV_READ(0x00400158);
notify = NV_READ(0x00400150) >> 16;
DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n",
instance, notify);
}
status &= ~NV_PGRAPH_INTR_NOTIFY; status &= ~NV_PGRAPH_INTR_NOTIFY;
NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
} }
if (status & NV_PGRAPH_INTR_BUFFER_NOTIFY) {
uint32_t nsource, nstatus, instance, notify;
DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n");
nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
nsource = NV_READ(NV03_PGRAPH_NSOURCE);
DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
instance = NV_READ(0x00400158);
notify = NV_READ(0x00400150) >> 16;
DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify);
status &= ~NV_PGRAPH_INTR_BUFFER_NOTIFY;
NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_BUFFER_NOTIFY);
}
if (status & NV_PGRAPH_INTR_MISSING_HW) {
DRM_ERROR("NV: PGRAPH missing hw interrupt\n");
status &= ~NV_PGRAPH_INTR_MISSING_HW;
NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_MISSING_HW);
}
if (status & NV_PGRAPH_INTR_ERROR) { if (status & NV_PGRAPH_INTR_ERROR) {
uint32_t nsource, nstatus, instance; DRM_ERROR("PGRAPH error interrupt\n");
DRM_ERROR("NV: PGRAPH error interrupt\n");
nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
nsource = NV_READ(NV03_PGRAPH_NSOURCE);
DRM_ERROR("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
instance = NV_READ(0x00400158);
DRM_ERROR("instance:0x%08x\n", instance);
nouveau_graph_dump_trap_info(dev); nouveau_graph_dump_trap_info(dev);
@ -411,7 +351,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev)
if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
uint32_t channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); uint32_t channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
DRM_INFO("NV: PGRAPH context switch interrupt channel %x\n",channel); DRM_DEBUG("PGRAPH context switch interrupt channel %x\n",channel);
switch(dev_priv->card_type) switch(dev_priv->card_type)
{ {
case NV_04: case NV_04:
@ -428,7 +368,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev)
nouveau_nv20_context_switch(dev); nouveau_nv20_context_switch(dev);
break; break;
default: default:
DRM_INFO("NV: Context switch not implemented\n"); DRM_ERROR("Context switch not implemented\n");
break; break;
} }
@ -437,7 +377,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev)
} }
if (status) { if (status) {
DRM_INFO("NV: Unknown PGRAPH interrupt! STAT=0x%08x\n", status); DRM_ERROR("Unhandled PGRAPH interrupt: STAT=0x%08x\n", status);
NV_WRITE(NV03_PGRAPH_INTR, status); NV_WRITE(NV03_PGRAPH_INTR, status);
} }
@ -447,6 +387,7 @@ static void nouveau_pgraph_irq_handler(struct drm_device *dev)
static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
if (crtc&1) { if (crtc&1) {
NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
} }
@ -466,16 +407,16 @@ irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS)
if (!status) if (!status)
return IRQ_NONE; return IRQ_NONE;
DRM_DEBUG("PMC INTSTAT: 0x%08x\n", status);
if (status & NV_PMC_INTR_0_PFIFO_PENDING) { if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev); nouveau_fifo_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PFIFO_PENDING; status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
} }
if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
nouveau_pgraph_irq_handler(dev); nouveau_pgraph_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
} }
if (status & NV_PMC_INTR_0_CRTCn_PENDING) { if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
nouveau_crtc_irq_handler(dev, (status>>24)&3); nouveau_crtc_irq_handler(dev, (status>>24)&3);
status &= ~NV_PMC_INTR_0_CRTCn_PENDING; status &= ~NV_PMC_INTR_0_CRTCn_PENDING;

View file

@ -219,24 +219,44 @@ void nouveau_mem_close(struct drm_device *dev)
nouveau_mem_takedown(&dev_priv->pci_heap); nouveau_mem_takedown(&dev_priv->pci_heap);
} }
/*XXX won't work on BSD because of pci_read_config_dword */
static uint32_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
#if defined(LINUX) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pci_dev *bridge;
uint32_t mem;
bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
if (!bridge) {
DRM_ERROR("no bridge device\n");
return 0;
}
if (dev_priv->flags&NV_NFORCE) {
pci_read_config_dword(bridge, 0x7C, &mem);
return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
} else
if(dev_priv->flags&NV_NFORCE2) {
pci_read_config_dword(bridge, 0x84, &mem);
return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
}
DRM_ERROR("impossible!\n");
#else
DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
#endif
return 0;
}
/* returns the amount of FB ram in bytes */ /* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev) uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv=dev->dev_private; struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type) switch(dev_priv->card_type)
{ {
case NV_03:
switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
{
case NV03_BOOT_0_RAM_AMOUNT_8MB:
case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM:
return 8*1024*1024;
case NV03_BOOT_0_RAM_AMOUNT_4MB:
return 4*1024*1024;
case NV03_BOOT_0_RAM_AMOUNT_2MB:
return 2*1024*1024;
}
break;
case NV_04: case NV_04:
case NV_05: case NV_05:
if (NV_READ(NV03_BOOT_0) & 0x00000100) { if (NV_READ(NV03_BOOT_0) & 0x00000100) {
@ -263,18 +283,14 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
case NV_44: case NV_44:
case NV_50: case NV_50:
default: default:
// XXX won't work on BSD because of pci_read_config_dword if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
if (dev_priv->flags&NV_NFORCE) { return nouveau_mem_fb_amount_igp(dev);
uint32_t mem;
pci_read_config_dword(dev->pdev, 0x7C, &mem);
return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
} else if(dev_priv->flags&NV_NFORCE2) {
uint32_t mem;
pci_read_config_dword(dev->pdev, 0x84, &mem);
return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
} else { } else {
uint64_t mem; uint64_t mem;
mem=(NV_READ(NV04_FIFO_DATA)&NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
mem = (NV_READ(NV04_FIFO_DATA) &
NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
return mem*1024*1024; return mem*1024*1024;
} }
break; break;
@ -411,11 +427,11 @@ int nouveau_mem_init(struct drm_device *dev)
struct drm_scatter_gather sgreq; struct drm_scatter_gather sgreq;
DRM_DEBUG("Allocating sg memory for PCI DMA\n"); DRM_DEBUG("Allocating sg memory for PCI DMA\n");
sgreq.size = 4 << 20; //4MB of PCI scatter-gather zone sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone
if (drm_sg_alloc(dev, &sgreq)) { if (drm_sg_alloc(dev, &sgreq)) {
DRM_ERROR("Unable to allocate 4MB of scatter-gather" DRM_ERROR("Unable to allocate %dMB of scatter-gather"
" pages for PCI DMA!"); " pages for PCI DMA!",sgreq.size>>20);
} else { } else {
if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
dev->sg->pages * PAGE_SIZE)) { dev->sg->pages * PAGE_SIZE)) {
@ -531,13 +547,13 @@ alloc_ok:
block->map_handle = entry->user_token; block->map_handle = entry->user_token;
} }
DRM_INFO("allocated 0x%llx\n", block->start); DRM_DEBUG("allocated 0x%llx type=0x%08x\n", block->start, block->flags);
return block; return block;
} }
void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
{ {
DRM_INFO("freeing 0x%llx\n", block->start); DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
if (block->flags&NOUVEAU_MEM_MAPPED) if (block->flags&NOUVEAU_MEM_MAPPED)
drm_rmmap(dev, block->map); drm_rmmap(dev, block->map);
nouveau_mem_free_block(block); nouveau_mem_free_block(block);
@ -549,14 +565,10 @@ void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_mem_alloc *alloc = data; struct drm_nouveau_mem_alloc *alloc = data;
struct mem_block *block; struct mem_block *block;
if (!dev_priv) { NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return -EINVAL;
}
block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size, block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
alloc->flags, file_priv); alloc->flags, file_priv);
@ -575,6 +587,8 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *
struct drm_nouveau_mem_free *memfree = data; struct drm_nouveau_mem_free *memfree = data;
struct mem_block *block; struct mem_block *block;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
block=NULL; block=NULL;
if (memfree->flags & NOUVEAU_MEM_FB) if (memfree->flags & NOUVEAU_MEM_FB)
block = find_block(dev_priv->fb_heap, memfree->offset); block = find_block(dev_priv->fb_heap, memfree->offset);

View file

@ -30,25 +30,27 @@
#include "nouveau_drv.h" #include "nouveau_drv.h"
int int
nouveau_notifier_init_channel(struct drm_device *dev, int channel, nouveau_notifier_init_channel(struct nouveau_channel *chan)
struct drm_file *file_priv)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int flags, ret; int flags, ret;
/*TODO: PCI notifier blocks */ /*TODO: PCI notifier blocks */
if (dev_priv->agp_heap && if (dev_priv->agp_heap)
dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) flags = NOUVEAU_MEM_AGP;
flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE; else if (dev_priv->pci_heap)
flags = NOUVEAU_MEM_PCI;
else else
flags = NOUVEAU_MEM_FB; flags = NOUVEAU_MEM_FB;
flags |= NOUVEAU_MEM_MAPPED; flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE);
chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags, chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
file_priv); (struct drm_file *)-2);
if (!chan->notifier_block) if (!chan->notifier_block)
return -ENOMEM; return -ENOMEM;
DRM_DEBUG("Allocated notifier block in 0x%08x\n",
chan->notifier_block->flags);
ret = nouveau_mem_init_heap(&chan->notifier_heap, ret = nouveau_mem_init_heap(&chan->notifier_heap,
0, chan->notifier_block->size); 0, chan->notifier_block->size);
@ -59,25 +61,34 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel,
} }
void void
nouveau_notifier_takedown_channel(struct drm_device *dev, int channel) nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_device *dev = chan->dev;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (chan->notifier_block) { if (chan->notifier_block) {
nouveau_mem_free(dev, chan->notifier_block); nouveau_mem_free(dev, chan->notifier_block);
chan->notifier_block = NULL; chan->notifier_block = NULL;
} }
/*XXX: heap destroy */ nouveau_mem_takedown(&chan->notifier_heap);
}
static void
nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
struct nouveau_gpuobj *gpuobj)
{
DRM_DEBUG("\n");
if (gpuobj->priv)
nouveau_mem_free_block(gpuobj->priv);
} }
int int
nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle, nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int count, uint32_t *b_offset) int count, uint32_t *b_offset)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
struct nouveau_gpuobj *nobj = NULL; struct nouveau_gpuobj *nobj = NULL;
struct mem_block *mem; struct mem_block *mem;
uint32_t offset; uint32_t offset;
@ -85,38 +96,53 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle,
if (!chan->notifier_heap) { if (!chan->notifier_heap) {
DRM_ERROR("Channel %d doesn't have a notifier heap!\n", DRM_ERROR("Channel %d doesn't have a notifier heap!\n",
channel); chan->id);
return -EINVAL; return -EINVAL;
} }
mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0,
chan->file_priv); (struct drm_file *)-2);
if (!mem) { if (!mem) {
DRM_ERROR("Channel %d notifier block full\n", channel); DRM_ERROR("Channel %d notifier block full\n", chan->id);
return -ENOMEM; return -ENOMEM;
} }
mem->flags = NOUVEAU_MEM_NOTIFIER; mem->flags = NOUVEAU_MEM_NOTIFIER;
offset = chan->notifier_block->start + mem->start; offset = chan->notifier_block->start;
if (chan->notifier_block->flags & NOUVEAU_MEM_FB) { if (chan->notifier_block->flags & NOUVEAU_MEM_FB) {
target = NV_DMA_TARGET_VIDMEM; target = NV_DMA_TARGET_VIDMEM;
} else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) { } else
target = NV_DMA_TARGET_AGP; if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
dev_priv->card_type < NV_50) {
ret = nouveau_sgdma_get_page(dev, offset, &offset);
if (ret)
return ret;
target = NV_DMA_TARGET_PCI;
} else {
target = NV_DMA_TARGET_AGP;
}
} else
if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
target = NV_DMA_TARGET_PCI_NONLINEAR;
} else { } else {
DRM_ERROR("Bad DMA target, flags 0x%08x!\n", DRM_ERROR("Bad DMA target, flags 0x%08x!\n",
chan->notifier_block->flags); chan->notifier_block->flags);
return -EINVAL; return -EINVAL;
} }
offset += mem->start;
if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
offset, mem->size, offset, mem->size,
NV_DMA_ACCESS_RW, target, &nobj))) { NV_DMA_ACCESS_RW, target, &nobj))) {
nouveau_mem_free_block(mem); nouveau_mem_free_block(mem);
DRM_ERROR("Error creating notifier ctxdma: %d\n", ret); DRM_ERROR("Error creating notifier ctxdma: %d\n", ret);
return ret; return ret;
} }
nobj->dtor = nouveau_notifier_gpuobj_dtor;
nobj->priv = mem;
if ((ret = nouveau_gpuobj_ref_add(dev, channel, handle, nobj, NULL))) { if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) {
nouveau_gpuobj_del(dev, &nobj); nouveau_gpuobj_del(dev, &nobj);
nouveau_mem_free_block(mem); nouveau_mem_free_block(mem);
DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret); DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret);
@ -128,19 +154,17 @@ nouveau_notifier_alloc(struct drm_device *dev, int channel, uint32_t handle,
} }
int int
nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{ {
struct drm_nouveau_notifier_alloc *na = data; struct drm_nouveau_notifierobj_alloc *na = data;
struct nouveau_channel *chan;
int ret; int ret;
if (!nouveau_fifo_owner(dev, file_priv, na->channel)) { NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
DRM_ERROR("pid %d doesn't own channel %d\n", NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
DRM_CURRENTPID, na->channel);
return -EPERM;
}
ret = nouveau_notifier_alloc(dev, na->channel, na->handle, ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset);
na->count, &na->offset);
if (ret) if (ret)
return ret; return ret;

View file

@ -72,6 +72,8 @@ nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
uint32_t hash = 0; uint32_t hash = 0;
int i; int i;
DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle);
for (i=32;i>0;i-=dev_priv->ramht_bits) { for (i=32;i>0;i-=dev_priv->ramht_bits) {
hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
handle >>= dev_priv->ramht_bits; handle >>= dev_priv->ramht_bits;
@ -80,7 +82,7 @@ nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
hash ^= channel << (dev_priv->ramht_bits - 4); hash ^= channel << (dev_priv->ramht_bits - 4);
hash <<= 3; hash <<= 3;
DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash); DRM_DEBUG("hash=0x%08x\n", hash);
return hash; return hash;
} }
@ -100,7 +102,7 @@ static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{ {
struct drm_nouveau_private *dev_priv=dev->dev_private; struct drm_nouveau_private *dev_priv=dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
struct nouveau_gpuobj *gpuobj = ref->gpuobj; struct nouveau_gpuobj *gpuobj = ref->gpuobj;
uint32_t ctx, co, ho; uint32_t ctx, co, ho;
@ -131,14 +133,21 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
ref->channel, co, ref->handle, ctx); ref->channel, co, ref->handle, ctx);
INSTANCE_WR(ramht, (co + 0)/4, ref->handle); INSTANCE_WR(ramht, (co + 0)/4, ref->handle);
INSTANCE_WR(ramht, (co + 4)/4, ctx); INSTANCE_WR(ramht, (co + 4)/4, ctx);
list_add_tail(&ref->list, &chan->ramht_refs);
return 0; return 0;
} }
DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n",
ref->channel, co, INSTANCE_RD(ramht, co/4)); ref->channel, co, INSTANCE_RD(ramht, co/4));
co += 8; co += 8;
if (co >= dev_priv->ramht_size) if (co >= dev_priv->ramht_size) {
DRM_INFO("no space left after collision\n");
co = 0; co = 0;
/* exit as it seems to cause crash with nouveau_demo and
* 0xdead0001 object */
break;
}
} while (co != ho); } while (co != ho);
DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);
@ -149,7 +158,7 @@ static void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
uint32_t co, ho; uint32_t co, ho;
@ -167,6 +176,8 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
INSTANCE_RD(ramht, (co + 4))); INSTANCE_RD(ramht, (co + 4)));
INSTANCE_WR(ramht, (co + 0)/4, 0x00000000); INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
INSTANCE_WR(ramht, (co + 4)/4, 0x00000000); INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);
list_del(&ref->list);
return; return;
} }
@ -180,34 +191,30 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
} }
int int
nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align, nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
uint32_t flags, struct nouveau_gpuobj **gpuobj_ret) int size, int align, uint32_t flags,
struct nouveau_gpuobj **gpuobj_ret)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
struct nouveau_fifo *chan = NULL;
struct nouveau_gpuobj *gpuobj; struct nouveau_gpuobj *gpuobj;
struct mem_block *pramin = NULL; struct mem_block *pramin = NULL;
int ret; int ret;
DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n", DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
channel, size, align, flags); chan ? chan->id : -1, size, align, flags);
if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
return -EINVAL; return -EINVAL;
if (channel >= 0) {
if (channel > nouveau_fifo_number(dev))
return -EINVAL;
chan = dev_priv->fifos[channel];
}
gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
if (!gpuobj) if (!gpuobj)
return -ENOMEM; return -ENOMEM;
DRM_DEBUG("gpuobj %p\n", gpuobj); DRM_DEBUG("gpuobj %p\n", gpuobj);
gpuobj->flags = flags; gpuobj->flags = flags;
gpuobj->im_channel = channel; gpuobj->im_channel = chan ? chan->id : -1;
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
/* Choose between global instmem heap, and per-channel private /* Choose between global instmem heap, and per-channel private
* instmem heap. On <NV50 allow requests for private instmem * instmem heap. On <NV50 allow requests for private instmem
@ -260,24 +267,64 @@ nouveau_gpuobj_new(struct drm_device *dev, int channel, int size, int align,
INSTANCE_WR(gpuobj, i/4, 0); INSTANCE_WR(gpuobj, i/4, 0);
} }
if (dev_priv->gpuobj_all) {
gpuobj->next = dev_priv->gpuobj_all;
gpuobj->next->prev = gpuobj;
}
dev_priv->gpuobj_all = gpuobj;
*gpuobj_ret = gpuobj; *gpuobj_ret = gpuobj;
return 0; return 0;
} }
void nouveau_gpuobj_takedown(struct drm_device *dev) int
nouveau_gpuobj_early_init(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
DRM_DEBUG("\n"); DRM_DEBUG("\n");
while ((gpuobj = dev_priv->gpuobj_all)) { INIT_LIST_HEAD(&dev_priv->gpuobj_list);
return 0;
}
int
nouveau_gpuobj_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
DRM_DEBUG("\n");
if (dev_priv->card_type < NV_50) {
if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
~0, dev_priv->ramht_size,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ALLOW_NO_REFS,
&dev_priv->ramht, NULL)))
return ret;
}
return 0;
}
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
nouveau_gpuobj_del(dev, &dev_priv->ramht);
}
void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
struct list_head *entry, *tmp;
DRM_DEBUG("\n");
list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n", DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
gpuobj, gpuobj->refcount); gpuobj, gpuobj->refcount);
gpuobj->refcount = 0; gpuobj->refcount = 0;
@ -285,10 +332,11 @@ void nouveau_gpuobj_takedown(struct drm_device *dev)
} }
} }
int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
struct nouveau_gpuobj *gpuobj; struct nouveau_gpuobj *gpuobj;
DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
@ -302,7 +350,16 @@ int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
return -EINVAL; return -EINVAL;
} }
engine->instmem.clear(dev, gpuobj); if (gpuobj->dtor)
gpuobj->dtor(dev, gpuobj);
if (gpuobj->im_backing) {
if (gpuobj->flags & NVOBJ_FLAG_FAKE)
drm_free(gpuobj->im_backing,
sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER);
else
engine->instmem.clear(dev, gpuobj);
}
if (gpuobj->im_pramin) { if (gpuobj->im_pramin) {
if (gpuobj->flags & NVOBJ_FLAG_FAKE) if (gpuobj->flags & NVOBJ_FLAG_FAKE)
@ -312,12 +369,7 @@ int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
nouveau_mem_free_block(gpuobj->im_pramin); nouveau_mem_free_block(gpuobj->im_pramin);
} }
if (gpuobj->next) list_del(&gpuobj->list);
gpuobj->next->prev = gpuobj->prev;
if (gpuobj->prev)
gpuobj->prev->next = gpuobj->next;
else
dev_priv->gpuobj_all = gpuobj->next;
*pgpuobj = NULL; *pgpuobj = NULL;
drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER); drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
@ -325,7 +377,8 @@ int nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
} }
static int static int
nouveau_gpuobj_instance_get(struct drm_device *dev, int channel, nouveau_gpuobj_instance_get(struct drm_device *dev,
struct nouveau_channel *chan,
struct nouveau_gpuobj *gpuobj, uint32_t *inst) struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
@ -337,15 +390,15 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, int channel,
return 0; return 0;
} }
if ((channel > 0) && gpuobj->im_channel != channel) { if (chan && gpuobj->im_channel != chan->id) {
DRM_ERROR("Channel mismatch: obj %d, ref %d\n", DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
gpuobj->im_channel, channel); gpuobj->im_channel, chan->id);
return -EINVAL; return -EINVAL;
} }
/* NV50 channel-local instance */ /* NV50 channel-local instance */
if (channel > 0) { if (chan > 0) {
cpramin = dev_priv->fifos[channel]->ramin->gpuobj; cpramin = chan->ramin->gpuobj;
*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
return 0; return 0;
} }
@ -371,29 +424,25 @@ nouveau_gpuobj_instance_get(struct drm_device *dev, int channel,
} }
int int
nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle, nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
struct nouveau_gpuobj *gpuobj, struct nouveau_gpuobj_ref **ref_ret) uint32_t handle, struct nouveau_gpuobj *gpuobj,
struct nouveau_gpuobj_ref **ref_ret)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = NULL;
struct nouveau_gpuobj_ref *ref; struct nouveau_gpuobj_ref *ref;
uint32_t instance; uint32_t instance;
int ret; int ret;
DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", channel, handle, gpuobj); DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n",
chan ? chan->id : -1, handle, gpuobj);
if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
return -EINVAL; return -EINVAL;
if (channel >= 0) { if (!chan && !ref_ret)
if (channel > nouveau_fifo_number(dev))
return -EINVAL;
chan = dev_priv->fifos[channel];
} else
if (!ref_ret)
return -EINVAL; return -EINVAL;
ret = nouveau_gpuobj_instance_get(dev, channel, gpuobj, &instance); ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
if (ret) if (ret)
return ret; return ret;
@ -401,7 +450,7 @@ nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle,
if (!ref) if (!ref)
return -ENOMEM; return -ENOMEM;
ref->gpuobj = gpuobj; ref->gpuobj = gpuobj;
ref->channel = channel; ref->channel = chan ? chan->id : -1;
ref->instance = instance; ref->instance = instance;
if (!ref_ret) { if (!ref_ret) {
@ -412,9 +461,6 @@ nouveau_gpuobj_ref_add(struct drm_device *dev, int channel, uint32_t handle,
drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER); drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
return ret; return ret;
} }
ref->next = chan->ramht_refs;
chan->ramht_refs = ref;
} else { } else {
ref->handle = ~0; ref->handle = ~0;
*ref_ret = ref; *ref_ret = ref;
@ -452,8 +498,9 @@ int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **p
} }
int int
nouveau_gpuobj_new_ref(struct drm_device *dev, int oc, int rc, uint32_t handle, nouveau_gpuobj_new_ref(struct drm_device *dev,
int size, int align, uint32_t flags, struct nouveau_channel *oc, struct nouveau_channel *rc,
uint32_t handle, int size, int align, uint32_t flags,
struct nouveau_gpuobj_ref **ref) struct nouveau_gpuobj_ref **ref)
{ {
struct nouveau_gpuobj *gpuobj = NULL; struct nouveau_gpuobj *gpuobj = NULL;
@ -470,28 +517,29 @@ nouveau_gpuobj_new_ref(struct drm_device *dev, int oc, int rc, uint32_t handle,
return 0; return 0;
} }
static int int
nouveau_gpuobj_ref_find(struct drm_device *dev, int channel, uint32_t handle, nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
struct nouveau_gpuobj_ref **ref_ret) struct nouveau_gpuobj_ref **ref_ret)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj_ref *ref;
struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct list_head *entry, *tmp;
struct nouveau_gpuobj_ref *ref = chan->ramht_refs;
list_for_each_safe(entry, tmp, &chan->ramht_refs) {
ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
while (ref) {
if (ref->handle == handle) { if (ref->handle == handle) {
if (ref_ret) if (ref_ret)
*ref_ret = ref; *ref_ret = ref;
return 0; return 0;
} }
ref = ref->next;
} }
return -EINVAL; return -EINVAL;
} }
int int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size, nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
uint32_t b_offset, uint32_t size,
uint32_t flags, struct nouveau_gpuobj **pgpuobj, uint32_t flags, struct nouveau_gpuobj **pgpuobj,
struct nouveau_gpuobj_ref **pref) struct nouveau_gpuobj_ref **pref)
{ {
@ -499,8 +547,8 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size,
struct nouveau_gpuobj *gpuobj = NULL; struct nouveau_gpuobj *gpuobj = NULL;
int i; int i;
DRM_DEBUG("offset=0x%08x size=0x%08x flags=0x%08x\n", DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
offset, size, flags); p_offset, b_offset, size, flags);
gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
if (!gpuobj) if (!gpuobj)
@ -509,14 +557,29 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size,
gpuobj->im_channel = -1; gpuobj->im_channel = -1;
gpuobj->flags = flags | NVOBJ_FLAG_FAKE; gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
DRM_MEM_DRIVER);
if (!gpuobj->im_pramin) { if (p_offset != ~0) {
nouveau_gpuobj_del(dev, &gpuobj); gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
return -ENOMEM; DRM_MEM_DRIVER);
if (!gpuobj->im_pramin) {
nouveau_gpuobj_del(dev, &gpuobj);
return -ENOMEM;
}
gpuobj->im_pramin->start = p_offset;
gpuobj->im_pramin->size = size;
}
if (b_offset != ~0) {
gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block),
DRM_MEM_DRIVER);
if (!gpuobj->im_backing) {
nouveau_gpuobj_del(dev, &gpuobj);
return -ENOMEM;
}
gpuobj->im_backing->start = b_offset;
gpuobj->im_backing->size = size;
} }
gpuobj->im_pramin->start = offset;
gpuobj->im_pramin->size = size;
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
for (i = 0; i < gpuobj->im_pramin->size; i += 4) for (i = 0; i < gpuobj->im_pramin->size; i += 4)
@ -524,7 +587,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t offset, uint32_t size,
} }
if (pref) { if (pref) {
if ((i = nouveau_gpuobj_ref_add(dev, -1, 0, gpuobj, pref))) { if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) {
nouveau_gpuobj_del(dev, &gpuobj); nouveau_gpuobj_del(dev, &gpuobj);
return i; return i;
} }
@ -577,10 +640,11 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
to it that can be used to set up context objects. to it that can be used to set up context objects.
*/ */
int int
nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class, nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
uint64_t offset, uint64_t size, int access, int target, uint64_t offset, uint64_t size, int access,
struct nouveau_gpuobj **gpuobj) int target, struct nouveau_gpuobj **gpuobj)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret; int ret;
uint32_t is_scatter_gather = 0; uint32_t is_scatter_gather = 0;
@ -591,7 +655,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class,
DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
channel, class, offset, size); chan->id, class, offset, size);
DRM_DEBUG("access=%d target=%d\n", access, target); DRM_DEBUG("access=%d target=%d\n", access, target);
switch (target) { switch (target) {
@ -608,7 +672,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class,
break; break;
} }
ret = nouveau_gpuobj_new(dev, channel, ret = nouveau_gpuobj_new(dev, chan,
is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class),
16, 16,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
@ -711,19 +775,19 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class,
} }
int int
nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel, nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
uint64_t offset, uint64_t size, int access, uint64_t offset, uint64_t size, int access,
struct nouveau_gpuobj **gpuobj, struct nouveau_gpuobj **gpuobj,
uint32_t *o_ret) uint32_t *o_ret)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret; int ret;
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
(dev_priv->card_type >= NV_50 && (dev_priv->card_type >= NV_50 &&
dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
ret = nouveau_gpuobj_dma_new(dev, channel, ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
NV_CLASS_DMA_IN_MEMORY,
offset, size, access, offset, size, access,
NV_DMA_TARGET_AGP, gpuobj); NV_DMA_TARGET_AGP, gpuobj);
if (o_ret) if (o_ret)
@ -798,15 +862,16 @@ nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel,
set to 0? set to 0?
*/ */
int int
nouveau_gpuobj_gr_new(struct drm_device *dev, int channel, int class, nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
struct nouveau_gpuobj **gpuobj) struct nouveau_gpuobj **gpuobj)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret; int ret;
DRM_DEBUG("ch%d class=0x%04x\n", channel, class); DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class);
ret = nouveau_gpuobj_new(dev, channel, ret = nouveau_gpuobj_new(dev, chan,
nouveau_gpuobj_class_instmem_size(dev, class), nouveau_gpuobj_class_instmem_size(dev, class),
16, 16,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
@ -847,14 +912,14 @@ nouveau_gpuobj_gr_new(struct drm_device *dev, int channel, int class,
} }
static int static int
nouveau_gpuobj_channel_init_pramin(struct drm_device *dev, int channel) nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
struct nouveau_gpuobj *pramin = NULL; struct nouveau_gpuobj *pramin = NULL;
int size, base, ret; int size, base, ret;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
/* Base amount for object storage (4KiB enough?) */ /* Base amount for object storage (4KiB enough?) */
size = 0x1000; size = 0x1000;
@ -876,8 +941,8 @@ nouveau_gpuobj_channel_init_pramin(struct drm_device *dev, int channel)
} }
DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
channel, size, base); chan->id, size, base);
ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, size, 0x1000, 0, ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
&chan->ramin); &chan->ramin);
if (ret) { if (ret) {
DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret); DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
@ -897,21 +962,23 @@ nouveau_gpuobj_channel_init_pramin(struct drm_device *dev, int channel)
} }
int int
nouveau_gpuobj_channel_init(struct drm_device *dev, int channel, nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
uint32_t vram_h, uint32_t tt_h) uint32_t vram_h, uint32_t tt_h)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
struct nouveau_gpuobj *vram = NULL, *tt = NULL; struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i; int ret, i;
DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h); INIT_LIST_HEAD(&chan->ramht_refs);
DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
/* Reserve a block of PRAMIN for the channel /* Reserve a block of PRAMIN for the channel
*XXX: maybe on <NV50 too at some point *XXX: maybe on <NV50 too at some point
*/ */
if (0 || dev_priv->card_type == NV_50) { if (0 || dev_priv->card_type == NV_50) {
ret = nouveau_gpuobj_channel_init_pramin(dev, channel); ret = nouveau_gpuobj_channel_init_pramin(chan);
if (ret) if (ret)
return ret; return ret;
} }
@ -922,7 +989,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
vm_offset += chan->ramin->gpuobj->im_pramin->start; vm_offset += chan->ramin->gpuobj->im_pramin->start;
if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, 0x4000, if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
0, &chan->vm_pd, NULL))) 0, &chan->vm_pd, NULL)))
return ret; return ret;
for (i=0; i<0x4000; i+=8) { for (i=0; i<0x4000; i+=8) {
@ -930,7 +997,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe); INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe);
} }
if ((ret = nouveau_gpuobj_ref_add(dev, -1, 0, if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
dev_priv->gart_info.sg_ctxdma, dev_priv->gart_info.sg_ctxdma,
&chan->vm_gart_pt))) &chan->vm_gart_pt)))
return ret; return ret;
@ -941,12 +1008,12 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
/* RAMHT */ /* RAMHT */
if (dev_priv->card_type < NV_50) { if (dev_priv->card_type < NV_50) {
ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht, ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
&chan->ramht); &chan->ramht);
if (ret) if (ret)
return ret; return ret;
} else { } else {
ret = nouveau_gpuobj_new_ref(dev, channel, channel, 0, ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
0x8000, 16, 0x8000, 16,
NVOBJ_FLAG_ZERO_ALLOC, NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramht); &chan->ramht);
@ -955,7 +1022,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
} }
/* VRAM ctxdma */ /* VRAM ctxdma */
if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
0, dev_priv->fb_available_size, 0, dev_priv->fb_available_size,
NV_DMA_ACCESS_RW, NV_DMA_ACCESS_RW,
NV_DMA_TARGET_VIDMEM, &vram))) { NV_DMA_TARGET_VIDMEM, &vram))) {
@ -963,20 +1030,19 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
return ret; return ret;
} }
if ((ret = nouveau_gpuobj_ref_add(dev, channel, vram_h, vram, NULL))) { if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) {
DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
return ret; return ret;
} }
/* TT memory ctxdma */ /* TT memory ctxdma */
if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
ret = nouveau_gpuobj_gart_dma_new(dev, channel, 0, ret = nouveau_gpuobj_gart_dma_new(chan, 0,
dev_priv->gart_info.aper_size, dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RW, &tt, NULL); NV_DMA_ACCESS_RW, &tt, NULL);
} else } else
if (dev_priv->pci_heap) { if (dev_priv->pci_heap) {
ret = nouveau_gpuobj_dma_new(dev, channel, ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
NV_CLASS_DMA_IN_MEMORY,
0, dev->sg->pages * PAGE_SIZE, 0, dev->sg->pages * PAGE_SIZE,
NV_DMA_ACCESS_RW, NV_DMA_ACCESS_RW,
NV_DMA_TARGET_PCI_NONLINEAR, &tt); NV_DMA_TARGET_PCI_NONLINEAR, &tt);
@ -990,7 +1056,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
return ret; return ret;
} }
ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
if (ret) { if (ret) {
DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
return ret; return ret;
@ -1000,18 +1066,20 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
} }
void void
nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel) nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_device *dev = chan->dev;
struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct list_head *entry, *tmp;
struct nouveau_gpuobj_ref *ref; struct nouveau_gpuobj_ref *ref;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
list_for_each_safe(entry, tmp, &chan->ramht_refs) {
ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
while ((ref = chan->ramht_refs)) {
chan->ramht_refs = ref->next;
nouveau_gpuobj_ref_del(dev, &ref); nouveau_gpuobj_ref_del(dev, &ref);
} }
nouveau_gpuobj_ref_del(dev, &chan->ramht); nouveau_gpuobj_ref_del(dev, &chan->ramht);
nouveau_gpuobj_del(dev, &chan->vm_pd); nouveau_gpuobj_del(dev, &chan->vm_pd);
@ -1024,35 +1092,33 @@ nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel)
} }
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{ {
struct nouveau_channel *chan;
struct drm_nouveau_grobj_alloc *init = data; struct drm_nouveau_grobj_alloc *init = data;
struct nouveau_gpuobj *gr = NULL; struct nouveau_gpuobj *gr = NULL;
int ret; int ret;
if (!nouveau_fifo_owner(dev, file_priv, init->channel)) { NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
DRM_ERROR("pid %d doesn't own channel %d\n", NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
DRM_CURRENTPID, init->channel);
return -EINVAL;
}
//FIXME: check args, only allow trusted objects to be created //FIXME: check args, only allow trusted objects to be created
if (init->handle == ~0) if (init->handle == ~0)
return -EINVAL; return -EINVAL;
if (nouveau_gpuobj_ref_find(dev, init->channel, init->handle, NULL) ==
0) if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
return -EEXIST; return -EEXIST;
ret = nouveau_gpuobj_gr_new(dev, init->channel, init->class, &gr); ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
if (ret) { if (ret) {
DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
ret, init->channel, init->handle); ret, init->channel, init->handle);
return ret; return ret;
} }
if ((ret = nouveau_gpuobj_ref_add(dev, init->channel, init->handle, if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) {
gr, NULL))) {
DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)", DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)",
ret, init->channel, init->handle); ret, init->channel, init->handle);
nouveau_gpuobj_del(dev, &gr); nouveau_gpuobj_del(dev, &gr);
@ -1062,3 +1128,21 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_fil
return 0; return 0;
} }
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gpuobj_free *objfree = data;
struct nouveau_gpuobj_ref *ref;
struct nouveau_channel *chan;
int ret;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref)))
return ret;
nouveau_gpuobj_ref_del(dev, &ref);
return 0;
}

View file

@ -15,9 +15,6 @@
# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 # define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 # define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
#define NV03_PGRAPH_STATUS 0x004006b0
#define NV04_PGRAPH_STATUS 0x00400700
#define NV_RAMIN 0x00700000 #define NV_RAMIN 0x00700000
#define NV_RAMHT_HANDLE_OFFSET 0 #define NV_RAMHT_HANDLE_OFFSET 0
@ -80,6 +77,16 @@
#define NV40_PMC_1708 0x00001708 #define NV40_PMC_1708 0x00001708
#define NV40_PMC_170C 0x0000170C #define NV40_PMC_170C 0x0000170C
/* probably PMC ? */
#define NV50_PUNK_BAR0_PRAMIN 0x00001700
#define NV50_PUNK_BAR_CFG_BASE 0x00001704
#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
#define NV50_PUNK_BAR1_CTXDMA 0x00001708
#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
#define NV50_PUNK_UNK1710 0x00001710
#define NV04_PTIMER_INTR_0 0x00009100 #define NV04_PTIMER_INTR_0 0x00009100
#define NV04_PTIMER_INTR_EN_0 0x00009140 #define NV04_PTIMER_INTR_EN_0 0x00009140
#define NV04_PTIMER_NUMERATOR 0x00009200 #define NV04_PTIMER_NUMERATOR 0x00009200
@ -168,6 +175,10 @@
#define NV10_PGRAPH_CTX_CACHE5 0x004001E0 #define NV10_PGRAPH_CTX_CACHE5 0x004001E0
#define NV40_PGRAPH_CTXCTL_0304 0x00400304 #define NV40_PGRAPH_CTXCTL_0304 0x00400304
#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001 #define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
#define NV40_PGRAPH_CTXCTL_0310 0x00400310 #define NV40_PGRAPH_CTXCTL_0310 0x00400310
#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020 #define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040 #define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
@ -250,7 +261,12 @@
#define NV04_PGRAPH_BLIMIT5 0x00400698 #define NV04_PGRAPH_BLIMIT5 0x00400698
#define NV04_PGRAPH_BSWIZZLE2 0x0040069C #define NV04_PGRAPH_BSWIZZLE2 0x0040069C
#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 #define NV04_PGRAPH_BSWIZZLE5 0x004006A0
#define NV03_PGRAPH_STATUS 0x004006B0
#define NV04_PGRAPH_STATUS 0x00400700
#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
#define NV04_PGRAPH_SURFACE 0x0040070C #define NV04_PGRAPH_SURFACE 0x0040070C
#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
#define NV04_PGRAPH_STATE 0x00400710 #define NV04_PGRAPH_STATE 0x00400710
#define NV10_PGRAPH_SURFACE 0x00400710 #define NV10_PGRAPH_SURFACE 0x00400710
#define NV04_PGRAPH_NOTIFY 0x00400714 #define NV04_PGRAPH_NOTIFY 0x00400714

View file

@ -93,7 +93,7 @@ static uint64_t nouveau_stub_timer_read(struct drm_device *dev) { return 0; }
static int nouveau_init_engine_ptrs(struct drm_device *dev) static int nouveau_init_engine_ptrs(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
switch (dev_priv->chipset & 0xf0) { switch (dev_priv->chipset & 0xf0) {
case 0x00: case 0x00:
@ -224,7 +224,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv40_graph_destroy_context; engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context; engine->graph.load_context = nv40_graph_load_context;
engine->graph.save_context = nv40_graph_save_context; engine->graph.save_context = nv40_graph_save_context;
engine->fifo.init = nouveau_fifo_init; engine->fifo.init = nv40_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown; engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.create_context = nv40_fifo_create_context; engine->fifo.create_context = nv40_fifo_create_context;
engine->fifo.destroy_context = nv40_fifo_destroy_context; engine->fifo.destroy_context = nv40_fifo_destroy_context;
@ -267,12 +267,18 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
return 0; return 0;
} }
static int nouveau_card_init(struct drm_device *dev) int
nouveau_card_init(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine_func *engine; struct nouveau_engine *engine;
int ret; int ret;
DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
return 0;
/* Map any PCI resources we need on the card */ /* Map any PCI resources we need on the card */
ret = nouveau_init_card_mappings(dev); ret = nouveau_init_card_mappings(dev);
if (ret) return ret; if (ret) return ret;
@ -290,6 +296,9 @@ static int nouveau_card_init(struct drm_device *dev)
engine = &dev_priv->Engine; engine = &dev_priv->Engine;
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
ret = nouveau_gpuobj_early_init(dev);
if (ret) return ret;
/* Initialise instance memory, must happen before mem_init so we /* Initialise instance memory, must happen before mem_init so we
* know exactly how much VRAM we're able to use for "normal" * know exactly how much VRAM we're able to use for "normal"
* purposes. * purposes.
@ -301,6 +310,9 @@ static int nouveau_card_init(struct drm_device *dev)
ret = nouveau_mem_init(dev); ret = nouveau_mem_init(dev);
if (ret) return ret; if (ret) return ret;
ret = nouveau_gpuobj_init(dev);
if (ret) return ret;
/* Parse BIOS tables / Run init tables? */ /* Parse BIOS tables / Run init tables? */
/* PMC */ /* PMC */
@ -323,8 +335,17 @@ static int nouveau_card_init(struct drm_device *dev)
ret = engine->fifo.init(dev); ret = engine->fifo.init(dev);
if (ret) return ret; if (ret) return ret;
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
*/
ret = drm_irq_install(dev);
if (ret) return ret;
/* what about PVIDEO/PCRTC/PRAMDAC etc? */ /* what about PVIDEO/PCRTC/PRAMDAC etc? */
ret = nouveau_dma_channel_init(dev);
if (ret) return ret;
dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
return 0; return 0;
} }
@ -332,9 +353,13 @@ static int nouveau_card_init(struct drm_device *dev)
static void nouveau_card_takedown(struct drm_device *dev) static void nouveau_card_takedown(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine_func *engine = &dev_priv->Engine; struct nouveau_engine *engine = &dev_priv->Engine;
DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
nouveau_dma_channel_takedown(dev);
engine->fifo.takedown(dev); engine->fifo.takedown(dev);
engine->graph.takedown(dev); engine->graph.takedown(dev);
engine->fb.takedown(dev); engine->fb.takedown(dev);
@ -349,6 +374,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
nouveau_mem_close(dev); nouveau_mem_close(dev);
engine->instmem.takedown(dev); engine->instmem.takedown(dev);
drm_irq_uninstall(dev);
nouveau_gpuobj_late_takedown(dev);
dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
} }
} }
@ -368,14 +397,6 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
/* first module load, setup the mmio/fb mapping */ /* first module load, setup the mmio/fb mapping */
int nouveau_firstopen(struct drm_device *dev) int nouveau_firstopen(struct drm_device *dev)
{ {
int ret;
ret = nouveau_card_init(dev);
if (ret) {
DRM_ERROR("nouveau_card_init() failed! (%d)\n", ret);
return ret;
}
return 0; return 0;
} }
@ -395,15 +416,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
dev->dev_private = (void *)dev_priv; dev->dev_private = (void *)dev_priv;
#if 0
ret = nouveau_card_init(dev);
if (ret) {
DRM_ERROR("nouveau_card_init() failed! (%d)\n", ret);
return ret;
}
#endif
return 0; return 0;
} }
@ -427,12 +439,24 @@ int nouveau_unload(struct drm_device *dev)
return 0; return 0;
} }
int
nouveau_ioctl_card_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return nouveau_card_init(dev);
}
int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_getparam *getparam = data; struct drm_nouveau_getparam *getparam = data;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
switch (getparam->param) { switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
getparam->value = dev_priv->chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR: case NOUVEAU_GETPARAM_PCI_VENDOR:
getparam->value=dev->pci_vendor; getparam->value=dev->pci_vendor;
break; break;
@ -481,6 +505,8 @@ int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_setparam *setparam = data; struct drm_nouveau_setparam *setparam = data;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
switch (setparam->param) { switch (setparam->param) {
case NOUVEAU_SETPARAM_CMDBUF_LOCATION: case NOUVEAU_SETPARAM_CMDBUF_LOCATION:
switch (setparam->value) { switch (setparam->value) {
@ -512,9 +538,6 @@ void nouveau_wait_for_idle(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv=dev->dev_private; struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type) { switch(dev_priv->card_type) {
case NV_03:
while (NV_READ(NV03_PGRAPH_STATUS));
break;
case NV_50: case NV_50:
break; break;
default: { default: {
@ -526,6 +549,7 @@ void nouveau_wait_for_idle(struct drm_device *dev)
uint32_t status; uint32_t status;
do { do {
uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE); uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE);
(void)pmc_e;
status = NV_READ(NV04_PGRAPH_STATUS); status = NV_READ(NV04_PGRAPH_STATUS);
if (!status) if (!status)
break; break;

View file

@ -36,13 +36,13 @@
#define NV04_RAMFC__SIZE 32 #define NV04_RAMFC__SIZE 32
int int
nv04_fifo_create_context(struct drm_device *dev, int channel) nv04_fifo_create_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int ret; int ret;
if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(channel), if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
NV04_RAMFC__SIZE, NV04_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_FREE,
@ -62,30 +62,29 @@ nv04_fifo_create_context(struct drm_device *dev, int channel)
0)); 0));
/* enable the fifo dma operation */ /* enable the fifo dma operation */
NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel)); NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<<chan->id));
return 0; return 0;
} }
void void
nv04_fifo_destroy_context(struct drm_device *dev, int channel) nv04_fifo_destroy_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel)); nouveau_gpuobj_ref_del(dev, &chan->ramfc);
if (chan->ramfc)
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
} }
int int
nv04_fifo_load_context(struct drm_device *dev, int channel) nv04_fifo_load_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp; uint32_t tmp;
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel); NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | chan->id);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
@ -107,10 +106,10 @@ nv04_fifo_load_context(struct drm_device *dev, int channel)
} }
int int
nv04_fifo_save_context(struct drm_device *dev, int channel) nv04_fifo_save_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp; uint32_t tmp;
RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT); RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT);

View file

@ -27,262 +27,321 @@
#include "nouveau_drm.h" #include "nouveau_drm.h"
#include "nouveau_drv.h" #include "nouveau_drv.h"
struct reg_interval static uint32_t nv04_graph_ctx_regs [] = {
{ NV04_PGRAPH_CTX_SWITCH1,
uint32_t reg; NV04_PGRAPH_CTX_SWITCH2,
int number; NV04_PGRAPH_CTX_SWITCH3,
} nv04_graph_ctx_regs [] = { NV04_PGRAPH_CTX_SWITCH4,
{NV04_PGRAPH_CTX_SWITCH1,1}, NV04_PGRAPH_CTX_CACHE1,
{NV04_PGRAPH_CTX_SWITCH2,1}, NV04_PGRAPH_CTX_CACHE2,
{NV04_PGRAPH_CTX_SWITCH3,1}, NV04_PGRAPH_CTX_CACHE3,
{NV04_PGRAPH_CTX_SWITCH4,1}, NV04_PGRAPH_CTX_CACHE4,
{NV04_PGRAPH_CTX_CACHE1,1}, 0x00400184,
{NV04_PGRAPH_CTX_CACHE2,1}, 0x004001a4,
{NV04_PGRAPH_CTX_CACHE3,1}, 0x004001c4,
{NV04_PGRAPH_CTX_CACHE4,1}, 0x004001e4,
{0x00400184,1}, 0x00400188,
{0x004001a4,1}, 0x004001a8,
{0x004001c4,1}, 0x004001c8,
{0x004001e4,1}, 0x004001e8,
{0x00400188,1}, 0x0040018c,
{0x004001a8,1}, 0x004001ac,
{0x004001c8,1}, 0x004001cc,
{0x004001e8,1}, 0x004001ec,
{0x0040018c,1}, 0x00400190,
{0x004001ac,1}, 0x004001b0,
{0x004001cc,1}, 0x004001d0,
{0x004001ec,1}, 0x004001f0,
{0x00400190,1}, 0x00400194,
{0x004001b0,1}, 0x004001b4,
{0x004001d0,1}, 0x004001d4,
{0x004001f0,1}, 0x004001f4,
{0x00400194,1}, 0x00400198,
{0x004001b4,1}, 0x004001b8,
{0x004001d4,1}, 0x004001d8,
{0x004001f4,1}, 0x004001f8,
{0x00400198,1}, 0x0040019c,
{0x004001b8,1}, 0x004001bc,
{0x004001d8,1}, 0x004001dc,
{0x004001f8,1}, 0x004001fc,
{0x0040019c,1}, 0x00400174,
{0x004001bc,1}, NV04_PGRAPH_DMA_START_0,
{0x004001dc,1}, NV04_PGRAPH_DMA_START_1,
{0x004001fc,1}, NV04_PGRAPH_DMA_LENGTH,
{0x00400174,1}, NV04_PGRAPH_DMA_MISC,
{NV04_PGRAPH_DMA_START_0,1}, NV04_PGRAPH_DMA_PITCH,
{NV04_PGRAPH_DMA_START_1,1}, NV04_PGRAPH_BOFFSET0,
{NV04_PGRAPH_DMA_LENGTH,1}, NV04_PGRAPH_BBASE0,
{NV04_PGRAPH_DMA_MISC,1}, NV04_PGRAPH_BLIMIT0,
{NV04_PGRAPH_DMA_PITCH,1}, NV04_PGRAPH_BOFFSET1,
{NV04_PGRAPH_BOFFSET0,1}, NV04_PGRAPH_BBASE1,
{NV04_PGRAPH_BBASE0,1}, NV04_PGRAPH_BLIMIT1,
{NV04_PGRAPH_BLIMIT0,1}, NV04_PGRAPH_BOFFSET2,
{NV04_PGRAPH_BOFFSET1,1}, NV04_PGRAPH_BBASE2,
{NV04_PGRAPH_BBASE1,1}, NV04_PGRAPH_BLIMIT2,
{NV04_PGRAPH_BLIMIT1,1}, NV04_PGRAPH_BOFFSET3,
{NV04_PGRAPH_BOFFSET2,1}, NV04_PGRAPH_BBASE3,
{NV04_PGRAPH_BBASE2,1}, NV04_PGRAPH_BLIMIT3,
{NV04_PGRAPH_BLIMIT2,1}, NV04_PGRAPH_BOFFSET4,
{NV04_PGRAPH_BOFFSET3,1}, NV04_PGRAPH_BBASE4,
{NV04_PGRAPH_BBASE3,1}, NV04_PGRAPH_BLIMIT4,
{NV04_PGRAPH_BLIMIT3,1}, NV04_PGRAPH_BOFFSET5,
{NV04_PGRAPH_BOFFSET4,1}, NV04_PGRAPH_BBASE5,
{NV04_PGRAPH_BBASE4,1}, NV04_PGRAPH_BLIMIT5,
{NV04_PGRAPH_BLIMIT4,1}, NV04_PGRAPH_BPITCH0,
{NV04_PGRAPH_BOFFSET5,1}, NV04_PGRAPH_BPITCH1,
{NV04_PGRAPH_BBASE5,1}, NV04_PGRAPH_BPITCH2,
{NV04_PGRAPH_BLIMIT5,1}, NV04_PGRAPH_BPITCH3,
{NV04_PGRAPH_BPITCH0,1}, NV04_PGRAPH_BPITCH4,
{NV04_PGRAPH_BPITCH1,1}, NV04_PGRAPH_SURFACE,
{NV04_PGRAPH_BPITCH2,1}, NV04_PGRAPH_STATE,
{NV04_PGRAPH_BPITCH3,1}, NV04_PGRAPH_BSWIZZLE2,
{NV04_PGRAPH_BPITCH4,1}, NV04_PGRAPH_BSWIZZLE5,
{NV04_PGRAPH_SURFACE,1}, NV04_PGRAPH_BPIXEL,
{NV04_PGRAPH_STATE,1}, NV04_PGRAPH_NOTIFY,
{NV04_PGRAPH_BSWIZZLE2,1}, NV04_PGRAPH_PATT_COLOR0,
{NV04_PGRAPH_BSWIZZLE5,1}, NV04_PGRAPH_PATT_COLOR1,
{NV04_PGRAPH_BPIXEL,1}, NV04_PGRAPH_PATT_COLORRAM+0x00,
{NV04_PGRAPH_NOTIFY,1}, NV04_PGRAPH_PATT_COLORRAM+0x01,
{NV04_PGRAPH_PATT_COLOR0,1}, NV04_PGRAPH_PATT_COLORRAM+0x02,
{NV04_PGRAPH_PATT_COLOR1,1}, NV04_PGRAPH_PATT_COLORRAM+0x03,
{NV04_PGRAPH_PATT_COLORRAM,64}, NV04_PGRAPH_PATT_COLORRAM+0x04,
{NV04_PGRAPH_PATTERN,1}, NV04_PGRAPH_PATT_COLORRAM+0x05,
{0x0040080c,1}, NV04_PGRAPH_PATT_COLORRAM+0x06,
{NV04_PGRAPH_PATTERN_SHAPE,1}, NV04_PGRAPH_PATT_COLORRAM+0x07,
{0x00400600,1}, NV04_PGRAPH_PATT_COLORRAM+0x08,
{NV04_PGRAPH_ROP3,1}, NV04_PGRAPH_PATT_COLORRAM+0x09,
{NV04_PGRAPH_CHROMA,1}, NV04_PGRAPH_PATT_COLORRAM+0x0A,
{NV04_PGRAPH_BETA_AND,1}, NV04_PGRAPH_PATT_COLORRAM+0x0B,
{NV04_PGRAPH_BETA_PREMULT,1}, NV04_PGRAPH_PATT_COLORRAM+0x0C,
{NV04_PGRAPH_CONTROL0,1}, NV04_PGRAPH_PATT_COLORRAM+0x0D,
{NV04_PGRAPH_CONTROL1,1}, NV04_PGRAPH_PATT_COLORRAM+0x0E,
{NV04_PGRAPH_CONTROL2,1}, NV04_PGRAPH_PATT_COLORRAM+0x0F,
{NV04_PGRAPH_BLEND,1}, NV04_PGRAPH_PATT_COLORRAM+0x10,
{NV04_PGRAPH_STORED_FMT,1}, NV04_PGRAPH_PATT_COLORRAM+0x11,
{NV04_PGRAPH_SOURCE_COLOR,1}, NV04_PGRAPH_PATT_COLORRAM+0x12,
{0x00400560,1}, NV04_PGRAPH_PATT_COLORRAM+0x13,
{0x00400568,1}, NV04_PGRAPH_PATT_COLORRAM+0x14,
{0x00400564,1}, NV04_PGRAPH_PATT_COLORRAM+0x15,
{0x0040056c,1}, NV04_PGRAPH_PATT_COLORRAM+0x16,
{0x00400400,1}, NV04_PGRAPH_PATT_COLORRAM+0x17,
{0x00400480,1}, NV04_PGRAPH_PATT_COLORRAM+0x18,
{0x00400404,1}, NV04_PGRAPH_PATT_COLORRAM+0x19,
{0x00400484,1}, NV04_PGRAPH_PATT_COLORRAM+0x1A,
{0x00400408,1}, NV04_PGRAPH_PATT_COLORRAM+0x1B,
{0x00400488,1}, NV04_PGRAPH_PATT_COLORRAM+0x1C,
{0x0040040c,1}, NV04_PGRAPH_PATT_COLORRAM+0x1D,
{0x0040048c,1}, NV04_PGRAPH_PATT_COLORRAM+0x1E,
{0x00400410,1}, NV04_PGRAPH_PATT_COLORRAM+0x1F,
{0x00400490,1}, NV04_PGRAPH_PATT_COLORRAM+0x20,
{0x00400414,1}, NV04_PGRAPH_PATT_COLORRAM+0x21,
{0x00400494,1}, NV04_PGRAPH_PATT_COLORRAM+0x22,
{0x00400418,1}, NV04_PGRAPH_PATT_COLORRAM+0x23,
{0x00400498,1}, NV04_PGRAPH_PATT_COLORRAM+0x24,
{0x0040041c,1}, NV04_PGRAPH_PATT_COLORRAM+0x25,
{0x0040049c,1}, NV04_PGRAPH_PATT_COLORRAM+0x26,
{0x00400420,1}, NV04_PGRAPH_PATT_COLORRAM+0x27,
{0x004004a0,1}, NV04_PGRAPH_PATT_COLORRAM+0x28,
{0x00400424,1}, NV04_PGRAPH_PATT_COLORRAM+0x29,
{0x004004a4,1}, NV04_PGRAPH_PATT_COLORRAM+0x2A,
{0x00400428,1}, NV04_PGRAPH_PATT_COLORRAM+0x2B,
{0x004004a8,1}, NV04_PGRAPH_PATT_COLORRAM+0x2C,
{0x0040042c,1}, NV04_PGRAPH_PATT_COLORRAM+0x2D,
{0x004004ac,1}, NV04_PGRAPH_PATT_COLORRAM+0x2E,
{0x00400430,1}, NV04_PGRAPH_PATT_COLORRAM+0x2F,
{0x004004b0,1}, NV04_PGRAPH_PATT_COLORRAM+0x30,
{0x00400434,1}, NV04_PGRAPH_PATT_COLORRAM+0x31,
{0x004004b4,1}, NV04_PGRAPH_PATT_COLORRAM+0x32,
{0x00400438,1}, NV04_PGRAPH_PATT_COLORRAM+0x33,
{0x004004b8,1}, NV04_PGRAPH_PATT_COLORRAM+0x34,
{0x0040043c,1}, NV04_PGRAPH_PATT_COLORRAM+0x35,
{0x004004bc,1}, NV04_PGRAPH_PATT_COLORRAM+0x36,
{0x00400440,1}, NV04_PGRAPH_PATT_COLORRAM+0x37,
{0x004004c0,1}, NV04_PGRAPH_PATT_COLORRAM+0x38,
{0x00400444,1}, NV04_PGRAPH_PATT_COLORRAM+0x39,
{0x004004c4,1}, NV04_PGRAPH_PATT_COLORRAM+0x3A,
{0x00400448,1}, NV04_PGRAPH_PATT_COLORRAM+0x3B,
{0x004004c8,1}, NV04_PGRAPH_PATT_COLORRAM+0x3C,
{0x0040044c,1}, NV04_PGRAPH_PATT_COLORRAM+0x3D,
{0x004004cc,1}, NV04_PGRAPH_PATT_COLORRAM+0x3E,
{0x00400450,1}, NV04_PGRAPH_PATT_COLORRAM+0x3F,
{0x004004d0,1}, NV04_PGRAPH_PATTERN,
{0x00400454,1}, 0x0040080c,
{0x004004d4,1}, NV04_PGRAPH_PATTERN_SHAPE,
{0x00400458,1}, 0x00400600,
{0x004004d8,1}, NV04_PGRAPH_ROP3,
{0x0040045c,1}, NV04_PGRAPH_CHROMA,
{0x004004dc,1}, NV04_PGRAPH_BETA_AND,
{0x00400460,1}, NV04_PGRAPH_BETA_PREMULT,
{0x004004e0,1}, NV04_PGRAPH_CONTROL0,
{0x00400464,1}, NV04_PGRAPH_CONTROL1,
{0x004004e4,1}, NV04_PGRAPH_CONTROL2,
{0x00400468,1}, NV04_PGRAPH_BLEND,
{0x004004e8,1}, NV04_PGRAPH_STORED_FMT,
{0x0040046c,1}, NV04_PGRAPH_SOURCE_COLOR,
{0x004004ec,1}, 0x00400560,
{0x00400470,1}, 0x00400568,
{0x004004f0,1}, 0x00400564,
{0x00400474,1}, 0x0040056c,
{0x004004f4,1}, 0x00400400,
{0x00400478,1}, 0x00400480,
{0x004004f8,1}, 0x00400404,
{0x0040047c,1}, 0x00400484,
{0x004004fc,1}, 0x00400408,
{0x0040053c,1}, 0x00400488,
{0x00400544,1}, 0x0040040c,
{0x00400540,1}, 0x0040048c,
{0x00400548,1}, 0x00400410,
{0x00400560,1}, 0x00400490,
{0x00400568,1}, 0x00400414,
{0x00400564,1}, 0x00400494,
{0x0040056c,1}, 0x00400418,
{0x00400534,1}, 0x00400498,
{0x00400538,1}, 0x0040041c,
{0x00400514,1}, 0x0040049c,
{0x00400518,1}, 0x00400420,
{0x0040051c,1}, 0x004004a0,
{0x00400520,1}, 0x00400424,
{0x00400524,1}, 0x004004a4,
{0x00400528,1}, 0x00400428,
{0x0040052c,1}, 0x004004a8,
{0x00400530,1}, 0x0040042c,
{0x00400d00,1}, 0x004004ac,
{0x00400d40,1}, 0x00400430,
{0x00400d80,1}, 0x004004b0,
{0x00400d04,1}, 0x00400434,
{0x00400d44,1}, 0x004004b4,
{0x00400d84,1}, 0x00400438,
{0x00400d08,1}, 0x004004b8,
{0x00400d48,1}, 0x0040043c,
{0x00400d88,1}, 0x004004bc,
{0x00400d0c,1}, 0x00400440,
{0x00400d4c,1}, 0x004004c0,
{0x00400d8c,1}, 0x00400444,
{0x00400d10,1}, 0x004004c4,
{0x00400d50,1}, 0x00400448,
{0x00400d90,1}, 0x004004c8,
{0x00400d14,1}, 0x0040044c,
{0x00400d54,1}, 0x004004cc,
{0x00400d94,1}, 0x00400450,
{0x00400d18,1}, 0x004004d0,
{0x00400d58,1}, 0x00400454,
{0x00400d98,1}, 0x004004d4,
{0x00400d1c,1}, 0x00400458,
{0x00400d5c,1}, 0x004004d8,
{0x00400d9c,1}, 0x0040045c,
{0x00400d20,1}, 0x004004dc,
{0x00400d60,1}, 0x00400460,
{0x00400da0,1}, 0x004004e0,
{0x00400d24,1}, 0x00400464,
{0x00400d64,1}, 0x004004e4,
{0x00400da4,1}, 0x00400468,
{0x00400d28,1}, 0x004004e8,
{0x00400d68,1}, 0x0040046c,
{0x00400da8,1}, 0x004004ec,
{0x00400d2c,1}, 0x00400470,
{0x00400d6c,1}, 0x004004f0,
{0x00400dac,1}, 0x00400474,
{0x00400d30,1}, 0x004004f4,
{0x00400d70,1}, 0x00400478,
{0x00400db0,1}, 0x004004f8,
{0x00400d34,1}, 0x0040047c,
{0x00400d74,1}, 0x004004fc,
{0x00400db4,1}, 0x0040053c,
{0x00400d38,1}, 0x00400544,
{0x00400d78,1}, 0x00400540,
{0x00400db8,1}, 0x00400548,
{0x00400d3c,1}, 0x00400560,
{0x00400d7c,1}, 0x00400568,
{0x00400dbc,1}, 0x00400564,
{0x00400590,1}, 0x0040056c,
{0x00400594,1}, 0x00400534,
{0x00400598,1}, 0x00400538,
{0x0040059c,1}, 0x00400514,
{0x004005a8,1}, 0x00400518,
{0x004005ac,1}, 0x0040051c,
{0x004005b0,1}, 0x00400520,
{0x004005b4,1}, 0x00400524,
{0x004005c0,1}, 0x00400528,
{0x004005c4,1}, 0x0040052c,
{0x004005c8,1}, 0x00400530,
{0x004005cc,1}, 0x00400d00,
{0x004005d0,1}, 0x00400d40,
{0x004005d4,1}, 0x00400d80,
{0x004005d8,1}, 0x00400d04,
{0x004005dc,1}, 0x00400d44,
{0x004005e0,1}, 0x00400d84,
{NV04_PGRAPH_PASSTHRU_0,1}, 0x00400d08,
{NV04_PGRAPH_PASSTHRU_1,1}, 0x00400d48,
{NV04_PGRAPH_PASSTHRU_2,1}, 0x00400d88,
{NV04_PGRAPH_DVD_COLORFMT,1}, 0x00400d0c,
{NV04_PGRAPH_SCALED_FORMAT,1}, 0x00400d4c,
{NV04_PGRAPH_MISC24_0,1}, 0x00400d8c,
{NV04_PGRAPH_MISC24_1,1}, 0x00400d10,
{NV04_PGRAPH_MISC24_2,1}, 0x00400d50,
{0x00400500,1}, 0x00400d90,
{0x00400504,1}, 0x00400d14,
{NV04_PGRAPH_VALID1,1}, 0x00400d54,
{NV04_PGRAPH_VALID2,1} 0x00400d94,
0x00400d18,
0x00400d58,
0x00400d98,
0x00400d1c,
0x00400d5c,
0x00400d9c,
0x00400d20,
0x00400d60,
0x00400da0,
0x00400d24,
0x00400d64,
0x00400da4,
0x00400d28,
0x00400d68,
0x00400da8,
0x00400d2c,
0x00400d6c,
0x00400dac,
0x00400d30,
0x00400d70,
0x00400db0,
0x00400d34,
0x00400d74,
0x00400db4,
0x00400d38,
0x00400d78,
0x00400db8,
0x00400d3c,
0x00400d7c,
0x00400dbc,
0x00400590,
0x00400594,
0x00400598,
0x0040059c,
0x004005a8,
0x004005ac,
0x004005b0,
0x004005b4,
0x004005c0,
0x004005c4,
0x004005c8,
0x004005cc,
0x004005d0,
0x004005d4,
0x004005d8,
0x004005dc,
0x004005e0,
NV04_PGRAPH_PASSTHRU_0,
NV04_PGRAPH_PASSTHRU_1,
NV04_PGRAPH_PASSTHRU_2,
NV04_PGRAPH_DVD_COLORFMT,
NV04_PGRAPH_SCALED_FORMAT,
NV04_PGRAPH_MISC24_0,
NV04_PGRAPH_MISC24_1,
NV04_PGRAPH_MISC24_2,
0x00400500,
0x00400504,
NV04_PGRAPH_VALID1,
NV04_PGRAPH_VALID2
}; };
@ -290,43 +349,35 @@ struct reg_interval
void nouveau_nv04_context_switch(struct drm_device *dev) void nouveau_nv04_context_switch(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int channel, channel_old, i, j, index; struct nouveau_channel *next, *last;
int chid;
channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
channel_old = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); next = dev_priv->fifos[chid];
DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
last = dev_priv->fifos[chid];
DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",last->id, next->id);
NV_WRITE(NV03_PFIFO_CACHES, 0x0); NV_WRITE(NV03_PFIFO_CACHES, 0x0);
NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0); NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0); NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);
NV_WRITE(NV04_PGRAPH_FIFO,0x0); NV_WRITE(NV04_PGRAPH_FIFO,0x0);
nouveau_wait_for_idle(dev); nv04_graph_save_context(last);
// save PGRAPH context nouveau_wait_for_idle(dev);
index=0;
for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
{
dev_priv->fifos[channel_old]->pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4);
index++;
}
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000); NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000);
NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24)); NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24));
// restore PGRAPH context nouveau_wait_for_idle(dev);
index=0;
for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) nv04_graph_load_context(last);
for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
{
NV_WRITE(nv04_graph_ctx_regs[i].reg+j*4, dev_priv->fifos[channel]->pgraph_ctx[index]);
index++;
}
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100); NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV04_PGRAPH_CTX_USER, channel << 24); NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24);
NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF); NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF);
NV_WRITE(NV04_PGRAPH_FIFO,0x0); NV_WRITE(NV04_PGRAPH_FIFO,0x0);
@ -336,14 +387,13 @@ void nouveau_nv04_context_switch(struct drm_device *dev)
NV_WRITE(NV04_PGRAPH_FIFO,0x1); NV_WRITE(NV04_PGRAPH_FIFO,0x1);
} }
int nv04_graph_create_context(struct drm_device *dev, int channel) { int nv04_graph_create_context(struct nouveau_channel *chan) {
struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("nv04_graph_context_create %d\n", chan->id);
DRM_DEBUG("nv04_graph_context_create %d\n", channel);
memset(dev_priv->fifos[channel]->pgraph_ctx, 0, sizeof(dev_priv->fifos[channel]->pgraph_ctx)); memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx));
//dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
dev_priv->fifos[channel]->pgraph_ctx[0] = 0x0001ffff; chan->pgraph_ctx[0] = 0x0001ffff;
/* is it really needed ??? */ /* is it really needed ??? */
//dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4); //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4);
//dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0); //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0);
@ -351,49 +401,60 @@ int nv04_graph_create_context(struct drm_device *dev, int channel) {
return 0; return 0;
} }
void nv04_graph_destroy_context(struct drm_device *dev, int channel) void nv04_graph_destroy_context(struct nouveau_channel *chan)
{ {
} }
int nv04_graph_load_context(struct drm_device *dev, int channel) int nv04_graph_load_context(struct nouveau_channel *chan)
{ {
DRM_ERROR("stub!\n"); struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
NV_WRITE(nv04_graph_ctx_regs[i], chan->pgraph_ctx[i]);
return 0; return 0;
} }
int nv04_graph_save_context(struct drm_device *dev, int channel) int nv04_graph_save_context(struct nouveau_channel *chan)
{ {
DRM_ERROR("stub!\n"); struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
chan->pgraph_ctx[i] = NV_READ(nv04_graph_ctx_regs[i]);
return 0; return 0;
} }
int nv04_graph_init(struct drm_device *dev) { int nv04_graph_init(struct drm_device *dev) {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int i,sum=0;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH); ~NV_PMC_ENABLE_PGRAPH);
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH); NV_PMC_ENABLE_PGRAPH);
/* Enable PGRAPH interrupts */
NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
// check the context is big enough // check the context is big enough
for ( i = 0 ; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++) if ( sizeof(nv04_graph_ctx_regs)>sizeof(dev_priv->fifos[0]->pgraph_ctx) )
sum+=nv04_graph_ctx_regs[i].number;
if ( sum*4>sizeof(dev_priv->fifos[0]->pgraph_ctx) )
DRM_ERROR("pgraph_ctx too small\n"); DRM_ERROR("pgraph_ctx too small\n");
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1230C000); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111101); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);
NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11D5F071); NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x0004FF31); NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x0004FF31);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x4004FF31 | NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x4004FF31 |
(0x00D00000) | (0x00D00000) |
(1<<29) | (1<<29) |
(1<<31)); (1<<31));
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);
NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100); NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100);

View file

@ -9,21 +9,18 @@ nv04_instmem_determine_amount(struct drm_device *dev)
int i; int i;
/* Figure out how much instance memory we need */ /* Figure out how much instance memory we need */
switch (dev_priv->card_type) { if (dev_priv->card_type >= NV_40) {
case NV_40:
/* We'll want more instance memory than this on some NV4x cards. /* We'll want more instance memory than this on some NV4x cards.
* There's a 16MB aperture to play with that maps onto the end * There's a 16MB aperture to play with that maps onto the end
* of vram. For now, only reserve a small piece until we know * of vram. For now, only reserve a small piece until we know
* more about what each chipset requires. * more about what each chipset requires.
*/ */
dev_priv->ramin_rsvd_vram = (1*1024* 1024); dev_priv->ramin_rsvd_vram = (1*1024* 1024);
break; } else {
default:
/*XXX: what *are* the limits on <NV40 cards?, and does RAMIN /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
* exist in vram on those cards as well? * exist in vram on those cards as well?
*/ */
dev_priv->ramin_rsvd_vram = (512*1024); dev_priv->ramin_rsvd_vram = (512*1024);
break;
} }
DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10); DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
@ -73,7 +70,6 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev)
case NV_11: case NV_11:
case NV_10: case NV_10:
case NV_04: case NV_04:
case NV_03:
default: default:
dev_priv->ramfc_offset = 0x11400; dev_priv->ramfc_offset = 0x11400;
dev_priv->ramfc_size = nouveau_fifo_number(dev) * dev_priv->ramfc_size = nouveau_fifo_number(dev) *
@ -93,17 +89,18 @@ int nv04_instmem_init(struct drm_device *dev)
nv04_instmem_determine_amount(dev); nv04_instmem_determine_amount(dev);
nv04_instmem_configure_fixed_tables(dev); nv04_instmem_configure_fixed_tables(dev);
if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
dev_priv->ramht_size,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ALLOW_NO_REFS,
&dev_priv->ramht, NULL)))
return ret;
/* Create a heap to manage RAMIN allocations, we don't allocate /* Create a heap to manage RAMIN allocations, we don't allocate
* the space that was reserved for RAMHT/FC/RO. * the space that was reserved for RAMHT/FC/RO.
*/ */
offset = dev_priv->ramfc_offset + dev_priv->ramfc_size; offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
/* On my NV4E, there's *something* clobbering the 16KiB just after
* where we setup these fixed tables. No idea what it is just yet,
* so reserve this space on all NV4X cards for now.
*/
if (dev_priv->card_type >= NV_40)
offset += 16*1024;
ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
offset, dev_priv->ramin_rsvd_vram - offset); offset, dev_priv->ramin_rsvd_vram - offset);
if (ret) { if (ret) {
@ -117,9 +114,6 @@ int nv04_instmem_init(struct drm_device *dev)
void void
nv04_instmem_takedown(struct drm_device *dev) nv04_instmem_takedown(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_gpuobj_del(dev, &dev_priv->ramht);
} }
int int
@ -139,7 +133,6 @@ nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj && gpuobj->im_backing) { if (gpuobj && gpuobj->im_backing) {
if (gpuobj->im_bound) if (gpuobj->im_bound)
dev_priv->Engine.instmem.unbind(dev, gpuobj); dev_priv->Engine.instmem.unbind(dev, gpuobj);
nouveau_mem_free(dev, gpuobj->im_backing);
gpuobj->im_backing = NULL; gpuobj->im_backing = NULL;
} }
} }

View file

@ -13,8 +13,6 @@ nv04_mc_init(struct drm_device *dev)
*/ */
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
NV_WRITE(NV03_PMC_INTR_EN_0, 0);
return 0; return 0;
} }

View file

@ -33,17 +33,17 @@
NV10_RAMFC_##offset/4, (val)) NV10_RAMFC_##offset/4, (val))
#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \ #define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
NV10_RAMFC_##offset/4) NV10_RAMFC_##offset/4)
#define NV10_RAMFC(c) (dev_priv->ramfc_offset + NV10_RAMFC__SIZE) #define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32) #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
int int
nv10_fifo_create_context(struct drm_device *dev, int channel) nv10_fifo_create_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int ret; int ret;
if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(channel), if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
NV10_RAMFC__SIZE, NV10_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_FREE,
@ -65,30 +65,29 @@ nv10_fifo_create_context(struct drm_device *dev, int channel)
0); 0);
/* enable the fifo dma operation */ /* enable the fifo dma operation */
NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel)); NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
return 0; return 0;
} }
void void
nv10_fifo_destroy_context(struct drm_device *dev, int channel) nv10_fifo_destroy_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel)); NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
if (chan->ramfc) nouveau_gpuobj_ref_del(dev, &chan->ramfc);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
} }
int int
nv10_fifo_load_context(struct drm_device *dev, int channel) nv10_fifo_load_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp; uint32_t tmp;
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel); NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | chan->id);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT));
@ -124,10 +123,10 @@ nv10_fifo_load_context(struct drm_device *dev, int channel)
} }
int int
nv10_fifo_save_context(struct drm_device *dev, int channel) nv10_fifo_save_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp; uint32_t tmp;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));

View file

@ -181,11 +181,7 @@ static void nv10_praph_pipe(struct drm_device *dev) {
nouveau_wait_for_idle(dev); nouveau_wait_for_idle(dev);
} }
/* TODO replace address with name
use loops */
static int nv10_graph_ctx_regs [] = { static int nv10_graph_ctx_regs [] = {
NV03_PGRAPH_XY_LOGIC_MISC0,
NV10_PGRAPH_CTX_SWITCH1, NV10_PGRAPH_CTX_SWITCH1,
NV10_PGRAPH_CTX_SWITCH2, NV10_PGRAPH_CTX_SWITCH2,
NV10_PGRAPH_CTX_SWITCH3, NV10_PGRAPH_CTX_SWITCH3,
@ -455,6 +451,7 @@ NV03_PGRAPH_ABS_UCLIPA_YMIN,
NV03_PGRAPH_ABS_UCLIPA_YMAX, NV03_PGRAPH_ABS_UCLIPA_YMAX,
NV03_PGRAPH_ABS_ICLIP_XMAX, NV03_PGRAPH_ABS_ICLIP_XMAX,
NV03_PGRAPH_ABS_ICLIP_YMAX, NV03_PGRAPH_ABS_ICLIP_YMAX,
NV03_PGRAPH_XY_LOGIC_MISC0,
NV03_PGRAPH_XY_LOGIC_MISC1, NV03_PGRAPH_XY_LOGIC_MISC1,
NV03_PGRAPH_XY_LOGIC_MISC2, NV03_PGRAPH_XY_LOGIC_MISC2,
NV03_PGRAPH_XY_LOGIC_MISC3, NV03_PGRAPH_XY_LOGIC_MISC3,
@ -544,82 +541,115 @@ static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
return -1; return -1;
} }
static void restore_ctx_regs(struct drm_device *dev, int channel) int nv10_graph_load_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *fifo = dev_priv->fifos[channel];
int i, j; int i, j;
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]); NV_WRITE(nv10_graph_ctx_regs[i], chan->pgraph_ctx[i]);
if (dev_priv->chipset>=0x17) { if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++) for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
NV_WRITE(nv17_graph_ctx_regs[j], fifo->pgraph_ctx[i]); NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]);
} }
nouveau_wait_for_idle(dev); NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24);
return 0;
}
int nv10_graph_save_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int i, j;
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
chan->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]);
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
chan->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
}
return 0;
} }
void nouveau_nv10_context_switch(struct drm_device *dev) void nouveau_nv10_context_switch(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv;
int channel, channel_old, i, j; struct nouveau_channel *next, *last;
int chid;
channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); if (!dev) {
channel_old = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); DRM_DEBUG("Invalid drm_device\n");
return;
}
dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_DEBUG("Invalid drm_nouveau_private\n");
return;
}
if (!dev_priv->fifos) {
DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
return;
}
DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20)&(nouveau_fifo_number(dev)-1);
next = dev_priv->fifos[chid];
if (!next) {
DRM_DEBUG("Invalid next channel\n");
return;
}
chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
last = dev_priv->fifos[chid];
if (!last) {
DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n",
next->id);
} else {
DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",
last->id, next->id);
}
NV_WRITE(NV04_PGRAPH_FIFO,0x0); NV_WRITE(NV04_PGRAPH_FIFO,0x0);
#if 0 if (last) {
NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); nv10_graph_save_context(last);
NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); }
NV_WRITE(NV_PFIFO_CACHES, 0x00000000);
#endif
// save PGRAPH context
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]);
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
dev_priv->fifos[channel_old]->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
}
nouveau_wait_for_idle(dev); nouveau_wait_for_idle(dev);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
NV_WRITE(NV10_PGRAPH_CTX_USER, (NV_READ(NV10_PGRAPH_CTX_USER) & 0xffffff) | (0x1f << 24)); NV_WRITE(NV10_PGRAPH_CTX_USER, (NV_READ(NV10_PGRAPH_CTX_USER) & 0xffffff) | (0x1f << 24));
nouveau_wait_for_idle(dev); nouveau_wait_for_idle(dev);
// restore PGRAPH context
#if 1
restore_ctx_regs(dev, channel);
#endif
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24);
NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
#if 0 nv10_graph_load_context(next);
NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001);
NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV_PFIFO_CACHES, 0x00000001); //NV_WRITE(NV10_PGRAPH_CTX_USER, next->id << 24);
#endif NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO,0x1); NV_WRITE(NV04_PGRAPH_FIFO,0x1);
} }
#define NV_WRITE_CTX(reg, val) do { \ #define NV_WRITE_CTX(reg, val) do { \
int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
if (offset > 0) \ if (offset > 0) \
fifo->pgraph_ctx[offset] = val; \ chan->pgraph_ctx[offset] = val; \
} while (0) } while (0)
int nv10_graph_create_context(struct drm_device *dev, int channel) {
int nv10_graph_create_context(struct nouveau_channel *chan) {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *fifo = dev_priv->fifos[channel];
DRM_DEBUG("nv10_graph_context_create %d\n", chan->id);
memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx));
/* mmio trace suggest that should be done in ddx with methods/objects */
#if 0
uint32_t tmp, vramsz; uint32_t tmp, vramsz;
DRM_DEBUG("nv10_graph_context_create %d\n", channel);
memset(fifo->pgraph_ctx, 0, sizeof(fifo->pgraph_ctx));
/* per channel init from ddx */ /* per channel init from ddx */
tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00; tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
/*XXX the original ddx code, does this in 2 steps : /*XXX the original ddx code, does this in 2 steps :
@ -644,17 +674,28 @@ int nv10_graph_create_context(struct drm_device *dev, int channel) {
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0); NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff); NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff); NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
#endif
NV_WRITE_CTX(0x00400e88, 0x08000000);
NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
/* is it really needed ??? */ NV_WRITE_CTX(0x00400e10, 0x00001000);
NV_WRITE_CTX(0x00400e14, 0x00001000);
NV_WRITE_CTX(0x00400e30, 0x00080008);
NV_WRITE_CTX(0x00400e34, 0x00080008);
if (dev_priv->chipset>=0x17) { if (dev_priv->chipset>=0x17) {
/* is it really needed ??? */
NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4)); NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0)); NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
NV_WRITE_CTX(0x00400eac, 0x0fff0000);
NV_WRITE_CTX(0x00400eb0, 0x0fff0000);
NV_WRITE_CTX(0x00400ec0, 0x00000080);
NV_WRITE_CTX(0x00400ed0, 0x00000080);
} }
/* for the first channel init the regs */ /* for the first channel init the regs */
if (dev_priv->fifo_alloc_count == 0) if (dev_priv->fifo_alloc_count == 0)
restore_ctx_regs(dev, channel); nv10_graph_load_context(chan);
//XXX should be saved/restored for each fifo //XXX should be saved/restored for each fifo
@ -663,20 +704,25 @@ int nv10_graph_create_context(struct drm_device *dev, int channel) {
return 0; return 0;
} }
void nv10_graph_destroy_context(struct drm_device *dev, int channel) void nv10_graph_destroy_context(struct nouveau_channel *chan)
{ {
} struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int chid;
chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
int nv10_graph_load_context(struct drm_device *dev, int channel) /* does this avoid a potential context switch while we are written graph
{ * reg, or we should mask graph interrupt ???
DRM_ERROR("stub!\n"); */
return 0; NV_WRITE(NV04_PGRAPH_FIFO,0x0);
} if (chid == chan->id) {
DRM_INFO("cleanning a channel with graph in current context\n");
int nv10_graph_save_context(struct drm_device *dev, int channel) nouveau_wait_for_idle(dev);
{ DRM_INFO("reseting current graph context\n");
DRM_ERROR("stub!\n"); nv10_graph_create_context(chan);
return 0; nv10_graph_load_context(chan);
}
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
} }
int nv10_graph_init(struct drm_device *dev) { int nv10_graph_init(struct drm_device *dev) {
@ -688,16 +734,23 @@ int nv10_graph_init(struct drm_device *dev) {
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH); NV_PMC_ENABLE_PGRAPH);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0030 | NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
(1<<29) | (1<<29) |
(1<<31)); (1<<31));
if (dev_priv->chipset>=0x17) {
NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000);
NV_WRITE(0x004006b0, 0x40000020);
}
else
NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
/* copy tile info from PFB */ /* copy tile info from PFB */
for (i=0; i<NV10_PFB_TILE__SIZE; i++) { for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
@ -707,6 +760,10 @@ int nv10_graph_init(struct drm_device *dev) {
NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i))); NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
} }
NV_WRITE(NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF); NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001); NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);

View file

@ -29,39 +29,36 @@
#define NV20_GRCTX_SIZE (3529*4) #define NV20_GRCTX_SIZE (3529*4)
int nv20_graph_create_context(struct drm_device *dev, int channel) { int nv20_graph_create_context(struct nouveau_channel *chan) {
struct drm_nouveau_private *dev_priv = struct drm_device *dev = chan->dev;
(struct drm_nouveau_private *)dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
unsigned int ctx_size = NV20_GRCTX_SIZE; unsigned int ctx_size = NV20_GRCTX_SIZE;
int ret; int ret;
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16, if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC, NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramin_grctx))) &chan->ramin_grctx)))
return ret; return ret;
/* Initialise default context values */ /* Initialise default context values */
INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, channel<<24); /* CTX_USER */ INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); /* CTX_USER */
INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id,
chan->ramin_grctx->instance >> 4); chan->ramin_grctx->instance >> 4);
return 0; return 0;
} }
void nv20_graph_destroy_context(struct drm_device *dev, int channel) { void nv20_graph_destroy_context(struct nouveau_channel *chan) {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (chan->ramin_grctx) nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
INSTANCE_WR(dev_priv->ctx_table->gpuobj, channel, 0); INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0);
} }
static void nv20_graph_rdi(struct drm_device *dev) { static void nv20_graph_rdi(struct drm_device *dev) {
struct drm_nouveau_private *dev_priv = struct drm_nouveau_private *dev_priv = dev->dev_private;
(struct drm_nouveau_private *)dev->dev_private;
int i; int i;
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000);
@ -73,13 +70,12 @@ static void nv20_graph_rdi(struct drm_device *dev) {
/* Save current context (from PGRAPH) into the channel's context /* Save current context (from PGRAPH) into the channel's context
*/ */
int nv20_graph_save_context(struct drm_device *dev, int channel) { int nv20_graph_save_context(struct nouveau_channel *chan) {
struct drm_nouveau_private *dev_priv = struct drm_device *dev = chan->dev;
(struct drm_nouveau_private *)dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t instance; uint32_t instance;
instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id);
if (!instance) { if (!instance) {
return -EINVAL; return -EINVAL;
} }
@ -94,20 +90,19 @@ int nv20_graph_save_context(struct drm_device *dev, int channel) {
/* Restore the context for a specific channel into PGRAPH /* Restore the context for a specific channel into PGRAPH
*/ */
int nv20_graph_load_context(struct drm_device *dev, int channel) { int nv20_graph_load_context(struct nouveau_channel *chan) {
struct drm_nouveau_private *dev_priv = struct drm_device *dev = chan->dev;
(struct drm_nouveau_private *)dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t instance; uint32_t instance;
instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, channel); instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id);
if (!instance) { if (!instance) {
return -EINVAL; return -EINVAL;
} }
if (instance != (chan->ramin_grctx->instance >> 4)) if (instance != (chan->ramin_grctx->instance >> 4))
DRM_ERROR("nv20_graph_load_context_current : bad instance\n"); DRM_ERROR("nv20_graph_load_context_current : bad instance\n");
NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24); NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */); NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */);
return 0; return 0;
@ -116,27 +111,32 @@ int nv20_graph_load_context(struct drm_device *dev, int channel) {
void nouveau_nv20_context_switch(struct drm_device *dev) void nouveau_nv20_context_switch(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
int channel, channel_old; struct nouveau_channel *next, *last;
int chid;
channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
channel_old = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); next = dev_priv->fifos[chid];
DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
last = dev_priv->fifos[chid];
DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
last->id, next->id);
NV_WRITE(NV04_PGRAPH_FIFO,0x0); NV_WRITE(NV04_PGRAPH_FIFO,0x0);
nv20_graph_save_context(dev, channel_old); nv20_graph_save_context(last);
nouveau_wait_for_idle(dev); nouveau_wait_for_idle(dev);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
nv20_graph_load_context(dev, channel); nv20_graph_load_context(next);
nouveau_wait_for_idle(dev); nouveau_wait_for_idle(dev);
if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != channel) if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != next->id)
DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", channel, NV_READ(NV10_PGRAPH_CTX_USER) >> 24); DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", next->id, NV_READ(NV10_PGRAPH_CTX_USER) >> 24);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
@ -157,7 +157,7 @@ int nv20_graph_init(struct drm_device *dev) {
/* Create Context Pointer Table */ /* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4; dev_priv->ctx_table_size = 32 * 4;
if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
dev_priv->ctx_table_size, 16, dev_priv->ctx_table_size, 16,
NVOBJ_FLAG_ZERO_ALLOC, NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->ctx_table))) &dev_priv->ctx_table)))
@ -169,8 +169,8 @@ int nv20_graph_init(struct drm_device *dev) {
//XXX need to be done and save/restore for each fifo ??? //XXX need to be done and save/restore for each fifo ???
nv20_graph_rdi(dev); nv20_graph_rdi(dev);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);

File diff suppressed because it is too large Load diff

View file

@ -37,13 +37,13 @@
#define NV40_RAMFC__SIZE 128 #define NV40_RAMFC__SIZE 128
int int
nv40_fifo_create_context(struct drm_device *dev, int channel) nv40_fifo_create_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int ret; int ret;
if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(channel), if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
NV40_RAMFC__SIZE, NV40_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_FREE,
@ -68,27 +68,27 @@ nv40_fifo_create_context(struct drm_device *dev, int channel)
RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
/* enable the fifo dma operation */ /* enable the fifo dma operation */
NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel)); NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
return 0; return 0;
} }
void void
nv40_fifo_destroy_context(struct drm_device *dev, int channel) nv40_fifo_destroy_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel)); NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
if (chan->ramfc) if (chan->ramfc)
nouveau_gpuobj_ref_del(dev, &chan->ramfc); nouveau_gpuobj_ref_del(dev, &chan->ramfc);
} }
int int
nv40_fifo_load_context(struct drm_device *dev, int channel) nv40_fifo_load_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp, tmp2; uint32_t tmp, tmp2;
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET)); NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
@ -135,7 +135,7 @@ nv40_fifo_load_context(struct drm_device *dev, int channel)
NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp); NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);
/* Set channel active, and in DMA mode */ /* Set channel active, and in DMA mode */
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | channel); NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | chan->id);
/* Reset DMA_CTL_AT_INFO to INVALID */ /* Reset DMA_CTL_AT_INFO to INVALID */
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31); tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
@ -144,10 +144,10 @@ nv40_fifo_load_context(struct drm_device *dev, int channel)
} }
int int
nv40_fifo_save_context(struct drm_device *dev, int channel) nv40_fifo_save_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t tmp; uint32_t tmp;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT)); RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
@ -193,3 +193,16 @@ nv40_fifo_save_context(struct drm_device *dev, int channel)
return 0; return 0;
} }
int
nv40_fifo_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
if ((ret = nouveau_fifo_init(dev)))
return ret;
NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
return 0;
}

View file

@ -1224,11 +1224,10 @@ nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
} }
int int
nv40_graph_create_context(struct drm_device *dev, int channel) nv40_graph_create_context(struct nouveau_channel *chan)
{ {
struct drm_nouveau_private *dev_priv = struct drm_device *dev = chan->dev;
(struct drm_nouveau_private *)dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
unsigned int ctx_size; unsigned int ctx_size;
int ret; int ret;
@ -1250,6 +1249,7 @@ nv40_graph_create_context(struct drm_device *dev, int channel)
ctx_size = NV49_GRCTX_SIZE; ctx_size = NV49_GRCTX_SIZE;
ctx_init = nv49_graph_context_init; ctx_init = nv49_graph_context_init;
break; break;
case 0x44:
case 0x4a: case 0x4a:
ctx_size = NV4A_GRCTX_SIZE; ctx_size = NV4A_GRCTX_SIZE;
ctx_init = nv4a_graph_context_init; ctx_init = nv4a_graph_context_init;
@ -1272,7 +1272,7 @@ nv40_graph_create_context(struct drm_device *dev, int channel)
break; break;
} }
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, ctx_size, 16, if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
NVOBJ_FLAG_ZERO_ALLOC, NVOBJ_FLAG_ZERO_ALLOC,
&chan->ramin_grctx))) &chan->ramin_grctx)))
return ret; return ret;
@ -1284,37 +1284,43 @@ nv40_graph_create_context(struct drm_device *dev, int channel)
} }
void void
nv40_graph_destroy_context(struct drm_device *dev, int channel) nv40_graph_destroy_context(struct nouveau_channel *chan)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
struct nouveau_fifo *chan = dev_priv->fifos[channel];
if (chan->ramin_grctx)
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
} }
static int static int
nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save) nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t old_cp, tv = 1000; uint32_t old_cp, tv = 1000, tmp;
int i; int i;
old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst); NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV40_PGRAPH_CTXCTL_0310,
save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310);
NV40_PGRAPH_CTXCTL_0310_XFER_LOAD); tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX); NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);
tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304);
tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
for (i = 0; i < tv; i++) { for (i = 0; i < tv; i++) {
if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0) if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
break; break;
} }
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp); NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
if (i == tv) { if (i == tv) {
DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save); uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save);
DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n",
ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
DRM_ERROR("0x40030C = 0x%08x\n", DRM_ERROR("0x40030C = 0x%08x\n",
NV_READ(NV40_PGRAPH_CTXCTL_030C)); NV_READ(NV40_PGRAPH_CTXCTL_030C));
return -EBUSY; return -EBUSY;
@ -1327,10 +1333,9 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
*XXX: fails sometimes, not sure why.. *XXX: fails sometimes, not sure why..
*/ */
int int
nv40_graph_save_context(struct drm_device *dev, int channel) nv40_graph_save_context(struct nouveau_channel *chan)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_device *dev = chan->dev;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst; uint32_t inst;
if (!chan->ramin_grctx) if (!chan->ramin_grctx)
@ -1344,10 +1349,10 @@ nv40_graph_save_context(struct drm_device *dev, int channel)
* XXX: fails sometimes.. not sure why * XXX: fails sometimes.. not sure why
*/ */
int int
nv40_graph_load_context(struct drm_device *dev, int channel) nv40_graph_load_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst; uint32_t inst;
int ret; int ret;
@ -1459,6 +1464,39 @@ static uint32_t nv43_ctx_voodoo[] = {
~0 ~0
}; };
static uint32_t nv44_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06,
0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5,
0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b,
0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d,
0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6,
0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158,
0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9,
0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0,
0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f,
0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec,
0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a,
0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691,
0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc,
0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901,
0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08,
0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8,
0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001,
0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029,
0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a,
0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000,
0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007,
0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a,
0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05,
0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
static uint32_t nv46_ctx_voodoo[] = { static uint32_t nv46_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
@ -1556,6 +1594,37 @@ static uint32_t nv4a_ctx_voodoo[] = {
0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
}; };
static uint32_t nv4c_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406,
0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6,
0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682,
0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0,
0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a,
0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a,
0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080,
0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004,
0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168,
0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68,
0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000,
0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000,
0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306,
0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
static uint32_t nv4e_ctx_voodoo[] = { static uint32_t nv4e_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
@ -1615,10 +1684,12 @@ nv40_graph_init(struct drm_device *dev)
switch (dev_priv->chipset) { switch (dev_priv->chipset) {
case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; case 0x40: ctx_voodoo = nv40_ctx_voodoo; break;
case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
case 0x44: ctx_voodoo = nv44_ctx_voodoo; break;
case 0x46: ctx_voodoo = nv46_ctx_voodoo; break; case 0x46: ctx_voodoo = nv46_ctx_voodoo; break;
case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break; case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4c: ctx_voodoo = nv4c_ctx_voodoo; break;
case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
default: default:
DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n",
@ -1642,8 +1713,8 @@ nv40_graph_init(struct drm_device *dev)
/* No context present currently */ /* No context present currently */
NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF); NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000); NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);

View file

@ -14,8 +14,6 @@ nv40_mc_init(struct drm_device *dev)
*/ */
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
NV_WRITE(NV03_PMC_INTR_EN_0, 0);
switch (dev_priv->chipset) { switch (dev_priv->chipset) {
case 0x44: case 0x44:
case 0x46: /* G72 */ case 0x46: /* G72 */

View file

@ -30,7 +30,6 @@
typedef struct { typedef struct {
struct nouveau_gpuobj_ref *thingo; struct nouveau_gpuobj_ref *thingo;
struct nouveau_gpuobj_ref *dummyctx;
} nv50_fifo_priv; } nv50_fifo_priv;
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
@ -47,7 +46,7 @@ nv50_fifo_init_thingo(struct drm_device *dev)
INSTANCE_WR(thingo->gpuobj, 0, 0x7e); INSTANCE_WR(thingo->gpuobj, 0, 0x7e);
INSTANCE_WR(thingo->gpuobj, 1, 0x7e); INSTANCE_WR(thingo->gpuobj, 1, 0x7e);
for (i = 0; i <NV_MAX_FIFO_NUMBER; i++, fi) { for (i = 1; i < 127; i++, fi) {
if (dev_priv->fifos[i]) { if (dev_priv->fifos[i]) {
INSTANCE_WR(thingo->gpuobj, fi, i); INSTANCE_WR(thingo->gpuobj, fi, i);
fi++; fi++;
@ -60,30 +59,23 @@ nv50_fifo_init_thingo(struct drm_device *dev)
} }
static int static int
nv50_fifo_channel_enable(struct drm_device *dev, int channel) nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_channel *chan = dev_priv->fifos[channel];
uint32_t inst;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", channel);
if (IS_G80) { if (!chan->ramfc)
if (!chan->ramin) return -EINVAL;
return -EINVAL;
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), if (IS_G80) inst = chan->ramfc->instance >> 12;
(chan->ramin->instance >> 12) | else inst = chan->ramfc->instance >> 8;
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
} else { inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
if (!chan->ramfc)
return -EINVAL;
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), if (!nt) nv50_fifo_init_thingo(dev);
(chan->ramfc->instance >> 8) |
NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
}
nv50_fifo_init_thingo(dev);
return 0; return 0;
} }
@ -91,16 +83,13 @@ static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt) nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
DRM_DEBUG("ch%d, nt=%d\n", channel, nt); DRM_DEBUG("ch%d, nt=%d\n", channel, nt);
if (IS_G80) { if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80); NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst);
} else {
NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84);
}
if (!nt) nv50_fifo_init_thingo(dev); if (!nt) nv50_fifo_init_thingo(dev);
} }
@ -119,6 +108,17 @@ nv50_fifo_init_reset(struct drm_device *dev)
NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO); NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO);
} }
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
static void static void
nv50_fifo_init_context_table(struct drm_device *dev) nv50_fifo_init_context_table(struct drm_device *dev)
{ {
@ -145,18 +145,9 @@ static int
nv50_fifo_init_regs(struct drm_device *dev) nv50_fifo_init_regs(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
int ret;
DRM_DEBUG("\n"); DRM_DEBUG("\n");
if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, 0x1000,
0x1000,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&priv->dummyctx)))
return ret;
NV_WRITE(0x2500, 0); NV_WRITE(0x2500, 0);
NV_WRITE(0x3250, 0); NV_WRITE(0x3250, 0);
NV_WRITE(0x3220, 0); NV_WRITE(0x3220, 0);
@ -164,13 +155,9 @@ nv50_fifo_init_regs(struct drm_device *dev)
NV_WRITE(0x3210, 0); NV_WRITE(0x3210, 0);
NV_WRITE(0x3270, 0); NV_WRITE(0x3270, 0);
if (IS_G80) { /* Enable dummy channels setup by nv50_instmem.c */
NV_WRITE(0x2600, (priv->dummyctx->instance>>8) | (1<<31)); nv50_fifo_channel_enable(dev, 0, 1);
NV_WRITE(0x27fc, (priv->dummyctx->instance>>8) | (1<<31)); nv50_fifo_channel_enable(dev, 127, 1);
} else {
NV_WRITE(0x2600, (priv->dummyctx->instance>>12) | (1<<31));
NV_WRITE(0x27fc, (priv->dummyctx->instance>>12) | (1<<31));
}
return 0; return 0;
} }
@ -190,13 +177,15 @@ nv50_fifo_init(struct drm_device *dev)
dev_priv->Engine.fifo.priv = priv; dev_priv->Engine.fifo.priv = priv;
nv50_fifo_init_reset(dev); nv50_fifo_init_reset(dev);
nv50_fifo_init_intr(dev);
if ((ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, (128+2)*4, 0x1000, if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, (128+2)*4, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, NVOBJ_FLAG_ZERO_ALLOC,
&priv->thingo))) { &priv->thingo))) {
DRM_ERROR("error creating thingo: %d\n", ret); DRM_ERROR("error creating thingo: %d\n", ret);
return ret; return ret;
} }
nv50_fifo_init_context_table(dev); nv50_fifo_init_context_table(dev);
nv50_fifo_init_regs__nv(dev); nv50_fifo_init_regs__nv(dev);
@ -218,31 +207,32 @@ nv50_fifo_takedown(struct drm_device *dev)
return; return;
nouveau_gpuobj_ref_del(dev, &priv->thingo); nouveau_gpuobj_ref_del(dev, &priv->thingo);
nouveau_gpuobj_ref_del(dev, &priv->dummyctx);
dev_priv->Engine.fifo.priv = NULL; dev_priv->Engine.fifo.priv = NULL;
drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
} }
int int
nv50_fifo_create_context(struct drm_device *dev, int channel) nv50_fifo_create_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
struct nouveau_gpuobj *ramfc = NULL; struct nouveau_gpuobj *ramfc = NULL;
int ret; int ret;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
if (IS_G80) { if (IS_G80) {
uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start; uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start;
if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, 0x100, uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start;
if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset,
vram_offset, 0x100,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_FREE,
&ramfc, &chan->ramfc))) &ramfc, &chan->ramfc)))
return ret; return ret;
} else { } else {
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, 0x100, if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100,
256, 256,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_FREE,
@ -266,15 +256,15 @@ nv50_fifo_create_context(struct drm_device *dev, int channel)
INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1); INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1);
if (!IS_G80) { if (!IS_G80) {
INSTANCE_WR(chan->ramin->gpuobj, 0, channel); INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id);
INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance); INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance);
INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */ INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */
INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12); INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12);
} }
if ((ret = nv50_fifo_channel_enable(dev, channel))) { if ((ret = nv50_fifo_channel_enable(dev, chan->id, 0))) {
DRM_ERROR("error enabling ch%d: %d\n", channel, ret); DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret);
nouveau_gpuobj_ref_del(dev, &chan->ramfc); nouveau_gpuobj_ref_del(dev, &chan->ramfc);
return ret; return ret;
} }
@ -283,25 +273,29 @@ nv50_fifo_create_context(struct drm_device *dev, int channel)
} }
void void
nv50_fifo_destroy_context(struct drm_device *dev, int channel) nv50_fifo_destroy_context(struct nouveau_channel *chan)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_device *dev = chan->dev;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
nv50_fifo_channel_disable(dev, chan->id, 0);
/* Dummy channel, also used on ch 127 */
if (chan->id == 0)
nv50_fifo_channel_disable(dev, 127, 0);
nv50_fifo_channel_disable(dev, channel, 0);
nouveau_gpuobj_ref_del(dev, &chan->ramfc); nouveau_gpuobj_ref_del(dev, &chan->ramfc);
} }
int int
nv50_fifo_load_context(struct drm_device *dev, int channel) nv50_fifo_load_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj; struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
/*XXX: incomplete, only touches the regs that NV does */ /*XXX: incomplete, only touches the regs that NV does */
@ -319,14 +313,14 @@ nv50_fifo_load_context(struct drm_device *dev, int channel)
NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4)); NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4));
} }
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, channel | (1<<16)); NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
return 0; return 0;
} }
int int
nv50_fifo_save_context(struct drm_device *dev, int channel) nv50_fifo_save_context(struct nouveau_channel *chan)
{ {
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
DRM_ERROR("stub!\n"); DRM_ERROR("stub!\n");
return 0; return 0;
} }

View file

@ -44,6 +44,16 @@ nv50_graph_init_reset(struct drm_device *dev)
NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH); NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH);
} }
static void
nv50_graph_init_intr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff);
NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff);
}
static void static void
nv50_graph_init_regs__nv(struct drm_device *dev) nv50_graph_init_regs__nv(struct drm_device *dev)
{ {
@ -59,7 +69,6 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
NV_WRITE(0x402000, 0xc0000000); NV_WRITE(0x402000, 0xc0000000);
NV_WRITE(0x400108, 0xffffffff); NV_WRITE(0x400108, 0xffffffff);
NV_WRITE(0x400100, 0xffffffff);
NV_WRITE(0x400824, 0x00004000); NV_WRITE(0x400824, 0x00004000);
NV_WRITE(0x400500, 0x00010001); NV_WRITE(0x400500, 0x00010001);
@ -174,6 +183,7 @@ nv50_graph_init(struct drm_device *dev)
DRM_DEBUG("\n"); DRM_DEBUG("\n");
nv50_graph_init_reset(dev); nv50_graph_init_reset(dev);
nv50_graph_init_intr(dev);
nv50_graph_init_regs__nv(dev); nv50_graph_init_regs__nv(dev);
nv50_graph_init_regs(dev); nv50_graph_init_regs(dev);
nv50_graph_init_ctxctl(dev); nv50_graph_init_ctxctl(dev);
@ -188,17 +198,18 @@ nv50_graph_takedown(struct drm_device *dev)
} }
int int
nv50_graph_create_context(struct drm_device *dev, int channel) nv50_graph_create_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel]; struct nouveau_engine *engine = &dev_priv->Engine;
struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
int grctx_size = 0x60000, hdr; int grctx_size = 0x60000, hdr;
int ret; int ret;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
if ((ret = nouveau_gpuobj_new_ref(dev, channel, -1, 0, if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
grctx_size, 0x1000, grctx_size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, NVOBJ_FLAG_ZERO_FREE,
@ -214,17 +225,22 @@ nv50_graph_create_context(struct drm_device *dev, int channel)
INSTANCE_WR(ramin, (hdr + 0x10)/4, 0); INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);
INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000); INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000);
if ((ret = engine->graph.load_context(chan))) {
DRM_ERROR("Error hacking up initial context: %d\n", ret);
return ret;
}
return 0; return 0;
} }
void void
nv50_graph_destroy_context(struct drm_device *dev, int channel) nv50_graph_destroy_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
int i, hdr; int i, hdr;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
hdr = IS_G80 ? 0x200 : 0x20; hdr = IS_G80 ? 0x200 : 0x20;
for (i=hdr; i<hdr+24; i+=4) for (i=hdr; i<hdr+24; i+=4)
@ -266,14 +282,14 @@ nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
} }
int int
nv50_graph_load_context(struct drm_device *dev, int channel) nv50_graph_load_context(struct nouveau_channel *chan)
{ {
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31));
int ret; (void)ret; int ret; (void)ret;
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
#if 0 #if 0
if ((ret = nv50_graph_transfer_context(dev, inst, 0))) if ((ret = nv50_graph_transfer_context(dev, inst, 0)))
@ -288,13 +304,12 @@ nv50_graph_load_context(struct drm_device *dev, int channel)
} }
int int
nv50_graph_save_context(struct drm_device *dev, int channel) nv50_graph_save_context(struct nouveau_channel *chan)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_device *dev = chan->dev;
struct nouveau_fifo *chan = dev_priv->fifos[channel];
uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31));
DRM_DEBUG("ch%d\n", channel); DRM_DEBUG("ch%d\n", chan->id);
return nv50_graph_transfer_context(dev, inst, 1); return nv50_graph_transfer_context(dev, inst, 1);
} }

View file

@ -31,118 +31,162 @@
typedef struct { typedef struct {
uint32_t save1700[5]; /* 0x1700->0x1710 */ uint32_t save1700[5]; /* 0x1700->0x1710 */
struct nouveau_gpuobj_ref *pramin_pt;
struct nouveau_gpuobj_ref *pramin_bar;
} nv50_instmem_priv; } nv50_instmem_priv;
#define NV50_INSTMEM_PAGE_SHIFT 12 #define NV50_INSTMEM_PAGE_SHIFT 12
#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT) #define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
#define NV50_INSTMEM_RSVD_SIZE (64 * 1024)
#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3) #define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
*/
#define BAR0_WI32(g,o,v) do { \
uint32_t offset; \
if ((g)->im_backing) { \
offset = (g)->im_backing->start; \
} else { \
offset = chan->ramin->gpuobj->im_backing->start; \
offset += (g)->im_pramin->start; \
} \
offset += (o); \
NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \
} while(0)
int int
nv50_instmem_init(struct drm_device *dev) nv50_instmem_init(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan;
uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
nv50_instmem_priv *priv; nv50_instmem_priv *priv;
uint32_t rv, pt, pts, cb, cb0, cb1, unk, as; int ret, i;
uint32_t i, v; uint32_t v;
int ret;
priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER); priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
if (!priv) if (!priv)
return -ENOMEM; return -ENOMEM;
dev_priv->Engine.instmem.priv = priv; dev_priv->Engine.instmem.priv = priv;
/* Save current state */ /* Reserve the last MiB of VRAM, we should probably try to avoid
for (i = 0x1700; i <= 0x1710; i+=4) * setting up the below tables over the top of the VBIOS image at
priv->save1700[(i-0x1700)/4] = NV_READ(i); * some point.
as = dev_priv->ramin->size;
rv = nouveau_mem_fb_amount(dev) - (1*1024*1024);
pt = rv + 0xd0000;
pts = NV50_INSTMEM_PT_SIZE(as);
cb = rv + 0xc8000;
if ((dev_priv->chipset & 0xf0) != 0x50) {
unk = cb + 0x4200;
cb0 = cb + 0x4240;
cb1 = cb + 0x278;
} else {
unk = cb + 0x5400;
cb0 = cb + 0x5440;
cb1 = cb + 0x1478;
}
DRM_DEBUG("PRAMIN config:\n");
DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", rv);
DRM_DEBUG(" Aperture size: %i MiB\n", as >> 20);
DRM_DEBUG(" PT base: 0x%08x\n", pt);
DRM_DEBUG(" PT size: %d KiB\n", pts >> 10);
DRM_DEBUG(" BIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8);
DRM_DEBUG(" Config base: 0x%08x\n", cb);
DRM_DEBUG(" ctxdma Config0: 0x%08x\n", cb0);
DRM_DEBUG(" Config1: 0x%08x\n", cb1);
/* Map first MiB of reserved vram into BAR0 PRAMIN aperture */
NV_WRITE(0x1700, (rv>>16));
/* Poke some regs.. */
NV_WRITE(0x1704, (cb>>12));
NV_WRITE(0x1710, (((unk-cb)>>4))|(1<<31));
NV_WRITE(0x1704, (cb>>12)|(1<<30));
/* CB0, some DMA object, NFI what it points at... Needed however,
* or the PRAMIN aperture doesn't operate as expected.
*/ */
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x00, 0x7fc00000); dev_priv->ramin_rsvd_vram = 1 << 20;
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x04, 0xe1ffffff); c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x08, 0xe0000000); c_size = 128 << 10;
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x0c, 0x01000001); c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x10, 0x00000000); c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
NV_WRITE(NV_RAMIN + (cb0 - rv) + 0x14, 0x00000000); c_base = c_vmpd + 0x4000;
pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size);
/* CB1, points at PRAMIN PT */ DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset);
NV_WRITE(NV_RAMIN + (cb1 - rv) + 0, pt | 0x63); DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8);
NV_WRITE(NV_RAMIN + (cb1 - rv) + 4, 0x00000000); DRM_DEBUG(" Aperture size: %d MiB\n",
(uint32_t)dev_priv->ramin->size >> 20);
DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10);
/* Zero PRAMIN page table */ NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
v = NV_RAMIN + (pt - rv);
for (i = v; i < v + pts; i += 8) {
NV_WRITE(i + 0x00, 0x00000009);
NV_WRITE(i + 0x04, 0x00000000);
}
/* Map page table into PRAMIN aperture */ /* Create a fake channel, and use it as our "dummy" channels 0/127.
for (i = pt; i < pt + pts; i += 0x1000) { * The main reason for creating a channel is so we can use the gpuobj
uint32_t pte = NV_RAMIN + (pt-rv) + (((i-pt) >> 12) << 3); * code. However, it's probably worth noting that NVIDIA also setup
DRM_DEBUG("PRAMIN PTE = 0x%08x @ 0x%08x\n", i, pte); * their channels 0/127 with the same values they configure here.
NV_WRITE(pte + 0x00, i | 1); * So, there may be some other reason for doing this.
NV_WRITE(pte + 0x04, 0x00000000); *
} * Have to create the entire channel manually, as the real channel
* creation code assumes we have PRAMIN access, and we don't until
/* Points at CB0 */ * we're done here.
NV_WRITE(0x170c, (((cb0 - cb)>>4)|(1<<31)));
/* Confirm it all worked, should be able to read back the page table's
* PTEs from the PRAMIN BAR
*/ */
NV_WRITE(0x1700, pt >> 16); chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER);
if (NV_READ(0x700000) != NV_RI32(0)) { if (!chan)
DRM_ERROR("Failed to init PRAMIN page table\n");
return -EINVAL;
}
/* Create a heap to manage PRAMIN aperture allocations */
ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, pts, as-pts);
if (ret) {
DRM_ERROR("Failed to init PRAMIN heap\n");
return -ENOMEM; return -ENOMEM;
chan->id = 0;
chan->dev = dev;
chan->file_priv = (struct drm_file *)-2;
dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
/* Channel's PRAMIN object + heap */
if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0,
NULL, &chan->ramin)))
return ret;
if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
return -ENOMEM;
/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
0x4000, 0, NULL, &chan->ramfc)))
return ret;
for (i = 0; i < c_vmpd; i += 4)
BAR0_WI32(chan->ramin->gpuobj, i, 0);
/* VM page directory */
if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
0x4000, 0, &chan->vm_pd, NULL)))
return ret;
for (i = 0; i < 0x4000; i += 8) {
BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
} }
DRM_DEBUG("NV50: PRAMIN setup ok\n");
/* Don't alloc the last MiB of VRAM, probably too much, but be safe /* PRAMIN page table, cheat and map into VM at 0x0000000000.
* at least for now. * We map the entire fake channel into the start of the PRAMIN BAR
*/ */
dev_priv->ramin_rsvd_vram = 1*1024*1024; if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
0, &priv->pramin_pt)))
return ret;
/*XXX: probably incorrect, but needed to make hash func "work" */ for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) {
if (v < (c_offset + c_size))
BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
else
BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
}
BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
/* DMA object for PRAMIN BAR */
if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
&priv->pramin_bar)))
return ret;
BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1);
BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
/* Poke the relevant regs, and pray it works :) */
NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
NV_WRITE(NV50_PUNK_UNK1710, 0);
NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
NV50_PUNK_BAR_CFG_BASE_VALID);
NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0);
NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
NV50_PUNK_BAR3_CTXDMA_VALID);
/* Assume that praying isn't enough, check that we can re-read the
* entire fake channel back from the PRAMIN BAR */
for (i = 0; i < c_size; i+=4) {
if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) {
DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i);
return -EINVAL;
}
}
/* Global PRAMIN heap */
if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
c_size, dev_priv->ramin->size - c_size)) {
dev_priv->ramin_heap = NULL;
DRM_ERROR("Failed to init RAMIN heap\n");
}
/*XXX: incorrect, but needed to make hash func "work" */
dev_priv->ramht_offset = 0x10000; dev_priv->ramht_offset = 0x10000;
dev_priv->ramht_bits = 9; dev_priv->ramht_bits = 9;
dev_priv->ramht_size = (1 << dev_priv->ramht_bits); dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
@ -154,8 +198,11 @@ nv50_instmem_takedown(struct drm_device *dev)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv; nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
struct nouveau_channel *chan = dev_priv->fifos[0];
int i; int i;
DRM_DEBUG("\n");
if (!priv) if (!priv)
return; return;
@ -163,6 +210,20 @@ nv50_instmem_takedown(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i+=4) for (i = 0x1700; i <= 0x1710; i+=4)
NV_WRITE(i, priv->save1700[(i-0x1700)/4]); NV_WRITE(i, priv->save1700[(i-0x1700)/4]);
nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
/* Destroy dummy channel */
if (chan) {
nouveau_gpuobj_del(dev, &chan->vm_pd);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
nouveau_gpuobj_ref_del(dev, &chan->ramin);
nouveau_mem_takedown(&chan->ramin_heap);
dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
}
dev_priv->Engine.instmem.priv = NULL; dev_priv->Engine.instmem.priv = NULL;
drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER); drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
} }
@ -205,6 +266,7 @@ int
nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
uint32_t pte, pte_end, vram; uint32_t pte, pte_end, vram;
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
@ -217,19 +279,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
vram = gpuobj->im_backing->start; vram = gpuobj->im_backing->start;
if (pte == pte_end) {
DRM_ERROR("WARNING: badness in bind() pte calc\n");
pte_end++;
}
DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n", DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end); gpuobj->im_pramin->start, pte, pte_end);
DRM_DEBUG("first vram page: 0x%llx\n", DRM_DEBUG("first vram page: 0x%llx\n",
gpuobj->im_backing->start); gpuobj->im_backing->start);
while (pte < pte_end) { while (pte < pte_end) {
NV_WI32(pte + 0, vram | 1); INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
NV_WI32(pte + 4, 0x00000000); INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
pte += 8; pte += 8;
vram += NV50_INSTMEM_PAGE_SIZE; vram += NV50_INSTMEM_PAGE_SIZE;
@ -243,6 +300,7 @@ int
nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{ {
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
uint32_t pte, pte_end; uint32_t pte, pte_end;
if (gpuobj->im_bound == 0) if (gpuobj->im_bound == 0)
@ -251,8 +309,8 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte = (gpuobj->im_pramin->start >> 12) << 3; pte = (gpuobj->im_pramin->start >> 12) << 3;
pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
while (pte < pte_end) { while (pte < pte_end) {
NV_WI32(pte + 0, 0x00000000); INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
NV_WI32(pte + 4, 0x00000000); INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
pte += 8; pte += 8;
} }

View file

@ -34,6 +34,7 @@ nv50_mc_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_private *dev_priv = dev->dev_private;
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
return 0; return 0;
} }

View file

@ -222,11 +222,7 @@ typedef struct drm_r128_init {
R128_INIT_CCE = 0x01, R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02 R128_CLEANUP_CCE = 0x02
} func; } func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int sarea_priv_offset;
#else
unsigned long sarea_priv_offset; unsigned long sarea_priv_offset;
#endif
int is_pci; int is_pci;
int cce_mode; int cce_mode;
int cce_secure; int cce_secure;
@ -240,21 +236,12 @@ typedef struct drm_r128_init {
unsigned int depth_offset, depth_pitch; unsigned int depth_offset, depth_pitch;
unsigned int span_offset; unsigned int span_offset;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
unsigned int fb_offset;
unsigned int mmio_offset;
unsigned int ring_offset;
unsigned int ring_rptr_offset;
unsigned int buffers_offset;
unsigned int agp_textures_offset;
#else
unsigned long fb_offset; unsigned long fb_offset;
unsigned long mmio_offset; unsigned long mmio_offset;
unsigned long ring_offset; unsigned long ring_offset;
unsigned long ring_rptr_offset; unsigned long ring_rptr_offset;
unsigned long buffers_offset; unsigned long buffers_offset;
unsigned long agp_textures_offset; unsigned long agp_textures_offset;
#endif
} drm_r128_init_t; } drm_r128_init_t;
typedef struct drm_r128_cce_stop { typedef struct drm_r128_cce_stop {
@ -264,15 +251,10 @@ typedef struct drm_r128_cce_stop {
typedef struct drm_r128_clear { typedef struct drm_r128_clear {
unsigned int flags; unsigned int flags;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int x, y, w, h;
#endif
unsigned int clear_color; unsigned int clear_color;
unsigned int clear_depth; unsigned int clear_depth;
#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
unsigned int color_mask; unsigned int color_mask;
unsigned int depth_mask; unsigned int depth_mask;
#endif
} drm_r128_clear_t; } drm_r128_clear_t;
typedef struct drm_r128_vertex { typedef struct drm_r128_vertex {

View file

@ -1,5 +1,6 @@
AM_CFLAGS = \ AM_CFLAGS = \
-I $(top_srcdir)/shared-core -I $(top_srcdir)/shared-core \
-I $(top_srcdir)/libdrm
noinst_PROGRAMS = \ noinst_PROGRAMS = \
dristat \ dristat \
@ -14,10 +15,15 @@ libdrmtest_la_LIBADD = \
LDADD = libdrmtest.la LDADD = libdrmtest.la
TESTS = openclose \ TESTS = auth \
openclose \
getversion \ getversion \
getclient \ getclient \
getstats \
lock \
setversion \
updatedraw updatedraw
EXTRA_PROGRAMS = $(TESTS) EXTRA_PROGRAMS = $(TESTS)
CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES) CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES)

137
tests/auth.c Normal file
View file

@ -0,0 +1,137 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <limits.h>
#include <sys/socket.h>

#include "drmtest.h"
enum auth_event {
SERVER_READY,
CLIENT_MAGIC,
CLIENT_DONE,
};
int commfd[2];
static void wait_event(int pipe, enum auth_event expected_event)
{
int ret;
enum auth_event event;
unsigned char in;
ret = read(commfd[pipe], &in, 1);
if (ret == -1)
err(1, "read error");
event = in;
if (event != expected_event)
errx(1, "unexpected event: %d\n", event);
}
static void
send_event(int pipe, enum auth_event send_event)
{
int ret;
unsigned char event;
event = send_event;
ret = write(commfd[pipe], &event, 1);
if (ret == -1)
err(1, "failed to send event %d", event);
}
static void client()
{
struct drm_auth auth;
int drmfd, ret;
/* XXX: Should make sure we open the same DRM as the master */
drmfd = drm_open_any();
wait_event(0, SERVER_READY);
/* Get a client magic number and pass it to the master for auth. */
auth.magic = 0; /* Quiet valgrind */
ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth);
if (ret == -1)
err(1, "Couldn't get client magic");
send_event(0, CLIENT_MAGIC);
ret = write(commfd[0], &auth.magic, sizeof(auth.magic));
if (ret == -1)
err(1, "Couldn't write auth data");
/* Signal that the client is completely done. */
send_event(0, CLIENT_DONE);
}
static void server()
{
int drmfd, ret;
struct drm_auth auth;
drmfd = drm_open_any_master();
auth.magic = 0xd0d0d0d0;
ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
if (ret != -1 || errno != EINVAL)
errx(1, "Authenticating bad magic succeeded\n");
send_event(1, SERVER_READY);
wait_event(1, CLIENT_MAGIC);
ret = read(commfd[1], &auth.magic, sizeof(auth.magic));
if (ret == -1)
err(1, "Failure to read client magic");
ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
if (ret == -1)
err(1, "Failure to authenticate client magic\n");
wait_event(1, CLIENT_DONE);
}
/**
* Checks DRM authentication mechanisms.
*/
int main(int argc, char **argv)
{
int ret;
ret = pipe(commfd);
if (ret == -1)
err(1, "Couldn't create pipe");
ret = fork();
if (ret == -1)
err(1, "failure to fork client");
if (ret == 0)
client();
else
server();
return 0;
}

51
tests/getstats.c Normal file
View file

@ -0,0 +1,51 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <limits.h>
#include "drmtest.h"
/**
* Checks DRM_IOCTL_GET_STATS.
*
* I don't care too much about the actual contents, just that the kernel
* doesn't crash.
*/
int main(int argc, char **argv)
{
int fd, ret;
drm_stats_t stats;
fd = drm_open_any();
ret = ioctl(fd, DRM_IOCTL_GET_STATS, &stats);
assert(ret == 0);
assert(stats.count >= 0);
close(fd);
return 0;
}

262
tests/lock.c Normal file
View file

@ -0,0 +1,262 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/** @file lock.c
* Tests various potential failures of the DRM locking mechanisms
*/
#include <limits.h>
#include <sys/socket.h>

#include "drmtest.h"
enum auth_event {
SERVER_READY,
CLIENT_MAGIC,
SERVER_LOCKED,
CLIENT_LOCKED,
};
int commfd[2];
unsigned int lock1 = 0x00001111;
unsigned int lock2 = 0x00002222;
/* return time in milliseconds */
static unsigned int
get_millis()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
static void
wait_event(int pipe, enum auth_event expected_event)
{
int ret;
enum auth_event event;
unsigned char in;
ret = read(commfd[pipe], &in, 1);
if (ret == -1)
err(1, "read error");
event = in;
if (event != expected_event)
errx(1, "unexpected event: %d\n", event);
}
static void
send_event(int pipe, enum auth_event send_event)
{
int ret;
unsigned char event;
event = send_event;
ret = write(commfd[pipe], &event, 1);
if (ret == -1)
err(1, "failed to send event %d", event);
}
static void
client_auth(int drmfd)
{
struct drm_auth auth;
int ret;
wait_event(0, SERVER_READY);
/* Get a client magic number and pass it to the master for auth. */
ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth);
if (ret == -1)
err(1, "Couldn't get client magic");
send_event(0, CLIENT_MAGIC);
ret = write(commfd[0], &auth.magic, sizeof(auth.magic));
if (ret == -1)
err(1, "Couldn't write auth data");
}
static void
server_auth(int drmfd)
{
struct drm_auth auth;
int ret;
send_event(1, SERVER_READY);
wait_event(1, CLIENT_MAGIC);
ret = read(commfd[1], &auth.magic, sizeof(auth.magic));
if (ret == -1)
err(1, "Failure to read client magic");
ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
if (ret == -1)
err(1, "Failure to authenticate client magic\n");
}
/** Tests that locking is successful in normal conditions */
static void
test_lock_unlock(int drmfd)
{
int ret;
ret = drmGetLock(drmfd, lock1, 0);
if (ret != 0)
err(1, "Locking failed");
ret = drmUnlock(drmfd, lock1);
if (ret != 0)
err(1, "Unlocking failed");
}
/** Tests that unlocking the lock while it's not held works correctly */
static void
test_unlock_unlocked(int drmfd)
{
int ret;
ret = drmUnlock(drmfd, lock1);
if (ret == 0)
err(1, "Unlocking unlocked lock succeeded");
}
/** Tests that unlocking a lock held by another context fails appropriately */
static void
test_unlock_unowned(int drmfd)
{
int ret;
ret = drmGetLock(drmfd, lock1, 0);
assert(ret == 0);
ret = drmUnlock(drmfd, lock2);
if (ret == 0)
errx(1, "Unlocking other context's lock succeeded");
ret = drmUnlock(drmfd, lock1);
assert(ret == 0);
}
/**
* Tests that an open/close by the same process doesn't result in the lock
* being dropped.
*/
static void test_open_close_locked(drmfd)
{
int ret, tempfd;
ret = drmGetLock(drmfd, lock1, 0);
assert(ret == 0);
/* XXX: Need to make sure that this is the same device as drmfd */
tempfd = drm_open_any();
close(tempfd);
ret = drmUnlock(drmfd, lock1);
if (ret != 0)
errx(1, "lock lost during open/close by same pid");
close(drmfd);
}
static void client()
{
int drmfd, ret;
unsigned int time;
/* XXX: Should make sure we open the same DRM as the master */
drmfd = drm_open_any();
client_auth(drmfd);
/* Wait for the server to grab the lock, then grab it ourselves (to
* contest it). Hopefully we hit it within the window of when the
* server locks.
*/
wait_event(0, SERVER_LOCKED);
ret = drmGetLock(drmfd, lock2, 0);
time = get_millis();
if (ret != 0)
err(1, "Failed to get lock on client\n");
drmUnlock(drmfd, lock2);
/* Tell the server that our locking completed, and when it did */
send_event(0, CLIENT_LOCKED);
ret = write(commfd[0], &time, sizeof(time));
exit(0);
}
static void server()
{
int drmfd, tempfd, ret;
unsigned int client_time, unlock_time;
drmfd = drm_open_any_master();
test_lock_unlock(drmfd);
test_unlock_unlocked(drmfd);
test_unlock_unowned(drmfd);
test_open_close_locked(drmfd);
/* Perform the authentication sequence with the client. */
server_auth(drmfd);
/* Now, test that the client attempting to lock while the server
* holds the lock works correctly.
*/
ret = drmGetLock(drmfd, lock1, 0);
assert(ret == 0);
send_event(1, SERVER_LOCKED);
/* Wait a while for the client to do its thing */
sleep(1);
ret = drmUnlock(drmfd, lock1);
assert(ret == 0);
unlock_time = get_millis();
wait_event(1, CLIENT_LOCKED);
ret = read(commfd[1], &client_time, sizeof(client_time));
if (ret == -1)
err(1, "Failure to read client magic");
if (client_time < unlock_time)
errx(1, "Client took lock before server released it");
}
int main(int argc, char **argv)
{
int ret;
ret = pipe(commfd);
if (ret == -1)
err(1, "Couldn't create pipe");
ret = fork();
if (ret == -1)
err(1, "failure to fork client");
if (ret == 0)
client();
else
server();
return 0;
}

84
tests/setversion.c Normal file
View file

@ -0,0 +1,84 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <limits.h>
#include "drmtest.h"
/**
* Checks DRM_IOCTL_SET_VERSION.
*
* This tests that we can get the actual version out, and that setting invalid
* major/minor numbers fails appropriately. It does not check the actual
 * behavior differences resulting from an increased DI version.
*/
int main(int argc, char **argv)
{
int fd, ret;
drm_set_version_t sv, version;
fd = drm_open_any_master();
/* First, check that we can get the DD/DI versions. */
memset(&version, 0, sizeof(version));
version.drm_di_major = -1;
version.drm_di_minor = -1;
version.drm_dd_major = -1;
version.drm_dd_minor = -1;
ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &version);
assert(ret == 0);
assert(version.drm_di_major != -1);
assert(version.drm_di_minor != -1);
assert(version.drm_dd_major != -1);
assert(version.drm_dd_minor != -1);
/* Check that an invalid DI major fails */
sv = version;
sv.drm_di_major++;
ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
assert(ret == -1 && errno == EINVAL);
/* Check that an invalid DI minor fails */
sv = version;
sv.drm_di_major++;
ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
assert(ret == -1 && errno == EINVAL);
/* Check that an invalid DD major fails */
sv = version;
sv.drm_dd_major++;
ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
assert(ret == -1 && errno == EINVAL);
/* Check that an invalid DD minor fails */
sv = version;
sv.drm_dd_minor++;
ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
assert(ret == -1 && errno == EINVAL);
close(fd);
return 0;
}