Merge branch 'master' into xgi-0-0-1-local

Conflicts:

	linux-core/Makefile
	linux-core/Makefile.kernel
	shared-core/drm_pciids.txt
Ian Romanick 2007-06-21 14:34:25 -07:00
commit 5347c99f0e
212 changed files with 16119 additions and 7637 deletions

.gitignore

@ -1,54 +1,55 @@
*-core/linux
*-core/drm.h
*-core/drm_sarea.h
*-core/i915_dma.c
*-core/i915_drm.h
*-core/i915_drv.h
*-core/i915_irq.c
*-core/i915_mem.c
*-core/mach64_dma.c
*-core/mach64_drm.h
*-core/mach64_drv.h
*-core/mach64_irq.c
*-core/mach64_state.c
*-core/mga_dma.c
*-core/mga_drm.h
*-core/mga_drv.h
*-core/mga_irq.c
*-core/mga_state.c
*-core/mga_ucode.h
*-core/mga_warp.c
*-core/nv_drv.h
*-core/r128_cce.c
*-core/r128_drm.h
*-core/r128_drv.h
*-core/r128_irq.c
*-core/r128_state.c
*-core/r300_cmdbuf.c
*-core/r300_reg.h
*-core/radeon_cp.c
*-core/radeon_drm.h
*-core/radeon_drv.h
*-core/radeon_irq.c
*-core/radeon_mem.c
*-core/radeon_state.c
*-core/savage_bci.c
*-core/savage_drm.h
*-core/savage_drv.h
*-core/savage_state.c
*-core/sis_drm.h
*-core/sis_drv.h
*-core/tdfx_drv.h
*-core/via_3d_reg.h
*-core/via_dma.c
*-core/via_drm.h
*-core/via_drv.c
*-core/via_drv.h
*-core/via_irq.c
*-core/via_map.c
*-core/via_verifier.c
*-core/via_verifier.h
*-core/via_video.c
bsd-core/linux
bsd-core/drm.h
bsd-core/drm_sarea.h
bsd-core/i915_dma.c
bsd-core/i915_drm.h
bsd-core/i915_drv.h
bsd-core/i915_irq.c
bsd-core/i915_mem.c
bsd-core/mach64_dma.c
bsd-core/mach64_drm.h
bsd-core/mach64_drv.h
bsd-core/mach64_irq.c
bsd-core/mach64_state.c
bsd-core/mga_dma.c
bsd-core/mga_drm.h
bsd-core/mga_drv.h
bsd-core/mga_irq.c
bsd-core/mga_state.c
bsd-core/mga_ucode.h
bsd-core/mga_warp.c
bsd-core/nv_drv.h
bsd-core/r128_cce.c
bsd-core/r128_drm.h
bsd-core/r128_drv.h
bsd-core/r128_irq.c
bsd-core/r128_state.c
bsd-core/r300_cmdbuf.c
bsd-core/r300_reg.h
bsd-core/radeon_cp.c
bsd-core/radeon_drm.h
bsd-core/radeon_drv.h
bsd-core/radeon_irq.c
bsd-core/radeon_mem.c
bsd-core/radeon_state.c
bsd-core/savage_bci.c
bsd-core/savage_drm.h
bsd-core/savage_drv.h
bsd-core/savage_state.c
bsd-core/sis_drm.h
bsd-core/sis_drv.h
bsd-core/tdfx_drv.h
bsd-core/via_3d_reg.h
bsd-core/via_dma.c
bsd-core/via_drm.h
bsd-core/via_drv.c
bsd-core/via_drv.h
bsd-core/via_irq.c
bsd-core/via_map.c
bsd-core/via_verifier.c
bsd-core/via_verifier.h
bsd-core/via_video.c
*~
*.flags
*.ko
*.ko.cmd
@ -74,6 +75,7 @@ config.log
config.status
config.sub
configure
cscope.*
depcomp
device_if.h
drm.kld
@ -81,6 +83,7 @@ drm_pciids.h
export_syms
i915.kld
install-sh
libdrm/config.h.in
libdrm.pc
libtool
ltmain.sh

README

@ -1,4 +1,69 @@
By default, this will install into /usr/local. If you want to install this
libdrm to replace your system copy, say:
./configure --prefix=/usr --exec-prefix=/
DRM README file
There are two main parts to this package: the DRM client library/interface
(libdrm.so) and kernel/hardware-specific device modules (such as i915.ko).
Compiling
---------
By default, libdrm and the DRM header files will install into /usr/local/.
If you want to install this DRM to replace your system copy, say:
./configure --prefix=/usr --exec-prefix=/
Then,
make install
To build the device-specific kernel modules:
cd linux-core/
make
cp *.ko /lib/modules/VERSION/kernel/drivers/char/drm/
(where VERSION is your kernel version: uname -r)
Or,
cd bsd-core/
make
copy the kernel modules to the appropriate place
Tips & Trouble-shooting
-----------------------
1. You'll need kernel sources. If using Fedora Core 5, for example, you may
need to install RPMs such as:
kernel-smp-devel-2.6.15-1.2054_FC5.i686.rpm
kernel-devel-2.6.15-1.2054_FC5.i686.rpm
etc.
2. You may need to make a symlink from /lib/modules/VERSION/build to your
kernel sources in /usr/src/kernels/VERSION (where VERSION is `uname -r`):
cd /lib/modules/VERSION
ln -s /usr/src/kernels/VERSION build
3. If you've built the kernel modules but they won't load because of an
error like this:
$ /sbin/modprobe drm
FATAL: Error inserting drm (/lib/modules/2.6.15-1.2054_FC5smp/kernel/drivers/char/drm/drm.ko): Invalid module format
And 'dmesg|tail' says:
drm: disagrees about version of symbol struct_module
Try recompiling your drm modules without the Module.symvers file.
That is, rm (or rename) the /usr/src/kernels/2.6.15-1.2054_FC5-smp-i686/Module.symvers
file, then do a 'make clean' before rebuilding your drm modules.


@ -69,4 +69,3 @@ drm_pciids.h: ${SHARED}/drm_pciids.txt
${SHAREDFILES}:
ln -sf ${SHARED}/$@ $@


@ -32,8 +32,6 @@
#include "drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
#define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */
#define ATI_PCIGART_TABLE_SIZE 32768
int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
@ -48,7 +46,7 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
/* GART table in system memory */
dev->sg->dmah = drm_pci_alloc(dev, ATI_PCIGART_TABLE_SIZE, 0,
dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size, 0,
0xfffffffful);
if (dev->sg->dmah == NULL) {
DRM_ERROR("cannot allocate PCI GART table!\n");
@ -63,9 +61,9 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
pci_gart = gart_info->addr;
}
pages = DRM_MIN(dev->sg->pages, ATI_MAX_PCIGART_PAGES);
pages = DRM_MIN(dev->sg->pages, gart_info->table_size / sizeof(u32));
bzero(pci_gart, ATI_PCIGART_TABLE_SIZE);
bzero(pci_gart, gart_info->table_size);
KASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE, ("page size too small"));
@ -73,10 +71,17 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
page_base = (u32) dev->sg->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
if (gart_info->is_pcie)
*pci_gart = (cpu_to_le32(page_base) >> 8) | 0xc;
else
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
*pci_gart = cpu_to_le32(page_base | 0xc);
break;
case DRM_ATI_GART_PCIE:
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
break;
default:
*pci_gart = cpu_to_le32(page_base);
break;
}
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
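This hunk replaces the old is_pcie flag with the three-way gart_reg_if selector and makes the GART table size caller-supplied instead of the fixed ATI_PCIGART_TABLE_SIZE. A minimal sketch of how a driver might now fill in drm_ati_pcigart_info before calling drm_ati_pcigart_init(); the 32 KB table size is illustrative, matching the old constant:

    drm_ati_pcigart_info gart_info;

    gart_info.gart_table_location = DRM_ATI_GART_MAIN; /* table in system RAM */
    gart_info.gart_reg_if = DRM_ATI_GART_PCIE;         /* PCIE entry format */
    gart_info.table_size = 32768;                      /* bytes; 8192 u32 entries */
    gart_info.addr = NULL;

    if (!drm_ati_pcigart_init(dev, &gart_info))
            DRM_ERROR("failed to init PCI GART!\n");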


@ -47,6 +47,9 @@ typedef struct drm_file drm_file_t;
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/stat.h>
#if __FreeBSD_version >= 700000
#include <sys/priv.h>
#endif
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
@ -230,7 +233,11 @@ enum {
#define PAGE_ALIGN(addr) round_page(addr)
/* DRM_SUSER returns true if the user is superuser */
#if __FreeBSD_version >= 700000
#define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0)
#else
#define DRM_SUSER(p) (suser(p) == 0)
#endif
#define DRM_AGP_FIND_DEVICE() agp_find_device()
#define DRM_MTRR_WC MDF_WRITECOMBINE
#define jiffies ticks
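With this change, DRM_SUSER resolves to priv_check(p, PRIV_DRIVER) on FreeBSD 7 and later, and falls back to suser() on older releases. A sketch of the typical call site in a privileged ioctl, assuming the usual DRM_CURPROC macro names the calling thread:

    /* hypothetical privileged-ioctl guard */
    if (!DRM_SUSER(DRM_CURPROC))
            return DRM_ERR(EACCES);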
@ -394,19 +401,6 @@ do { \
} \
} while (0)
#define DRM_GETSAREA() \
do { \
drm_local_map_t *map; \
DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
TAILQ_FOREACH(map, &dev->maplist, link) { \
if (map->type == _DRM_SHM && \
map->flags & _DRM_CONTAINS_LOCK) { \
dev_priv->sarea = map; \
break; \
} \
} \
} while (0)
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
@ -627,12 +621,17 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
#define DRM_ATI_GART_PCI 1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
typedef struct ati_pcigart_info {
int gart_table_location;
int is_pcie;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
int table_size;
} drm_ati_pcigart_info;
struct drm_driver_info {
@ -822,6 +821,7 @@ dev_type_read(drm_read);
dev_type_poll(drm_poll);
dev_type_mmap(drm_mmap);
#endif
extern drm_local_map_t *drm_getsarea(drm_device_t *dev);
/* File operations helpers (drm_fops.c) */
#ifdef __FreeBSD__


@ -43,7 +43,7 @@ static int
drm_device_find_capability(drm_device_t *dev, int cap)
{
#ifdef __FreeBSD__
#if __FreeBSD_version >= 700010
#if __FreeBSD_version >= 602102
return (pci_find_extcap(dev->device, cap, NULL) == 0);
#else


@ -316,6 +316,9 @@ void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
case _DRM_CONSISTENT:
drm_pci_free(dev, map->dmah);
break;
default:
DRM_ERROR("Bad map type %d\n", map->type);
break;
}
if (map->bsr != NULL) {


@ -1 +0,0 @@
../shared-core/drm_drawable.c

bsd-core/drm_drawable.c (new file)

@ -0,0 +1,51 @@
/* drm_drawable.c -- IOCTLs for drawables -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include "drmP.h"
int drm_adddraw(DRM_IOCTL_ARGS)
{
drm_draw_t draw;
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) );
return 0;
}
int drm_rmdraw(DRM_IOCTL_ARGS)
{
return 0; /* NOOP */
}


@ -912,6 +912,18 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
return DRM_ERR(retcode);
}
drm_local_map_t *drm_getsarea(drm_device_t *dev)
{
drm_local_map_t *map;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
return map;
}
return NULL;
}
#if DRM_LINUX
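The drm_getsarea() helper added above supersedes the DRM_GETSAREA() macro removed from drmP.h earlier in this commit. A sketch of the replacement pattern in a driver, assuming a driver-private dev_priv with a sarea field; note that drm_getsarea() asserts dev_lock is held:

    DRM_SPINLOCK(&dev->dev_lock);
    dev_priv->sarea = drm_getsarea(dev);
    DRM_SPINUNLOCK(&dev->dev_lock);
    if (dev_priv->sarea == NULL) {
            DRM_ERROR("could not find sarea!\n");
            return DRM_ERR(EINVAL);
    }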


@ -102,11 +102,13 @@ int drm_irq_install(drm_device_t *dev)
retcode = ENOENT;
goto err;
}
#if __FreeBSD_version < 500000
retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY,
dev->irq_handler, dev, &dev->irqh);
#if __FreeBSD_version >= 700031
retcode = bus_setup_intr(dev->device, dev->irqr,
INTR_TYPE_TTY | INTR_MPSAFE,
NULL, drm_irq_handler_wrap, dev, &dev->irqh);
#else
retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE,
retcode = bus_setup_intr(dev->device, dev->irqr,
INTR_TYPE_TTY | INTR_MPSAFE,
drm_irq_handler_wrap, dev, &dev->irqh);
#endif
if (retcode != 0)


@ -19,7 +19,7 @@
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libdrm], 2.3.0, [dri-devel@lists.sourceforge.net], libdrm)
AC_INIT([libdrm], 2.3.1, [dri-devel@lists.sourceforge.net], libdrm)
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])


@ -31,10 +31,6 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
@ -148,13 +144,15 @@ void *drmGetHashTable(void)
void *drmMalloc(int size)
{
void *pt;
if ((pt = malloc(size))) memset(pt, 0, size);
if ((pt = malloc(size)))
memset(pt, 0, size);
return pt;
}
void drmFree(void *pt)
{
if (pt) free(pt);
if (pt)
free(pt);
}
/* drmStrdup can't use strdup(3), since it doesn't call _DRM_MALLOC... */
@ -190,7 +188,8 @@ drmHashEntry *drmGetEntry(int fd)
void *value;
drmHashEntry *entry;
if (!drmHashTable) drmHashTable = drmHashCreate();
if (!drmHashTable)
drmHashTable = drmHashCreate();
if (drmHashLookup(drmHashTable, key, &value)) {
entry = drmMalloc(sizeof(*entry));
@ -287,7 +286,8 @@ static int drmOpenDevice(long dev, int minor)
}
if (stat(DRM_DIR_NAME, &st)) {
if (!isroot) return DRM_ERR_NOT_ROOT;
if (!isroot)
return DRM_ERR_NOT_ROOT;
mkdir(DRM_DIR_NAME, DRM_DEV_DIRMODE);
chown(DRM_DIR_NAME, 0, 0); /* root:root */
chmod(DRM_DIR_NAME, DRM_DEV_DIRMODE);
@ -295,7 +295,8 @@ static int drmOpenDevice(long dev, int minor)
/* Check if the device node exists and create it if necessary. */
if (stat(buf, &st)) {
if (!isroot) return DRM_ERR_NOT_ROOT;
if (!isroot)
return DRM_ERR_NOT_ROOT;
remove(buf);
mknod(buf, S_IFCHR | devmode, dev);
}
@ -308,13 +309,15 @@ static int drmOpenDevice(long dev, int minor)
fd = open(buf, O_RDWR, 0);
drmMsg("drmOpenDevice: open result is %d, (%s)\n",
fd, fd < 0 ? strerror(errno) : "OK");
if (fd >= 0) return fd;
if (fd >= 0)
return fd;
/* Check if the device node is not what we expect it to be, and recreate it
* and try again if so.
*/
if (st.st_rdev != dev) {
if (!isroot) return DRM_ERR_NOT_ROOT;
if (!isroot)
return DRM_ERR_NOT_ROOT;
remove(buf);
mknod(buf, S_IFCHR | devmode, dev);
if (drm_server_info) {
@ -325,7 +328,8 @@ static int drmOpenDevice(long dev, int minor)
fd = open(buf, O_RDWR, 0);
drmMsg("drmOpenDevice: open result is %d, (%s)\n",
fd, fd < 0 ? strerror(errno) : "OK");
if (fd >= 0) return fd;
if (fd >= 0)
return fd;
drmMsg("drmOpenDevice: Open failed\n");
remove(buf);
@ -350,10 +354,12 @@ static int drmOpenMinor(int minor, int create)
int fd;
char buf[64];
if (create) return drmOpenDevice(makedev(DRM_MAJOR, minor), minor);
if (create)
return drmOpenDevice(makedev(DRM_MAJOR, minor), minor);
sprintf(buf, DRM_DEV_NAME, DRM_DIR_NAME, minor);
if ((fd = open(buf, O_RDWR, 0)) >= 0) return fd;
if ((fd = open(buf, O_RDWR, 0)) >= 0)
return fd;
return -errno;
}
@ -377,7 +383,8 @@ int drmAvailable(void)
if ((fd = drmOpenMinor(0, 1)) < 0) {
#ifdef __linux__
/* Try proc for backward Linux compatibility */
if (!access("/proc/dri/0", R_OK)) return 1;
if (!access("/proc/dri/0", R_OK))
return 1;
#endif
return 0;
}
@ -428,7 +435,8 @@ static int drmOpenByBusid(const char *busid)
drmFreeBusid(buf);
return fd;
}
if (buf) drmFreeBusid(buf);
if (buf)
drmFreeBusid(buf);
close(fd);
}
}
@ -458,13 +466,13 @@ static int drmOpenByName(const char *name)
char * id;
if (!drmAvailable()) {
if (!drm_server_info)
if (!drm_server_info) {
return -1;
}
else {
/* try to load the kernel module now */
if (!drm_server_info->load_module(name)) {
drmMsg("[drm] failed to load kernel module \"%s\"\n",
name);
drmMsg("[drm] failed to load kernel module \"%s\"\n", name);
return -1;
}
}
@ -552,16 +560,13 @@ int drmOpen(const char *name, const char *busid)
if (!drmAvailable() && name != NULL && drm_server_info) {
/* try to load the kernel */
if (!drm_server_info->load_module(name)) {
drmMsg("[drm] failed to load kernel module \"%s\"\n",
name);
drmMsg("[drm] failed to load kernel module \"%s\"\n", name);
return -1;
}
}
if (busid) {
int fd;
fd = drmOpenByBusid(busid);
int fd = drmOpenByBusid(busid);
if (fd >= 0)
return fd;
}
@ -584,10 +589,11 @@ int drmOpen(const char *name, const char *busid)
*/
void drmFreeVersion(drmVersionPtr v)
{
if (!v) return;
if (v->name) drmFree(v->name);
if (v->date) drmFree(v->date);
if (v->desc) drmFree(v->desc);
if (!v)
return;
drmFree(v->name);
drmFree(v->date);
drmFree(v->desc);
drmFree(v);
}
@ -603,10 +609,11 @@ void drmFreeVersion(drmVersionPtr v)
*/
static void drmFreeKernelVersion(drm_version_t *v)
{
if (!v) return;
if (v->name) drmFree(v->name);
if (v->date) drmFree(v->date);
if (v->desc) drmFree(v->desc);
if (!v)
return;
drmFree(v->name);
drmFree(v->date);
drmFree(v->desc);
drmFree(v);
}
@ -760,9 +767,11 @@ char *drmGetBusid(int fd)
u.unique_len = 0;
u.unique = NULL;
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL;
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique = drmMalloc(u.unique_len + 1);
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u)) return NULL;
if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique[u.unique_len] = '\0';
return u.unique;
@ -799,7 +808,8 @@ int drmGetMagic(int fd, drm_magic_t * magic)
drm_auth_t auth;
*magic = 0;
if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth)) return -errno;
if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
return -errno;
*magic = auth.magic;
return 0;
}
@ -809,7 +819,8 @@ int drmAuthMagic(int fd, drm_magic_t magic)
drm_auth_t auth;
auth.magic = magic;
if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth)) return -errno;
if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
return -errno;
return 0;
}
@ -873,8 +884,10 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
map.handle = 0;
map.type = type;
map.flags = flags;
if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map)) return -errno;
if (handle) *handle = (drm_handle_t)map.handle;
if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
return 0;
}
@ -884,7 +897,8 @@ int drmRmMap(int fd, drm_handle_t handle)
map.handle = (void *)handle;
if(ioctl(fd, DRM_IOCTL_RM_MAP, &map)) return -errno;
if(ioctl(fd, DRM_IOCTL_RM_MAP, &map))
return -errno;
return 0;
}
@ -916,7 +930,8 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
request.flags = flags;
request.agp_start = agp_offset;
if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request)) return -errno;
if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request))
return -errno;
return request.count;
}
@ -928,9 +943,11 @@ int drmMarkBufs(int fd, double low, double high)
info.count = 0;
info.list = NULL;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) return -EINVAL;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return -EINVAL;
if (!info.count) return -EINVAL;
if (!info.count)
return -EINVAL;
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
return -ENOMEM;
@ -976,7 +993,8 @@ int drmFreeBufs(int fd, int count, int *list)
request.count = count;
request.list = list;
if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request)) return -errno;
if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request))
return -errno;
return 0;
}
@ -1024,7 +1042,8 @@ int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address)
{
static unsigned long pagesize_mask = 0;
if (fd < 0) return -EINVAL;
if (fd < 0)
return -EINVAL;
if (!pagesize_mask)
pagesize_mask = getpagesize() - 1;
@ -1032,7 +1051,8 @@ int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address)
size = (size + pagesize_mask) & ~pagesize_mask;
*address = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle);
if (*address == MAP_FAILED) return -errno;
if (*address == MAP_FAILED)
return -errno;
return 0;
}
@ -1062,7 +1082,8 @@ drmBufInfoPtr drmGetBufInfo(int fd)
info.count = 0;
info.list = NULL;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) return NULL;
if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return NULL;
if (info.count) {
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
@ -1112,9 +1133,11 @@ drmBufMapPtr drmMapBufs(int fd)
bufs.count = 0;
bufs.list = NULL;
bufs.virtual = NULL;
if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) return NULL;
if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
return NULL;
if (!bufs.count) return NULL;
if (!bufs.count)
return NULL;
if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
return NULL;
@ -1269,20 +1292,25 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
res.count = 0;
res.contexts = NULL;
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) return NULL;
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
if (!res.count) return NULL;
if (!res.count)
return NULL;
if (!(list = drmMalloc(res.count * sizeof(*list)))) return NULL;
if (!(list = drmMalloc(res.count * sizeof(*list))))
return NULL;
if (!(retval = drmMalloc(res.count * sizeof(*retval)))) {
drmFree(list);
return NULL;
}
res.contexts = list;
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) return NULL;
if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
for (i = 0; i < res.count; i++) retval[i] = list[i].handle;
for (i = 0; i < res.count; i++)
retval[i] = list[i].handle;
drmFree(list);
*count = res.count;
@ -1317,7 +1345,8 @@ int drmCreateContext(int fd, drm_context_t *handle)
drm_ctx_t ctx;
ctx.flags = 0; /* Modified with functions below */
if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx)) return -errno;
if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
return -errno;
*handle = ctx.handle;
return 0;
}
@ -1327,7 +1356,8 @@ int drmSwitchToContext(int fd, drm_context_t context)
drm_ctx_t ctx;
ctx.handle = context;
if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx)) return -errno;
if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
return -errno;
return 0;
}
@ -1343,9 +1373,12 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
*/
ctx.handle = context;
ctx.flags = 0;
if (flags & DRM_CONTEXT_PRESERVED) ctx.flags |= _DRM_CONTEXT_PRESERVED;
if (flags & DRM_CONTEXT_2DONLY) ctx.flags |= _DRM_CONTEXT_2DONLY;
if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx)) return -errno;
if (flags & DRM_CONTEXT_PRESERVED)
ctx.flags |= _DRM_CONTEXT_PRESERVED;
if (flags & DRM_CONTEXT_2DONLY)
ctx.flags |= _DRM_CONTEXT_2DONLY;
if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
return -errno;
return 0;
}
@ -1355,10 +1388,13 @@ int drmGetContextFlags(int fd, drm_context_t context,
drm_ctx_t ctx;
ctx.handle = context;
if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx)) return -errno;
if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx))
return -errno;
*flags = 0;
if (ctx.flags & _DRM_CONTEXT_PRESERVED) *flags |= DRM_CONTEXT_PRESERVED;
if (ctx.flags & _DRM_CONTEXT_2DONLY) *flags |= DRM_CONTEXT_2DONLY;
if (ctx.flags & _DRM_CONTEXT_PRESERVED)
*flags |= DRM_CONTEXT_PRESERVED;
if (ctx.flags & _DRM_CONTEXT_2DONLY)
*flags |= DRM_CONTEXT_2DONLY;
return 0;
}
@ -1383,14 +1419,16 @@ int drmDestroyContext(int fd, drm_context_t handle)
{
drm_ctx_t ctx;
ctx.handle = handle;
if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx)) return -errno;
if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx))
return -errno;
return 0;
}
int drmCreateDrawable(int fd, drm_drawable_t *handle)
{
drm_draw_t draw;
if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw)) return -errno;
if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
return -errno;
*handle = draw.handle;
return 0;
}
@ -1399,7 +1437,8 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)
{
drm_draw_t draw;
draw.handle = handle;
if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw)) return -errno;
if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw))
return -errno;
return 0;
}
@ -1414,7 +1453,8 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
update.num = num;
update.data = (unsigned long long)(unsigned long)data;
if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update)) return -errno;
if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
return -errno;
return 0;
}
@ -1433,7 +1473,8 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
*/
int drmAgpAcquire(int fd)
{
if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
return -errno;
return 0;
}
@ -1450,7 +1491,8 @@ int drmAgpAcquire(int fd)
*/
int drmAgpRelease(int fd)
{
if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
return -errno;
return 0;
}
@ -1472,7 +1514,8 @@ int drmAgpEnable(int fd, unsigned long mode)
drm_agp_mode_t m;
m.mode = mode;
if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
return -errno;
return 0;
}
@ -1502,8 +1545,10 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
b.size = size;
b.handle = 0;
b.type = type;
if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b)) return -errno;
if (address != 0UL) *address = b.physical;
if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
return -errno;
if (address != 0UL)
*address = b.physical;
*handle = b.handle;
return 0;
}
@ -1527,7 +1572,8 @@ int drmAgpFree(int fd, drm_handle_t handle)
b.size = 0;
b.handle = handle;
if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b))
return -errno;
return 0;
}
@ -1551,7 +1597,8 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
b.handle = handle;
b.offset = offset;
if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b))
return -errno;
return 0;
}
@ -1574,7 +1621,8 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
b.handle = handle;
b.offset = 0;
if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
return -errno;
return 0;
}
@ -1594,7 +1642,8 @@ int drmAgpVersionMajor(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_major;
}
@ -1614,7 +1663,8 @@ int drmAgpVersionMinor(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return -errno;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_minor;
}
@ -1634,7 +1684,8 @@ unsigned long drmAgpGetMode(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.mode;
}
@ -1654,7 +1705,8 @@ unsigned long drmAgpBase(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_base;
}
@ -1674,7 +1726,8 @@ unsigned long drmAgpSize(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_size;
}
@ -1694,7 +1747,8 @@ unsigned long drmAgpMemoryUsed(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_used;
}
@ -1714,7 +1768,8 @@ unsigned long drmAgpMemoryAvail(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_allowed;
}
@ -1734,7 +1789,8 @@ unsigned int drmAgpVendorId(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_vendor;
}
@ -1754,7 +1810,8 @@ unsigned int drmAgpDeviceId(int fd)
{
drm_agp_info_t i;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) return 0;
if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_device;
}
@ -1765,7 +1822,8 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
*handle = 0;
sg.size = size;
sg.handle = 0;
if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg)) return -errno;
if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
return -errno;
*handle = sg.handle;
return 0;
}
@ -1776,7 +1834,8 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
sg.size = 0;
sg.handle = handle;
if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg)) return -errno;
if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
return -errno;
return 0;
}
@ -1806,12 +1865,21 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
int drmError(int err, const char *label)
{
switch (err) {
case DRM_ERR_NO_DEVICE: fprintf(stderr, "%s: no device\n", label); break;
case DRM_ERR_NO_ACCESS: fprintf(stderr, "%s: no access\n", label); break;
case DRM_ERR_NOT_ROOT: fprintf(stderr, "%s: not root\n", label); break;
case DRM_ERR_INVALID: fprintf(stderr, "%s: invalid args\n", label);break;
case DRM_ERR_NO_DEVICE:
fprintf(stderr, "%s: no device\n", label);
break;
case DRM_ERR_NO_ACCESS:
fprintf(stderr, "%s: no access\n", label);
break;
case DRM_ERR_NOT_ROOT:
fprintf(stderr, "%s: not root\n", label);
break;
case DRM_ERR_INVALID:
fprintf(stderr, "%s: invalid args\n", label);
break;
default:
if (err < 0) err = -err;
if (err < 0)
err = -err;
fprintf( stderr, "%s: error %d (%s)\n", label, err, strerror(err) );
break;
}
@ -1837,7 +1905,8 @@ int drmCtlInstHandler(int fd, int irq)
ctl.func = DRM_INST_HANDLER;
ctl.irq = irq;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl)) return -errno;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@ -1859,7 +1928,8 @@ int drmCtlUninstHandler(int fd)
ctl.func = DRM_UNINST_HANDLER;
ctl.irq = 0;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl)) return -errno;
if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@ -1875,7 +1945,8 @@ int drmFinish(int fd, int context, drmLockFlags flags)
if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL;
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
if (ioctl(fd, DRM_IOCTL_FINISH, &lock)) return -errno;
if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
return -errno;
return 0;
}
@ -1900,7 +1971,8 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
p.busnum = busnum;
p.devnum = devnum;
p.funcnum = funcnum;
if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p)) return -errno;
if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
return -errno;
return p.irq;
}
@ -1927,7 +1999,8 @@ void *drmGetContextTag(int fd, drm_context_t context)
drmHashEntry *entry = drmGetEntry(fd);
void *value;
if (drmHashLookup(entry->tagTable, context, &value)) return NULL;
if (drmHashLookup(entry->tagTable, context, &value))
return NULL;
return value;
}
@ -1940,7 +2013,8 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
map.handle = (void *)handle;
if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map)) return -errno;
if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
return -errno;
return 0;
}
@ -1951,8 +2025,10 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map)) return -errno;
if (handle) *handle = (drm_handle_t)map.handle;
if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
return 0;
}
@ -1964,7 +2040,8 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
drm_map_t map;
map.offset = idx;
if (ioctl(fd, DRM_IOCTL_GET_MAP, &map)) return -errno;
if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
return -errno;
*offset = map.offset;
*size = map.size;
*type = map.type;
@ -1980,7 +2057,8 @@ int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
drm_client_t client;
client.idx = idx;
if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client)) return -errno;
if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
return -errno;
*auth = client.auth;
*pid = client.pid;
*uid = client.uid;
@ -1994,7 +2072,8 @@ int drmGetStats(int fd, drmStatsT *stats)
drm_stats_t s;
int i;
if (ioctl(fd, DRM_IOCTL_GET_STATS, &s)) return -errno;
if (ioctl(fd, DRM_IOCTL_GET_STATS, &s))
return -errno;
stats->count = 0;
memset(stats, 0, sizeof(*stats));
@ -2267,12 +2346,13 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
* DRM_FENCE_MASK_DRIVER
*/
int drmFenceCreate(int fd, unsigned flags, int class,unsigned type,
int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
drmFence *fence)
{
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.flags = flags;
arg.type = type;
arg.class = class;
arg.op = drm_fence_create;
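Besides the whitespace fix in the prototype, this hunk starts copying the caller's type into the ioctl argument, which the old code silently dropped. A hedged usage sketch, assuming the DRM_FENCE_FLAG_EMIT and DRM_FENCE_TYPE_EXE constants from this tree's drm.h:

    drmFence fence;

    /* create a fence in class 0 covering command-execution completion */
    if (drmFenceCreate(fd, DRM_FENCE_FLAG_EMIT, 0, DRM_FENCE_TYPE_EXE, &fence))
            fprintf(stderr, "fence creation failed\n");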
@ -2384,13 +2464,9 @@ int drmFenceUpdate(int fd, drmFence *fence)
int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType,
int *signaled)
{
int
ret;
if ((fence->flags & DRM_FENCE_FLAG_SHAREABLE) ||
((fenceType & fence->signaled) != fenceType)) {
ret = drmFenceFlush(fd, fence, fenceType);
int ret = drmFenceFlush(fd, fence, fenceType);
if (ret)
return ret;
}
@ -2412,6 +2488,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
drm_fence_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.class = fence->class;
arg.flags = flags;
arg.handle = fence->handle;
arg.type = emit_type;
@ -2501,7 +2578,7 @@ void drmBOFreeList(drmBOList *list)
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
free(node);
l = list->free.next;
l = list->list.next;
list->numCurrent--;
list->numOnList--;
}
@ -2516,8 +2593,8 @@ void drmBOFreeList(drmBOList *list)
}
}
int drmBOResetList(drmBOList *list) {
int drmBOResetList(drmBOList *list)
{
drmMMListHead *l;
int ret;
@ -2526,7 +2603,7 @@ int drmBOResetList(drmBOList *list) {
return ret;
l = list->list.next;
while(l != &list->list) {
while (l != &list->list) {
DRMLISTDEL(l);
DRMLISTADD(l, &list->free);
list->numOnList--;
@ -2549,7 +2626,8 @@ static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
return NULL;
}
list->numCurrent++;
} else {
}
else {
DRMLISTDEL(l);
node = DRMLISTENTRY(drmBONode, l, head);
}
@ -2586,7 +2664,6 @@ drmBO *drmBOListBuf(void *iterator)
drmBONode *node;
drmMMListHead *l = (drmMMListHead *) iterator;
node = DRMLISTENTRY(drmBONode, l, head);
return node->buf;
}
@ -2601,8 +2678,7 @@ int drmBOCreateList(int numTarget, drmBOList *list)
return drmAdjustListNodes(list);
}
static void drmBOCopyReply(const drm_bo_arg_reply_t *rep,
drmBO *buf)
static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, drmBO *buf)
{
buf->handle = rep->handle;
buf->flags = rep->flags;
@ -2616,8 +2692,6 @@ static void drmBOCopyReply(const drm_bo_arg_reply_t *rep,
buf->pageAlignment = rep->page_alignment;
}
int drmBOCreate(int fd, unsigned long start, unsigned long size,
unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
unsigned mask,
@ -2706,7 +2780,6 @@ int drmBODestroy(int fd, drmBO *buf)
int drmBOReference(int fd, unsigned handle, drmBO *buf)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
@ -2739,7 +2812,6 @@ int drmBOUnReference(int fd, drmBO *buf)
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
(void) munmap(buf->mapVirtual, buf->start + buf->size);
buf->mapVirtual = NULL;
@ -2772,7 +2844,6 @@ int drmBOUnReference(int fd, drmBO *buf)
int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address)
{
drm_bo_arg_t arg;
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
@ -2812,7 +2883,7 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
} while (ret != 0 && errno == EAGAIN);
if (ret)
return ret;
return -errno;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
@ -2832,7 +2903,6 @@ int drmBOUnmap(int fd, drmBO *buf)
drm_bo_arg_request_t *req = &arg.d.req;
drm_bo_arg_reply_t *rep = &arg.d.rep;
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
req->op = drm_bo_unmap;
@ -2868,7 +2938,7 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
} while (ret && errno == EAGAIN);
if (ret)
return ret;
return -errno;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
@ -2890,12 +2960,12 @@ int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
req->handle = buf->handle;
req->mask = flags;
req->arg_handle = fenceHandle;
req->op = drm_bo_validate;
req->op = drm_bo_fence;
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
if (ret)
return ret;
return -errno;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
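Note the one-line bug fix above: drmBOFence() previously submitted drm_bo_validate as the operation, so a fence request was dispatched as a validate. With the corrected op, attaching an existing fence to a buffer looks like the following sketch (fence.handle coming from an earlier drmFenceCreate or drmFenceEmit):

    /* attach the fence to buf; the flags argument lands in req->mask */
    if (drmBOFence(fd, &buf, DRM_FENCE_TYPE_EXE, fence.handle))
            fprintf(stderr, "could not fence buffer object\n");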
@ -2917,7 +2987,7 @@ int drmBOInfo(int fd, drmBO *buf)
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
if (ret)
return ret;
return -errno;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
@ -2945,7 +3015,7 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
} while (ret && errno == EAGAIN);
if (ret)
return ret;
return -errno;
if (!arg.handled)
return -EFAULT;
if (rep->ret)
@ -2961,7 +3031,8 @@ int drmBOBusy(int fd, drmBO *buf, int *busy)
!(buf->replyFlags & DRM_BO_REP_BUSY)) {
*busy = 0;
return 0;
} else {
}
else {
int ret = drmBOInfo(fd, buf);
if (ret)
return ret;
@ -2997,7 +3068,8 @@ int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
*newItem = 1;
cur->arg0 = flags;
cur->arg1 = mask;
} else {
}
else {
unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
unsigned memFlags = cur->arg0 & flags & memMask;
@ -3029,7 +3101,6 @@ int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
int drmBOValidateList(int fd, drmBOList *list)
{
drmBONode *node;
drmMMListHead *l;
drm_bo_arg_t *arg, *first;
@ -3065,11 +3136,10 @@ int drmBOValidateList(int fd, drmBOList *list)
if (!first)
return 0;
do{
do {
ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
} while (ret && errno == EAGAIN);
if (ret)
return -errno;
@ -3095,7 +3165,6 @@ int drmBOValidateList(int fd, drmBOList *list)
int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
{
drmBONode *node;
drmMMListHead *l;
drm_bo_arg_t *arg, *first;
@ -3138,10 +3207,8 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
rep = &arg->d.rep;
if (!arg->handled)
return -EFAULT;
if (rep->ret)
@ -3165,7 +3232,6 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
return 0;
}
@ -3173,14 +3239,12 @@ int drmMMTakedown(int fd, unsigned memType)
{
drm_mm_init_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.req.op = mm_takedown;
arg.req.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
return 0;
}
@ -3197,7 +3261,7 @@ int drmMMLock(int fd, unsigned memType)
ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
} while (ret && errno == EAGAIN);
return ret;
return (ret) ? -errno : 0;
}
int drmMMUnlock(int fd, unsigned memType)
@ -3213,7 +3277,7 @@ int drmMMUnlock(int fd, unsigned memType)
ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
} while (ret && errno == EAGAIN);
return ret;
return (ret) ? -errno : 0;
}
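drmMMLock() and drmMMUnlock() previously returned the raw ioctl(2) result, i.e. -1 with the detail only in errno; both now return 0 or -errno like the rest of the API. Callers can therefore report failures directly, as in this sketch (DRM_BO_MEM_VRAM as the memory type is illustrative):

    int ret = drmMMLock(fd, DRM_BO_MEM_VRAM);

    if (ret)
            fprintf(stderr, "drmMMLock: %s\n", strerror(-ret));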
#define DRM_MAX_FDS 16


@ -31,8 +31,6 @@
*
*/
/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/xf86drm.h,v 1.26 2003/08/16 19:26:37 dawes Exp $ */
#ifndef _XF86DRM_H_
#define _XF86DRM_H_
@ -270,6 +268,7 @@ typedef struct _drmTextureRegion {
typedef enum {
DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
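The new DRM_VBLANK_FLIP bit asks the kernel to schedule the pending buffer swap as a flip instead of only waiting. A hedged sketch of a caller, using the drmVBlank request/reply union declared in this header:

    drmVBlank vbl;

    vbl.request.type = DRM_VBLANK_RELATIVE | DRM_VBLANK_FLIP;
    vbl.request.sequence = 1;   /* act on the next vblank */
    if (drmWaitVBlank(fd, &vbl))
            perror("drmWaitVBlank");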


@ -25,8 +25,6 @@
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmHash.c,v 1.4 2001/03/21 18:08:54 dawes Exp $
*
* DESCRIPTION
*
* This file contains a straightforward implementation of a fixed-sized
@ -70,25 +68,14 @@
*
*/
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#define HASH_MAIN 0
#if HASH_MAIN
# include <stdio.h>
# include <stdlib.h>
#else
#if !HASH_MAIN
# include "drm.h"
# include "xf86drm.h"
# ifdef XFree86LOADER
# include "xf86.h"
# include "xf86_ansic.h"
# else
# include <stdio.h>
# include <stdlib.h>
# endif
#endif
#define HASH_MAGIC 0xdeadbeef


@ -25,8 +25,6 @@
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmRandom.c,v 1.4 2000/06/17 00:03:34 martin Exp $
*
* DESCRIPTION
*
* This file contains a simple, straightforward implementation of the Park
@ -73,25 +71,14 @@
*
*/
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#define RANDOM_MAIN 0
#if RANDOM_MAIN
# include <stdio.h>
# include <stdlib.h>
#else
#if !RANDOM_MAIN
# include "drm.h"
# include "xf86drm.h"
# ifdef XFree86LOADER
# include "xf86.h"
# include "xf86_ansic.h"
# else
# include <stdio.h>
# include <stdlib.h>
# endif
#endif
#define RANDOM_MAGIC 0xfeedbeef
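The DESCRIPTION above cites Park & Miller's minimal-standard generator, x' = 16807 * x mod (2^31 - 1). A self-check sketch, independent of this file's actual state handling; the expected result is the published Park & Miller check value:

    /* Park & Miller minimal standard PRNG. 64-bit arithmetic avoids the
     * 32-bit overflow that the real code works around with Schrage's trick. */
    static unsigned int prng_check(void)
    {
            unsigned long long x = 1;
            int i;

            for (i = 0; i < 10000; i++)
                    x = (16807ULL * x) % 2147483647ULL;
            return (unsigned int)x;   /* expect 1043618065 */
    }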


@ -25,8 +25,6 @@
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
*
* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/xf86drmSL.c,v 1.3 2000/06/17 00:03:34 martin Exp $
*
* DESCRIPTION
*
* This file contains a straightforward skip list implementation.
@ -40,26 +38,16 @@
*
*/
#ifdef HAVE_XORG_CONFIG_H
#include <xorg-config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#define SL_MAIN 0
#if SL_MAIN
# include <stdio.h>
# include <stdlib.h>
# include <sys/time.h>
#else
#if !SL_MAIN
# include "drm.h"
# include "xf86drm.h"
# ifdef XFree86LOADER
# include "xf86.h"
# include "xf86_ansic.h"
# else
# include <stdio.h>
# include <stdlib.h>
# endif
#else
# include <sys/time.h>
#endif
#define SL_LIST_MAGIC 0xfacade00LU


@ -93,7 +93,8 @@ typedef struct _drmMMListHead
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
typedef struct _drmFence{
typedef struct _drmFence
{
unsigned handle;
int class;
unsigned type;
@ -102,7 +103,8 @@ typedef struct _drmFence{
unsigned pad[4]; /* for future expansion */
} drmFence;
typedef struct _drmBO{
typedef struct _drmBO
{
drm_bo_type_t type;
unsigned handle;
drm_u64_t mapHandle;
@ -121,8 +123,8 @@ typedef struct _drmBO{
unsigned pad[8]; /* for future expansion */
} drmBO;
typedef struct _drmBONode {
typedef struct _drmBONode
{
drmMMListHead head;
drmBO *buf;
drm_bo_arg_t bo_arg;
@ -138,11 +140,13 @@ typedef struct _drmBOList {
drmMMListHead free;
} drmBOList;
/* Fencing */
/*
* Fence functions.
*/
extern int drmFenceCreate(int fd, unsigned flags, int class,
unsigned type,
drmFence *fence);
unsigned type, drmFence *fence);
extern int drmFenceDestroy(int fd, const drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);

linux-core/.gitignore (new file)

@ -0,0 +1 @@
Module*.symvers


@ -53,22 +53,13 @@ choice
depends on DRM && AGP && AGP_INTEL
optional
config DRM_I830
tristate "i830 driver"
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM or 865G integrated graphics. If M is selected, the
module will be called i830. AGP support is required for this driver
to work. This driver will eventually be replaced by the i915 one.
config DRM_I915
tristate "i915 driver"
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
module will be called i915. AGP support is required for this driver
to work. This driver will eventually replace the I830 driver, when
later release of X start to use the new DDX and DRI.
852GM, 855GM, 865G, 915G, 915GM, 945G, 945GM and 965G integrated
graphics. If M is selected, the module will be called i915.
AGP support is required for this driver to work.
endchoice


@ -58,12 +58,12 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
mach64.o nv.o xgi.o
mach64.o nv.o nouveau.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
ARCHX86 := 1
MODULE_LIST += i830.o i810.o i915.o
MODULE_LIST += i810.o i915.o
endif
ifneq (,$(findstring sparc64,$(MACHINE)))
@ -75,46 +75,31 @@ DRM_MODULES ?= $(MODULE_LIST)
# These definitions are for handling dependencies in the out of kernel build.
DRMSHARED = drm.h drm_sarea.h drm_drawable.c
DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h drm.h drm_sarea.h
COREHEADERS = drm_core.h drm_sman.h drm_hashtab.h
TDFXHEADERS = tdfx_drv.h $(DRMHEADERS)
TDFXSHARED = tdfx_drv.h
R128HEADERS = r128_drv.h r128_drm.h $(DRMHEADERS)
R128SHARED = r128_drv.h r128_drm.h r128_cce.c r128_state.c r128_irq.c
RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS)
RADEONSHARED = radeon_drv.h radeon_drm.h radeon_cp.c radeon_irq.c \
radeon_mem.c radeon_state.c r300_cmdbuf.c r300_reg.h
MGAHEADERS = mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS)
MGASHARED = mga_dma.c mga_drm.h mga_drv.h mga_irq.c mga_state.c \
mga_ucode.h mga_warp.c
I810HEADERS = i810_drv.h i810_drm.h $(DRMHEADERS)
I830HEADERS = i830_drv.h i830_drm.h $(DRMHEADERS)
I915HEADERS = i915_drv.h i915_drm.h $(DRMHEADERS)
I915SHARED = i915_drv.h i915_drm.h i915_irq.c i915_mem.c i915_dma.c
SISHEADERS= sis_drv.h sis_drm.h drm_hashtab.h drm_sman.h $(DRMHEADERS)
SISSHARED= sis_drv.h sis_drm.h
SAVAGEHEADERS= savage_drv.h savage_drm.h $(DRMHEADERS)
SAVAGESHARED= savage_drv.h savage_drm.h savage_bci.c savage_state.c
VIAHEADERS = via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS)
VIASHARED = via_drm.h via_drv.h via_3d_reg.h via_drv.c via_irq.c via_map.c \
via_dma.c via_verifier.c via_verifier.h via_video.c
MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
MACH64SHARED = mach64_drv.h mach64_drm.h mach64_dma.c \
mach64_irq.c mach64_state.c
NVHEADERS = nv_drv.h $(DRMHEADERS)
NVSHARED = nv_drv.h
XGIHEADERS = xgi_drv.h xgi_drm.h
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
SHAREDSRC = $(DRMSHARED) $(MGASHARED) $(R128SHARED) $(RADEONSHARED) \
$(SISSHARED) $(TDFXSHARED) $(VIASHARED) $(MACH64SHARED) \
$(I915SHARED) $(SAVAGESHARED) $(NVSHARED)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
PROGS = dristat drmstat
CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c linux drm_pciids.h .tmp_versions
CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c drm_pciids.h .tmp_versions
# VERSION is not defined from the initial invocation. It is defined when
# this Makefile is invoked from the kernel's root Makefile.
@ -227,27 +212,13 @@ endif
SHAREDDIR := ../shared-core
HASSHARED := $(shell if [ -d $(SHAREDDIR) ]; then echo y; fi)
ifeq ($(HASSHARED),y)
includes:: $(SHAREDSRC) drm_pciids.h
ifeq ($(shell if [ -d $(SHAREDDIR) ]; then echo y; fi),y)
includes:: drm_pciids.h
drm_pciids.h: $(SHAREDDIR)/drm_pciids.txt
sh ../scripts/create_linux_pci_lists.sh < $(SHAREDDIR)/drm_pciids.txt
$(SHAREDSRC):
@if [ -r $(SHAREDDIR)/$@ ]; then \
(rm -f $@; set -x; ln -s $(SHAREDDIR)/$@ $@); fi
CLEANFILES += $(SHAREDSRC)
endif
includes:: linux
linux:
rm -f linux
ln -s . linux
clean cleandir:
rm -rf $(CLEANFILES)
@ -271,15 +242,18 @@ dristat: dristat.c
drmstat: drmstat.c
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
install:
make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
else
# Check for kernel versions that we don't support.
BELOW24 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 4 ]; then \
BELOW26 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 6 ]; then \
echo y; fi)
ifeq ($(BELOW24),y)
$(error Only 2.4.x and later kernels are supported \
ifeq ($(BELOW26),y)
$(error Only 2.6.x and later kernels are supported \
($(VERSION).$(PATCHLEVEL).$(SUBLEVEL)))
endif
@ -292,30 +266,6 @@ endif
# This needs to go before all other include paths.
CC += -I$(DRMSRCDIR)
# Check for Red Hat's 4-argument do_munmap().
DOMUNMAP := $(shell grep do_munmap $(LINUXDIR)/include/linux/mm.h | \
grep -c acct)
ifneq ($(DOMUNMAP),0)
EXTRA_CFLAGS += -DDO_MUNMAP_4_ARGS
endif
# Check for 5-argument remap_page_range() in RH9 kernel, and 2.5.x kernels
RPR := $(shell grep remap_page_range $(LINUXDIR)/include/linux/mm.h | \
grep -c vma)
ifneq ($(RPR),0)
EXTRA_CFLAGS += -DREMAP_PAGE_RANGE_5_ARGS
endif
# Check for 4-argument vmap() in some 2.5.x and 2.4.x kernels
VMAP := $(shell grep -A1 'vmap.*count,$$' $(LINUXDIR)/include/linux/vmalloc.h | \
grep -c prot)
ifneq ($(VMAP),0)
EXTRA_CFLAGS += -DVMAP_4_ARGS
endif
# Check for PAGE_AGP definition
PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
grep -c PAGE_AGP)
@ -324,7 +274,6 @@ ifneq ($(PAGE_AGP),0)
EXTRA_CFLAGS += -DHAVE_PAGE_AGP
endif
# Start with all modules turned off.
CONFIG_DRM_GAMMA := n
CONFIG_DRM_TDFX := n
@ -332,7 +281,6 @@ CONFIG_DRM_MGA := n
CONFIG_DRM_I810 := n
CONFIG_DRM_R128 := n
CONFIG_DRM_RADEON := n
CONFIG_DRM_I830 := n
CONFIG_DRM_I915 := n
CONFIG_DRM_SIS := n
CONFIG_DRM_FFB := n
@ -340,7 +288,11 @@ CONFIG_DRM_SAVAGE := n
CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_XGI := n
CONFIG_DRM_NOUVEAU := n
# Enable module builds for the modules requested/supported.
@ -374,8 +326,13 @@ endif
ifneq (,$(findstring nv,$(DRM_MODULES)))
CONFIG_DRM_NV := m
endif
ifneq (,$(findstring xgi,$(DRM_MODULES)))
CONFIG_DRM_XGI := m
endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif
# These require AGP support
@ -383,9 +340,6 @@ endif
ifneq (,$(findstring i810,$(DRM_MODULES)))
CONFIG_DRM_I810 := m
endif
ifneq (,$(findstring i830,$(DRM_MODULES)))
CONFIG_DRM_I830 := m
endif
ifneq (,$(findstring i915,$(DRM_MODULES)))
CONFIG_DRM_I915 := m
endif
@ -398,7 +352,6 @@ $(tdfx-objs): $(TDFXHEADERS)
$(r128-objs): $(R128HEADERS)
$(mga-objs): $(MGAHEADERS)
$(i810-objs): $(I810HEADERS)
$(i830-objs): $(I830HEADERS)
$(i915-objs): $(I915HEADERS)
$(radeon-objs): $(RADEONHEADERS)
$(sis-objs): $(SISHEADERS)
@ -407,7 +360,11 @@ $(savage-objs): $(SAVAGEHEADERS)
$(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(xgi-objs): $(XGIHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)
endif


@ -13,20 +13,26 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o i830_irq.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \
nv40_graph.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o
via_video.o via_dmablit.o via_fence.o via_buffer.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
xgi-objs := xgi_drv.o xgi_mm.o
@ -37,6 +43,7 @@ radeon-objs += radeon_ioc32.o
mga-objs += mga_ioc32.o
r128-objs += r128_ioc32.o
i915-objs += i915_ioc32.o
nouveau-objs += nouveau_ioc32.o
endif
obj-m += drm.o
@ -45,7 +52,6 @@ obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_I830) += i830.o
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_FFB) += ffb.o
@ -53,4 +59,5 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
obj-$(CONFIG_DRM_XGI) += xgi.o


@ -1,6 +1,6 @@
************************************************************
* For the very latest on DRI development, please see: *
* http://dri.sourceforge.net/ *
* http://dri.freedesktop.org/ *
************************************************************
The Direct Rendering Manager (drm) is a device-independent kernel-level
@ -23,24 +23,3 @@ ways:
4. The DRM is extensible via the use of small device-specific modules
that rely extensively on the API exported by the DRM module.
Documentation on the DRI is available from:
http://precisioninsight.com/piinsights.html
For specific information about kernel-level support, see:
The Direct Rendering Manager, Kernel Support for the Direct Rendering
Infrastructure
http://precisioninsight.com/dr/drm.html
Hardware Locking for the Direct Rendering Infrastructure
http://precisioninsight.com/dr/locking.html
A Security Analysis of the Direct Rendering Infrastructure
http://precisioninsight.com/dr/security.html
************************************************************
* For the very latest on DRI development, please see: *
* http://dri.sourceforge.net/ *
************************************************************


@ -33,41 +33,25 @@
#include "drmP.h"
#if PAGE_SIZE == 65536
# define ATI_PCIGART_TABLE_ORDER 0
# define ATI_PCIGART_TABLE_PAGES (1 << 0)
#elif PAGE_SIZE == 16384
# define ATI_PCIGART_TABLE_ORDER 1
# define ATI_PCIGART_TABLE_PAGES (1 << 1)
#elif PAGE_SIZE == 8192
# define ATI_PCIGART_TABLE_ORDER 2
# define ATI_PCIGART_TABLE_PAGES (1 << 2)
#elif PAGE_SIZE == 4096
# define ATI_PCIGART_TABLE_ORDER 3
# define ATI_PCIGART_TABLE_PAGES (1 << 3)
#else
# error - PAGE_SIZE not 64K, 16K, 8K or 4K
#endif
# define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
static void *drm_ati_alloc_pcigart_table(void)
static void *drm_ati_alloc_pcigart_table(int order)
{
unsigned long address;
struct page *page;
int i;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
ATI_PCIGART_TABLE_ORDER);
order);
if (address == 0UL) {
return NULL;
}
page = virt_to_page(address);
for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
for (i = 0; i < (1 << order); i++, page++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
get_page(page);
#endif
@ -78,22 +62,23 @@ static void *drm_ati_alloc_pcigart_table(void)
return (void *)address;
}
static void drm_ati_free_pcigart_table(void *address)
static void drm_ati_free_pcigart_table(void *address, int order)
{
struct page *page;
int i;
int num_pages = 1 << order;
DRM_DEBUG("%s\n", __FUNCTION__);
page = virt_to_page((unsigned long)address);
for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
for (i = 0; i < num_pages; i++, page++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
__put_page(page);
#endif
ClearPageReserved(page);
}
free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
free_pages((unsigned long)address, order);
}
int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
@ -101,6 +86,8 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
int i;
int order;
int num_pages, max_pages;
/* we need to support large memory configurations */
if (!entry) {
@ -108,15 +95,19 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
return 0;
}
order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
num_pages = 1 << order;
if (gart_info->bus_addr) {
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
pci_unmap_single(dev->pdev, gart_info->bus_addr,
ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
num_pages * PAGE_SIZE,
PCI_DMA_TODEVICE);
}
pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
? entry->pages : ATI_MAX_PCIGART_PAGES;
max_pages = (gart_info->table_size / sizeof(u32));
pages = (entry->pages <= max_pages)
? entry->pages : max_pages;
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
@ -132,7 +123,8 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
&& gart_info->addr) {
drm_ati_free_pcigart_table(gart_info->addr);
drm_ati_free_pcigart_table(gart_info->addr, order);
gart_info->addr = NULL;
}
@ -147,6 +139,9 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
unsigned long pages;
u32 *pci_gart, page_base, bus_address = 0;
int i, j, ret = 0;
int order;
int max_pages;
int num_pages;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
@ -156,7 +151,10 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
address = drm_ati_alloc_pcigart_table();
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
num_pages = 1 << order;
address = drm_ati_alloc_pcigart_table(order);
if (!address) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
@ -168,11 +166,13 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
}
bus_address = pci_map_single(dev->pdev, address,
ATI_PCIGART_TABLE_PAGES *
PAGE_SIZE, PCI_DMA_TODEVICE);
num_pages * PAGE_SIZE,
PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_free_pcigart_table(address);
order = drm_order((gart_info->table_size +
(PAGE_SIZE-1)) / PAGE_SIZE);
drm_ati_free_pcigart_table(address, order);
address = NULL;
goto done;
}
@ -185,10 +185,11 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
pci_gart = (u32 *) address;
pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
? entry->pages : ATI_MAX_PCIGART_PAGES;
max_pages = (gart_info->table_size / sizeof(u32));
pages = (entry->pages <= max_pages)
? entry->pages : max_pages;
memset(pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32));
memset(pci_gart, 0, max_pages * sizeof(u32));
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
@ -206,10 +207,18 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
if (gart_info->is_pcie)
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
*pci_gart = cpu_to_le32((page_base) | 0xc);
break;
case DRM_ATI_GART_PCIE:
*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
else
break;
default:
case DRM_ATI_GART_PCI:
*pci_gart = cpu_to_le32(page_base);
break;
}
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
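The loop above writes one 32-bit descriptor per ATI_PCIGART_PAGE_SIZE GPU page, so each CPU page contributes PAGE_SIZE / ATI_PCIGART_PAGE_SIZE entries. The encoding chosen by the new switch can be isolated as a helper; a hedged sketch (the helper name is hypothetical, and the low 0xc bits appear to be flag/validity bits on the IGP and PCIE register interfaces):

	/* Hypothetical helper isolating the per-entry encoding above. */
	static u32 ati_pcigart_entry(u32 page_base, int gart_reg_if)
	{
		switch (gart_reg_if) {
		case DRM_ATI_GART_IGP:
			return cpu_to_le32(page_base | 0xc);        /* flag bits kept     */
		case DRM_ATI_GART_PCIE:
			return cpu_to_le32((page_base >> 8) | 0xc); /* address shifted    */
		default:
		case DRM_ATI_GART_PCI:
			return cpu_to_le32(page_base);              /* plain bus address  */
		}
	}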

1
linux-core/drm.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/drm.h

View file

@ -67,23 +67,16 @@
#include <asm/mtrr.h>
#endif
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
#include <asm/agp.h>
#include <linux/types.h>
#include <linux/agp_backend.h>
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
#define HAS_WORKQUEUE 0
#else
#define HAS_WORKQUEUE 1
#endif
#if !HAS_WORKQUEUE
#include <linux/tqueue.h>
#else
#include <linux/workqueue.h>
#endif
#include <linux/poll.h>
#include <asm/pgalloc.h>
#include "drm.h"
#include <linux/slab.h>
#include <linux/idr.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@ -308,19 +301,14 @@ typedef struct drm_devstate {
} drm_devstate_t;
typedef struct drm_magic_entry {
drm_hash_item_t hash_item;
struct list_head head;
drm_hash_item_t hash_item;
struct drm_file *priv;
} drm_magic_entry_t;
typedef struct drm_magic_head {
struct drm_magic_entry *head;
struct drm_magic_entry *tail;
} drm_magic_head_t;
typedef struct drm_vma_entry {
struct list_head head;
struct vm_area_struct *vma;
struct drm_vma_entry *next;
pid_t pid;
} drm_vma_entry_t;
@ -419,8 +407,7 @@ typedef struct drm_file {
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct drm_file *next;
struct drm_file *prev;
struct list_head lhead;
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
@ -466,6 +453,10 @@ typedef struct drm_lock_data {
struct file *filp; /**< File descr of lock holder (0=kernel) */
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
spinlock_t spinlock;
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
} drm_lock_data_t;
/**
@ -497,8 +488,7 @@ typedef struct drm_agp_mem {
DRM_AGP_MEM *memory;
unsigned long bound; /**< address */
int pages;
struct drm_agp_mem *prev; /**< previous entry */
struct drm_agp_mem *next; /**< next entry */
struct list_head head;
} drm_agp_mem_t;
/**
@ -508,7 +498,7 @@ typedef struct drm_agp_mem {
*/
typedef struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
drm_agp_mem_t *memory; /**< memory entries */
struct list_head memory;
unsigned long mode; /**< AGP mode */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
struct agp_bridge_data *bridge;
@ -553,7 +543,8 @@ typedef struct drm_mm_node {
} drm_mm_node_t;
typedef struct drm_mm {
drm_mm_node_t root_node;
struct list_head fl_entry;
struct list_head ml_entry;
} drm_mm_t;
@ -579,6 +570,10 @@ typedef struct drm_ctx_list {
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
struct drm_ctx_sarea_list {
drm_map_t *map;
};
typedef struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
@ -590,86 +585,24 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
#define DRM_ATI_GART_PCI 1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
typedef struct ati_pcigart_info {
int gart_table_location;
int is_pcie;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
int table_size;
} drm_ati_pcigart_info;
/*
* User space objects and their references.
*/
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
typedef enum {
drm_fence_type,
drm_buffer_type,
drm_ttm_type
/*
* Add other user space object types here.
*/
} drm_object_type_t;
/*
* A user object is a structure that helps the drm give out user handles
* to kernel internal objects and to keep track of these objects so that
* they can be destroyed, for example when the user space process exits.
* Designed to be accessible using a user space 32-bit handle.
*/
typedef struct drm_user_object{
drm_hash_item_t hash;
struct list_head list;
drm_object_type_t type;
atomic_t refcount;
int shareable;
drm_file_t *owner;
void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
drm_ref_t ref_action);
void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
drm_ref_t unref_action);
void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
} drm_user_object_t;
/*
* A ref object is a structure used to keep track of references to
* user objects, so that those references can be destroyed, for example
* when the user space process exits. Designed to be accessible using a
* pointer to the _user_ object.
*/
typedef struct drm_ref_object {
drm_hash_item_t hash;
struct list_head list;
atomic_t refcount;
drm_ref_t unref_action;
} drm_ref_object_t;
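Taken together with the accessor declarations further down in this header (drm_add_user_object() and friends), the intended usage is roughly the following. A hedged sketch; struct my_obj, my_obj_export() and the priv->head->dev access path are illustrative assumptions, not code from this tree:

	struct my_obj {
		drm_user_object_t base;       /* embedded generic part  */
		/* driver-private payload ... */
	};

	static int my_obj_export(drm_file_t *priv, struct my_obj *obj, int shareable)
	{
		drm_device_t *dev = priv->head->dev;
		int ret;

		mutex_lock(&dev->struct_mutex);   /* required by drm_add_user_object */
		ret = drm_add_user_object(priv, &obj->base, shareable);
		mutex_unlock(&dev->struct_mutex);
		return ret;  /* on success the object is reachable via a 32-bit handle */
	}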
#include "drm_ttm.h"
/*
* buffer object driver
*/
typedef struct drm_bo_driver{
int cached[DRM_BO_MEM_TYPES];
drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device *dev);
int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
} drm_bo_driver_t;
struct drm_drawable_list {
drm_drawable_info_t info;
};
#include "drm_objects.h"
/**
* DRM driver structure. This structure represent the common code for
@ -719,6 +652,8 @@ struct drm_driver {
void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct file * filp);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct file * filp);
unsigned long (*get_map_ofs) (drm_map_t * map);
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@ -755,70 +690,6 @@ typedef struct drm_head {
struct class_device *dev_class;
} drm_head_t;
typedef struct drm_cache {
/*
* Memory caches
*/
kmem_cache_t *mm;
kmem_cache_t *fence_object;
} drm_cache_t;
typedef struct drm_fence_driver{
int no_types;
uint32_t wrap_diff;
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
int (*emit) (struct drm_device *dev, uint32_t flags,
uint32_t *breadcrumb,
uint32_t *native_type);
void (*poke_flush) (struct drm_device *dev);
} drm_fence_driver_t;
#define _DRM_FENCE_TYPE_EXE 0x00
typedef struct drm_fence_manager{
int initialized;
rwlock_t lock;
/*
* The list below should be maintained in sequence order and
* access is protected by the above spinlock.
*/
struct list_head ring;
struct list_head *fence_types[32];
volatile uint32_t pending_flush;
wait_queue_head_t fence_queue;
int pending_exe_flush;
uint32_t last_exe_flush;
uint32_t exe_flush_sequence;
atomic_t count;
} drm_fence_manager_t;
typedef struct drm_buffer_manager{
struct mutex init_mutex;
int nice_mode;
int initialized;
drm_file_t *last_to_validate;
int has_type[DRM_BO_MEM_TYPES];
int use_type[DRM_BO_MEM_TYPES];
drm_mm_t manager[DRM_BO_MEM_TYPES];
struct list_head lru[DRM_BO_MEM_TYPES];
struct list_head pinned[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
struct work_struct wq;
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
} drm_buffer_manager_t;
/**
* DRM device structure. This structure represent a complete card that
@ -856,15 +727,14 @@ typedef struct drm_device {
/** \name Authentication */
/*@{ */
drm_file_t *file_first; /**< file list head */
drm_file_t *file_last; /**< file list tail */
struct list_head filelist;
drm_open_hash_t magiclist;
struct list_head magicfree;
/*@} */
/** \name Memory management */
/*@{ */
drm_map_list_t *maplist; /**< Linked list of regions */
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
drm_mm_t offset_manager; /**< User token manager */
@ -874,14 +744,13 @@ typedef struct drm_device {
/** \name Context handle management */
/*@{ */
drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
struct list_head ctxlist; /**< Linked list of context handles */
int ctx_count; /**< Number of context handles */
struct mutex ctxlist_mutex; /**< For ctxlist */
drm_map_t **context_sareas; /**< per-context SAREA's */
int max_context;
struct idr ctx_idr;
drm_vma_entry_t *vmalist; /**< List of vmas (for debugging) */
struct list_head vmalist; /**< List of vmas (for debugging) */
drm_lock_data_t lock; /**< Information on hardware lock */
/*@} */
@ -908,11 +777,8 @@ typedef struct drm_device {
unsigned long last_switch; /**< jiffies at last context switch */
/*@} */
#if !HAS_WORKQUEUE
struct tq_struct tq;
#else
struct work_struct work;
#endif
/** \name VBLANK IRQ support */
/*@{ */
@ -920,8 +786,8 @@ typedef struct drm_device {
atomic_t vbl_received;
atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs; /**< signal list to send on VBLANK */
drm_vbl_sig_t vbl_sigs2; /**< signals to send on secondary VBLANK */
struct list_head vbl_sigs; /**< signal list to send on VBLANK */
struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
unsigned int vbl_pending;
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@ -940,14 +806,9 @@ typedef struct drm_device {
int pci_vendor; /**< PCI vendor id */
int pci_device; /**< PCI device id */
#ifdef __alpha__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
struct pci_controler *hose;
#else
struct pci_controller *hose;
#endif
#endif
drm_sg_mem_t *sg; /**< Scatter gather memory */
unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
drm_sigdata_t sigdata; /**< For block_all_signals */
sigset_t sigmask;
@ -963,81 +824,19 @@ typedef struct drm_device {
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
unsigned int drw_bitfield_length;
u32 *drw_bitfield;
unsigned int drw_info_length;
drm_drawable_info_t **drw_info;
struct idr drw_idr;
/*@} */
} drm_device_t;
#if __OS_HAS_AGP
typedef struct drm_agp_ttm_priv {
typedef struct drm_agp_ttm_backend {
drm_ttm_backend_t backend;
DRM_AGP_MEM *mem;
struct agp_bridge_data *bridge;
unsigned alloc_type;
unsigned cached_type;
unsigned uncached_type;
int populated;
} drm_agp_ttm_priv;
} drm_agp_ttm_backend_t;
#endif
typedef struct drm_fence_object{
drm_user_object_t base;
atomic_t usage;
/*
* The below three fields are protected by the fence manager spinlock.
*/
struct list_head ring;
int class;
uint32_t native_type;
uint32_t type;
uint32_t signaled;
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
} drm_fence_object_t;
typedef struct drm_buffer_object{
drm_device_t *dev;
drm_user_object_t base;
/*
* If there is a possibility that the usage variable is zero,
* then dev->struct_mutext should be locked before incrementing it.
*/
atomic_t usage;
drm_ttm_object_t *ttm_object;
drm_ttm_t *ttm;
unsigned long num_pages;
unsigned long buffer_start;
drm_bo_type_t type;
unsigned long offset;
uint32_t page_alignment;
atomic_t mapped;
uint32_t flags;
uint32_t mask;
drm_mm_node_t *node_ttm; /* MM node for on-card RAM */
drm_mm_node_t *node_card; /* MM node for ttm*/
struct list_head lru_ttm; /* LRU for the ttm pages*/
struct list_head lru_card; /* For memory types with on-card RAM */
struct list_head ddestroy;
uint32_t fence_type;
uint32_t fence_class;
drm_fence_object_t *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
struct mutex mutex;
} drm_buffer_object_t;
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@ -1046,7 +845,7 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
}
#ifdef __alpha__
#define drm_get_pci_domain(dev) dev->hose->bus->number
#define drm_get_pci_domain(dev) dev->hose->index
#else
#define drm_get_pci_domain(dev) 0
#endif
@ -1094,6 +893,7 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
}
#define drm_core_has_MTRR(dev) (0)
#define DRM_MTRR_WC 0
#endif
@ -1147,7 +947,8 @@ extern void drm_query_memctl(drm_u64_t *cur_used,
drm_u64_t *low_threshold,
drm_u64_t *high_threshold);
extern void drm_init_memctl(size_t low_threshold,
size_t high_threshold);
size_t high_threshold,
size_t unit_size);
/* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
@ -1201,6 +1002,7 @@ extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
drm_drawable_t id);
extern void drm_drawable_free_all(drm_device_t *dev);
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct inode *inode, struct file *filp,
@ -1213,9 +1015,11 @@ extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
extern void drm_idlelock_take(drm_lock_data_t *lock_data);
extern void drm_idlelock_release(drm_lock_data_t *lock_data);
/*
* These are exported to drivers so that they can implement fencing using
* DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/
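A minimal sketch of how a driver is expected to use the new idlelock, e.g. around the reclaim_buffers_idlelocked() callback added elsewhere in this patch; hedged, since the actual call sites live in the drivers:

	drm_idlelock_take(&dev->lock);     /* hardware lock is now held idle */
	/* ... reclaim buffers or touch state that needs an idle lock ...   */
	drm_idlelock_release(&dev->lock);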
@ -1308,8 +1112,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend);
extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
@ -1318,10 +1121,11 @@ extern int drm_put_head(drm_head_t * head);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern unsigned int drm_cards_limit;
extern drm_head_t **drm_heads;
extern drm_cache_t drm_cache;
extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
/* Proc support (drm_proc.h) */
extern int drm_proc_init(drm_device_t * dev,
int minor,
@ -1377,133 +1181,14 @@ static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
return block->mm;
}
/*
* User space object bookkeeping (drm_object.c)
*/
/*
* Must be called with the struct_mutex held.
*/
extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
int shareable);
/*
* Must be called with the struct_mutex held.
*/
extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_user_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
/*
* Must be called with the struct_mutex held. May temporarily release it.
*/
extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
*/
drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
drm_user_object_t *referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
drm_user_object_t **object);
extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
/*
* fence objects (drm_fence.c)
*/
extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
extern void drm_fence_manager_init(drm_device_t *dev);
extern void drm_fence_manager_takedown(drm_device_t *dev);
extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
extern int drm_fence_object_flush(drm_device_t * dev,
volatile drm_fence_object_t * fence,
uint32_t type);
extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
uint32_t type);
extern void drm_fence_usage_deref_locked(drm_device_t * dev,
drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
drm_fence_object_t * fence);
extern int drm_fence_object_wait(drm_device_t * dev,
volatile drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
uint32_t fence_flags,
drm_fence_object_t **c_fence);
extern int drm_fence_add_user_object(drm_file_t *priv,
drm_fence_object_t *fence,
int shareable);
extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
/*
* buffer objects (drm_bo.c)
*/
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(drm_device_t *dev);
extern int drm_bo_driver_init(drm_device_t *dev);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
drm_fence_object_t *fence,
drm_fence_object_t **used_fence);
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map,
struct drm_device *dev)
{
map->handle = drm_ioremap(map->offset, map->size, dev);
}
static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
struct drm_device *dev)
{
map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
}
static __inline__ void drm_core_ioremapfree(struct drm_map *map,
struct drm_device *dev)
{
if (map->handle && map->size)
drm_ioremapfree(map->handle, map->size, dev);
}
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
drm_map_list_t *_entry;
list_for_each_entry(_entry, &dev->maplist->head, head)
list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;
@ -1581,25 +1266,6 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)
drm_free_memctl(size);
}
static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
int flags)
{
void *ret;
if (drm_alloc_memctl(size))
return NULL;
ret = kmem_cache_alloc(cache, flags);
if (!ret)
drm_free_memctl(size);
return ret;
}
static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
void *obj)
{
kmem_cache_free(cache, obj);
drm_free_memctl(size);
}
/*@}*/
#endif /* __KERNEL__ */

View file

@ -106,10 +106,6 @@ int drm_agp_acquire(drm_device_t * dev)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
#ifndef VMAP_4_ARGS
if (dev->agp->cant_use_aperture)
return -EINVAL;
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
if ((retcode = agp_backend_acquire()))
return retcode;
@ -253,11 +249,7 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
if (dev->agp->memory)
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
list_add(&entry->head, &dev->agp->memory);
request->handle = entry->handle;
request->physical = memory->physical;
@ -284,10 +276,12 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
return err;
if (copy_to_user(argp, &request, sizeof(request))) {
drm_agp_mem_t *entry = dev->agp->memory;
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
drm_agp_mem_t *entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == request.handle)
break;
}
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -EFAULT;
@ -310,7 +304,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
{
drm_agp_mem_t *entry;
for (entry = dev->agp->memory; entry; entry = entry->next) {
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
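The conversion away from hand-rolled prev/next pointers follows the standard kernel list_head pattern throughout this file; condensed here for reference, with entry and handle as in the surrounding code:

	INIT_LIST_HEAD(&dev->agp->memory);           /* once, in drm_agp_init()  */
	list_add(&entry->head, &dev->agp->memory);   /* insert a new allocation  */
	list_for_each_entry(entry, &dev->agp->memory, head)  /* lookup           */
		if (entry->handle == handle)
			break;
	list_del(&entry->head);                      /* unlink before freeing    */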
@ -439,13 +433,7 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
if (entry->bound)
drm_unbind_agp(entry->memory);
if (entry->prev)
entry->prev->next = entry->next;
else
dev->agp->memory = entry->next;
if (entry->next)
entry->next->prev = entry->prev;
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
@ -506,7 +494,7 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
}
head->memory = NULL;
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
return head;
@ -563,6 +551,8 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
#endif
#define AGP_REQUIRED_MAJOR 0
#define AGP_REQUIRED_MINOR 102
static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
@ -572,7 +562,8 @@ static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
struct page **pages) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
@ -581,9 +572,9 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
DRM_DEBUG("drm_agp_populate_ttm\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
#else
mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
#endif
if (!mem) {
drm_free_memctl(num_pages *sizeof(void *));
@ -595,7 +586,7 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
for (cur_page = pages; cur_page < last_page; ++cur_page) {
mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
}
agp_priv->mem = mem;
agp_be->mem = mem;
return 0;
}
@ -603,110 +594,114 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
unsigned long offset,
int cached)
{
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
DRM_AGP_MEM *mem = agp_be->mem;
int ret;
DRM_DEBUG("drm_agp_bind_ttm\n");
DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
(cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
mem->is_flushed = TRUE;
mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY :
AGP_USER_MEMORY;
ret = drm_agp_bind_memory(mem, offset);
if (ret) {
DRM_ERROR("AGP Bind memory failed\n");
}
DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
DRM_BE_FLAG_BOUND_CACHED);
return ret;
}
static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_priv->mem->is_bound)
return drm_agp_unbind_memory(agp_priv->mem);
if (agp_be->mem->is_bound)
return drm_agp_unbind_memory(agp_be->mem);
else
return 0;
}
static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
drm_agp_ttm_backend_t *agp_be =
container_of(backend, drm_agp_ttm_backend_t, backend);
DRM_AGP_MEM *mem = agp_be->mem;
DRM_DEBUG("drm_agp_clear_ttm\n");
if (mem) {
unsigned long num_pages = mem->page_count;
backend->unbind(backend);
backend->func->unbind(backend);
agp_free_memory(mem);
drm_free_memctl(num_pages *sizeof(void *));
}
agp_priv->mem = NULL;
agp_be->mem = NULL;
}
static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv;
drm_agp_ttm_backend_t *agp_be;
if (backend) {
DRM_DEBUG("drm_agp_destroy_ttm\n");
agp_priv = (drm_agp_ttm_priv *) backend->private;
if (agp_priv) {
if (agp_priv->mem) {
backend->clear(backend);
agp_be = container_of(backend, drm_agp_ttm_backend_t, backend);
if (agp_be) {
if (agp_be->mem) {
backend->func->clear(backend);
}
drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
backend->private = NULL;
}
if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
}
}
}
static drm_ttm_backend_func_t agp_ttm_backend =
{
.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
.populate = drm_agp_populate,
.clear = drm_agp_clear_ttm,
.bind = drm_agp_bind_ttm,
.unbind = drm_agp_unbind_ttm,
.destroy = drm_agp_destroy_ttm,
};
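Replacing the per-instance function pointers with this shared ops table relies on the usual embedding trick: the generic drm_ttm_backend_t lives inside a larger driver struct, and each callback recovers the driver state with container_of(). A generic sketch (the my_* names are hypothetical):

	typedef struct my_ttm_backend {
		drm_ttm_backend_t backend;   /* generic part handed to the core */
		void *driver_state;          /* backend-private data            */
	} my_ttm_backend_t;

	static int my_populate(drm_ttm_backend_t *backend,
			       unsigned long num_pages, struct page **pages)
	{
		my_ttm_backend_t *be = container_of(backend, my_ttm_backend_t, backend);
		/* ... operate on be->driver_state ... */
		return 0;
	}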
drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend)
drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev)
{
drm_ttm_backend_t *agp_be;
drm_agp_ttm_priv *agp_priv;
drm_agp_ttm_backend_t *agp_be;
struct agp_kern_info *info;
agp_be = (backend != NULL) ? backend:
drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
if (!agp_be)
if (!dev->agp) {
DRM_ERROR("AGP is not initialized.\n");
return NULL;
}
info = &dev->agp->agp_info;
agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
if (!agp_priv) {
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
if (info->version.major != AGP_REQUIRED_MAJOR ||
info->version.minor < AGP_REQUIRED_MINOR) {
DRM_ERROR("Wrong agpgart version %d.%d\n"
"\tYou need at least version %d.%d.\n",
info->version.major,
info->version.minor,
AGP_REQUIRED_MAJOR,
AGP_REQUIRED_MINOR);
return NULL;
}
agp_priv->mem = NULL;
agp_priv->alloc_type = AGP_USER_MEMORY;
agp_priv->cached_type = AGP_USER_CACHED_MEMORY;
agp_priv->uncached_type = AGP_USER_MEMORY;
agp_priv->bridge = dev->agp->bridge;
agp_priv->populated = FALSE;
agp_be->aperture_base = dev->agp->agp_info.aper_base;
agp_be->private = (void *) agp_priv;
agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
agp_be->populate = drm_agp_populate;
agp_be->clear = drm_agp_clear_ttm;
agp_be->bind = drm_agp_bind_ttm;
agp_be->unbind = drm_agp_unbind_ttm;
agp_be->destroy = drm_agp_destroy_ttm;
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
(backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
(dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
agp_be->drm_map_type = _DRM_AGP;
return agp_be;
agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
if (!agp_be)
return NULL;
agp_be->mem = NULL;
agp_be->bridge = dev->agp->bridge;
agp_be->populated = FALSE;
agp_be->backend.func = &agp_ttm_backend;
agp_be->backend.mem_type = DRM_BO_MEM_TT;
return &agp_be->backend;
}
EXPORT_SYMBOL(drm_agp_init_ttm);

File diff suppressed because it is too large.

411
linux-core/drm_bo_move.c Normal file
View file

@ -0,0 +1,411 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
/**
* Free the old memory node unless it's a pinned region and we
* have not been requested to free also pinned regions.
*/
static void drm_bo_free_old_node(drm_buffer_object_t * bo)
{
drm_bo_mem_reg_t *old_mem = &bo->mem;
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
drm_mm_put_block(old_mem->mm_node);
old_mem->mm_node = NULL;
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;
}
int drm_bo_move_ttm(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_ttm_t *ttm = bo->ttm;
drm_bo_mem_reg_t *old_mem = &bo->mem;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
int ret;
if (old_mem->mem_type == DRM_BO_MEM_TT) {
if (evict)
drm_ttm_evict(ttm);
else
drm_ttm_unbind(ttm);
drm_bo_free_old_node(bo);
DRM_FLAG_MASKED(old_mem->flags,
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
old_mem->mem_type = DRM_BO_MEM_LOCAL;
save_flags = old_mem->flags;
}
if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
ret = drm_bind_ttm(ttm,
new_mem->flags & DRM_BO_FLAG_CACHED,
new_mem->mm_node->start);
if (ret)
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->mask = save_mask;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
EXPORT_SYMBOL(drm_bo_move_ttm);
/**
* \c Return a kernel virtual address to the buffer object PCI memory.
*
* \param bo The buffer object.
* \return Failure indication.
*
* Returns -EINVAL if the buffer object is currently not mappable.
* Returns -ENOMEM if the ioremap operation failed.
* Otherwise returns zero.
*
* After a successful call, bo->iomap contains the virtual address, or NULL
* if the buffer object content is not accessible through PCI space.
* Call bo->mutex locked.
*/
int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
void **virtual)
{
drm_buffer_manager_t *bm = &dev->bm;
drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
unsigned long bus_offset;
unsigned long bus_size;
unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
if (ret || bus_size == 0)
return ret;
if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
addr = (void *)(((u8 *) man->io_addr) + bus_offset);
else {
addr = ioremap_nocache(bus_base + bus_offset, bus_size);
if (!addr)
return -ENOMEM;
}
*virtual = addr;
return 0;
}
/**
* \c Unmap mapping obtained using drm_bo_ioremap
*
* \param bo The buffer object.
*
* Call bo->mutex locked.
*/
void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
void *virtual)
{
drm_buffer_manager_t *bm;
drm_mem_type_manager_t *man;
bm = &dev->bm;
man = &bm->man[mem->mem_type];
if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
iounmap(virtual);
}
}
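A hedged usage sketch of the two helpers above; bo is assumed to be a drm_buffer_object_t with bo->mutex held, as the comments require:

	void *virt;
	int ret;

	ret = drm_mem_reg_ioremap(dev, &bo->mem, &virt);
	if (ret)
		return ret;
	if (virt) {    /* NULL means the region is not visible through PCI space */
		memset_io(virt, 0, bo->mem.num_pages << PAGE_SHIFT);
		drm_mem_reg_iounmap(dev, &bo->mem, virt);
	}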
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
uint32_t *srcP =
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
iowrite32(ioread32(srcP++), dstP++);
return 0;
}
static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
{
struct page *d = drm_ttm_get_page(ttm, page);
void *dst;
if (!d)
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
dst = kmap(d);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
kunmap(d);
return 0;
}
static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
{
struct page *s = drm_ttm_get_page(ttm, page);
void *src;
if (!s)
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
src = kmap(s);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
kunmap(s);
return 0;
}
int drm_bo_move_memcpy(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_device_t *dev = bo->dev;
drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
drm_ttm_t *ttm = bo->ttm;
drm_bo_mem_reg_t *old_mem = &bo->mem;
drm_bo_mem_reg_t old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
unsigned long i;
unsigned long page;
unsigned long add = 0;
int dir;
ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
if (ret)
return ret;
ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
if (ret)
goto out;
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
if (old_iomap == NULL && ttm == NULL)
goto out2;
add = 0;
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->mm_node->start <
old_mem->mm_node->start + old_mem->mm_node->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
if (old_iomap == NULL)
ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
else if (new_iomap == NULL)
ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
else
ret = drm_copy_io_page(new_iomap, old_iomap, page);
if (ret)
goto out1;
}
mb();
out2:
drm_bo_free_old_node(bo);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->mask = save_mask;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
drm_ttm_unbind(ttm);
drm_destroy_ttm(ttm);
bo->ttm = NULL;
}
out1:
drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
return ret;
}
EXPORT_SYMBOL(drm_bo_move_memcpy);
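drm_bo_move_memcpy() guards against overlapping source and destination windows with the same trick memmove() uses: when both regions live in the same memory type and the destination starts inside the source, the page loop walks backwards. The direction setup in isolation:

	add = 0;
	dir = 1;
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;                       /* copy pages high-to-low   */
		add = new_mem->num_pages - 1;   /* start from the last page */
	}
	/* the page copied at step i is then: page = i * dir + add */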
/*
* Transfer a buffer object's memory and LRU status to a newly
* created object. User-space references remain with the old
* object. Call bo->mutex locked.
*/
int drm_buffer_object_transfer(drm_buffer_object_t * bo,
drm_buffer_object_t ** new_obj)
{
drm_buffer_object_t *fbo;
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
if (!fbo)
return -ENOMEM;
*fbo = *bo;
mutex_init(&fbo->mutex);
mutex_lock(&fbo->mutex);
mutex_lock(&dev->struct_mutex);
DRM_INIT_WAITQUEUE(&bo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
INIT_LIST_HEAD(&fbo->vma_list);
INIT_LIST_HEAD(&fbo->p_mm_list);
#endif
atomic_inc(&bo->fence->usage);
fbo->pinned_node = NULL;
fbo->mem.mm_node->private = (void *)fbo;
atomic_set(&fbo->usage, 1);
atomic_inc(&bm->count);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&fbo->mutex);
*new_obj = fbo;
return 0;
}
/*
* Since move is underway, we need to block signals in this function.
* We cannot restart until it has finished.
*/
int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
int evict,
int no_wait,
uint32_t fence_class,
uint32_t fence_type,
uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
{
drm_device_t *dev = bo->dev;
drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
drm_bo_mem_reg_t *old_mem = &bo->mem;
int ret;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
drm_buffer_object_t *old_obj;
if (bo->fence)
drm_fence_usage_deref_unlocked(dev, bo->fence);
ret = drm_fence_object_create(dev, fence_class, fence_type,
fence_flags | DRM_FENCE_FLAG_EMIT,
&bo->fence);
if (ret)
return ret;
#ifdef DRM_ODD_MM_COMPAT
/*
* In this mode, we don't allow pipelining a copy blit,
* since the buffer will be accessible from user space
* the moment we return and rebuild the page tables.
*
* With normal vm operation, page tables are rebuilt
* on demand using fault(), which waits for buffer idle.
*/
if (1)
#else
if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
bo->mem.mm_node != NULL))
#endif
{
ret = drm_bo_wait(bo, 0, 1, 0);
if (ret)
return ret;
drm_bo_free_old_node(bo);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
drm_ttm_unbind(bo->ttm);
drm_destroy_ttm(bo->ttm);
bo->ttm = NULL;
}
} else {
/* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
ret = drm_buffer_object_transfer(bo, &old_obj);
if (ret)
return ret;
if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
old_obj->ttm = NULL;
else
bo->ttm = NULL;
mutex_lock(&dev->struct_mutex);
list_del_init(&old_obj->lru);
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
drm_bo_add_to_lru(old_obj);
drm_bo_usage_deref_locked(old_obj);
mutex_unlock(&dev->struct_mutex);
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->mask = save_mask;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
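The pipelined branch above hinges on drm_buffer_object_transfer(): the old storage is handed to a throw-away "ghost" object that the emitted fence releases later. A hedged sketch of that hand-off (error handling elided; struct_mutex must be held for the deref, as in the code above):

	drm_buffer_object_t *ghost;

	ret = drm_buffer_object_transfer(bo, &ghost);   /* ghost inherits bo's memory */
	if (ret)
		return ret;
	/* ghost now owns the old pages/node and shares bo's fence; it sits on
	 * the LRU and is destroyed once the blit's fence signals. */
	drm_bo_usage_deref_locked(ghost);               /* drop the creation reference */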

View file

@ -51,10 +51,8 @@ EXPORT_SYMBOL(drm_get_resource_len);
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
drm_local_map_t *map)
{
struct list_head *list;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
drm_map_list_t *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
@ -179,7 +177,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
}
}
if (map->type == _DRM_REGISTERS)
map->handle = drm_ioremap(map->offset, map->size, dev);
map->handle = ioremap(map->offset, map->size);
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
@ -195,7 +193,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
*maplist = list;
return 0;
}
map->handle = vmalloc_32(map->size);
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
map->size, drm_order(map->size), map->handle);
if (!map->handle) {
@ -237,14 +235,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
* skipped and we double check that dev->agp->memory is
* actually set as well as being invalid before EPERM'ing
*/
for (entry = dev->agp->memory; entry; entry = entry->next) {
list_for_each_entry(entry, &dev->agp->memory, head) {
if ((map->offset >= entry->bound) &&
(map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (dev->agp->memory && !valid) {
if (!list_empty(&dev->agp->memory) && !valid) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EPERM;
}
@ -279,6 +277,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
if (!list) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
}
@ -286,7 +286,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
list->map = map;
mutex_lock(&dev->struct_mutex);
list_add(&list->head, &dev->maplist->head);
list_add(&list->head, &dev->maplist);
/* Assign a 32-bit handle */
@ -295,6 +295,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
ret = drm_map_handle(dev, &list->hash, user_token, 0);
if (ret) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
mutex_unlock(&dev->struct_mutex);
@ -376,33 +378,32 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
*/
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
struct list_head *list;
drm_map_list_t *r_list = NULL;
drm_map_list_t *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
int found = 0;
/* Find the list entry for the map and remove it */
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
list_del(list);
list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
found = 1;
break;
}
}
if (!found) {
return -EINVAL;
}
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
if (list == (&dev->maplist->head)) {
return -EINVAL;
}
switch (map->type) {
case _DRM_REGISTERS:
drm_ioremapfree(map->handle, map->size, dev);
iounmap(map->handle);
/* FALLTHROUGH */
case _DRM_FRAME_BUFFER:
if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
@ -460,7 +461,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->head->dev;
drm_map_t request;
drm_local_map_t *map = NULL;
struct list_head *list;
drm_map_list_t *r_list;
int ret;
if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
@ -468,9 +469,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->user_token == (unsigned long)request.handle &&
r_list->map->flags & _DRM_REMOVABLE) {
@ -482,7 +481,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
/* List has wrapped around to the head pointer, or it's empty and we didn't
* find anything.
*/
if (list == (&dev->maplist->head)) {
if (list_empty(&dev->maplist) || !map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@ -606,14 +605,14 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
list_for_each_entry(agp_entry, &dev->agp->memory, head) {
if ((agp_offset >= agp_entry->bound) &&
(agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
if (dev->agp->memory && !valid) {
if (!list_empty(&dev->agp->memory) && !valid) {
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}

View file

@ -79,54 +79,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
* vm code for kernels below 2,6,15 in which version a major vm write
* vm code for kernels below 2.6.15 in which version a major vm write
* occurred. This implements a simple, straightforward
* version similar to what's going to be
* in kernel 2.6.20+?
* in kernel 2.6.19+
* Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
* nopfn.
*/
static int drm_pte_is_clear(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 1;
pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
goto unlock;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
goto unlock;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
goto unlock;
pte = pte_offset_map(pmd, addr);
if (!pte)
goto unlock;
ret = pte_none(*pte);
pte_unmap(pte);
unlock:
spin_unlock(&mm->page_table_lock);
return ret;
}
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot)
{
int ret;
if (!drm_pte_is_clear(vma, addr))
return -EBUSY;
ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
return ret;
}
static struct {
spinlock_t lock;
struct page *dummy_page;
@ -134,6 +94,11 @@ static struct {
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data);
struct page * get_nopage_retry(void)
{
if (atomic_read(&drm_np_retry.present) == 0) {
@ -160,7 +125,7 @@ void free_nopage_retry(void)
}
}
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
@ -171,7 +136,7 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
data.address = address;
data.vma = vma;
drm_vm_ttm_fault(vma, &data);
drm_bo_vm_fault(vma, &data);
switch (data.type) {
case VM_FAULT_OOM:
return NOPAGE_OOM;
@ -186,10 +151,179 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
#endif
#if !defined(DRM_FULL_MM_COMPAT) && \
((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
static int drm_pte_is_clear(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 1;
pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
goto unlock;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
goto unlock;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
goto unlock;
pte = pte_offset_map(pmd, addr);
if (!pte)
goto unlock;
ret = pte_none(*pte);
pte_unmap(pte);
unlock:
spin_unlock(&mm->page_table_lock);
return ret;
}
static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
if (!drm_pte_is_clear(vma, addr))
return -EBUSY;
ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
return ret;
}
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
drm_ttm_t *ttm;
drm_device_t *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
mutex_lock(&bo->mutex);
err = drm_bo_wait(bo, 0, 1, 0);
if (err) {
data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;
goto out_unlock;
}
/*
* If the buffer happens to be in a non-mappable location,
* move it to a mappable one.
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
unsigned long _end = jiffies + 3*DRM_HZ;
uint32_t new_mask = bo->mem.mask |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
do {
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
} while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
if (err) {
DRM_ERROR("Timeout moving buffer to mappable location.\n");
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
}
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
dev = bo->dev;
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
} else {
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
data->type = VM_FAULT_OOM;
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
drm_io_prot(_DRM_TTM, vma);
}
err = vm_insert_pfn(vma, address, pfn);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out_unlock:
mutex_unlock(&bo->mutex);
return NULL;
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
!defined(DRM_FULL_MM_COMPAT)
/**
*/
unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
unsigned long address)
{
struct fault_data data;
data.address = address;
(void) drm_bo_vm_fault(vma, &data);
if (data.type == VM_FAULT_OOM)
return NOPFN_OOM;
else if (data.type == VM_FAULT_SIGBUS)
return NOPFN_SIGBUS;
/*
* pfn already set.
*/
return 0;
}
#endif
#ifdef DRM_ODD_MM_COMPAT
/*
* VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
* VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
@ -212,108 +346,100 @@ typedef struct vma_entry {
} vma_entry_t;
struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
/*
* FIXME: Check can't map aperture flag.
*/
mutex_lock(&bo->mutex);
if (type)
*type = VM_FAULT_MINOR;
if (!map)
return NOPAGE_OOM;
if (address > vma->vm_end) {
page = NOPAGE_SIGBUS;
goto out_unlock;
}
if (address > vma->vm_end)
return NOPAGE_SIGBUS;
dev = bo->dev;
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
drm_fixup_ttm_caching(ttm);
BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
if (drm_mem_reg_is_pci(dev, &bo->mem)) {
DRM_ERROR("Invalid compat nopage.\n");
page = NOPAGE_SIGBUS;
goto out_unlock;
}
bm = &dev->bm;
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
page = NOPAGE_OOM;
goto out;
}
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
if (!page) {
drm_free_memctl(PAGE_SIZE);
page = NOPAGE_OOM;
goto out;
}
++bm->cur_pages;
SetPageLocked(page);
goto out_unlock;
}
get_page(page);
out:
mutex_unlock(&dev->struct_mutex);
out_unlock:
mutex_unlock(&bo->mutex);
return page;
}
int drm_ttm_map_bound(struct vm_area_struct *vma)
int drm_bo_map_bound(struct vm_area_struct *vma)
{
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
int ret = 0;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
unsigned long pfn = ttm->aper_offset +
(ttm->be->aperture_base >> PAGE_SHIFT);
pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
&bus_offset, &bus_size);
BUG_ON(ret);
if (bus_size) {
drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start,
pgprot);
}
return ret;
}
int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n_entry;
vma_entry_t *v_entry;
drm_local_map_t *map = (drm_local_map_t *)
vma->vm_private_data;
struct mm_struct *mm = vma->vm_mm;
v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
if (!v_entry) {
DRM_ERROR("Allocation of vma pointer entry failed\n");
return -ENOMEM;
}
v_entry->vma = vma;
map->handle = (void *) v_entry;
list_add_tail(&v_entry->head, &ttm->vma_list);
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_add_tail(&v_entry->head, &bo->vma_list);
list_for_each_entry(entry, &bo->p_mm_list, head) {
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
@ -327,29 +453,29 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
return 0;
}
void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n;
vma_entry_t *v_entry, *v_n;
int found = 0;
struct mm_struct *mm = vma->vm_mm;
list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
if (v_entry->vma == vma) {
found = 1;
list_del(&v_entry->head);
drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
break;
}
}
BUG_ON(!found);
list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
if (mm == entry->mm) {
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
BUG_ON(entry->locked);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
}
return;
}
@ -359,12 +485,12 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
int drm_ttm_lock_mm(drm_ttm_t * ttm)
int drm_bo_lock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
lock_ok = 0;
@ -376,7 +502,7 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
if (lock_ok)
return 0;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_for_each_entry(entry, &bo->p_mm_list, head) {
if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
@ -391,44 +517,164 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
return -EAGAIN;
}
void drm_ttm_unlock_mm(drm_ttm_t * ttm)
void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
list_for_each_entry(entry, &ttm->p_mm_list, head) {
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
}
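drm_bo_lock_kmm()/drm_bo_unlock_kmm() above implement an all-or-nothing trylock: every mmap_sem of every process mapping the buffer must be taken before dev->struct_mutex, and since those semaphores cannot be acquired in a guaranteed-safe order, any failed trylock rolls back the ones already held and reports -EAGAIN so the caller can retry. The shape of the pattern, reduced to its core:

	/* Reduced sketch of the all-or-nothing locking used above. */
	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!down_write_trylock(&entry->mm->mmap_sem))
			goto rollback;          /* someone else holds it: back off */
		entry->locked = 1;
	}
	return 0;                               /* all mms locked, safe to proceed */
rollback:
	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!entry->locked)
			break;                  /* stop at the first one we missed */
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
	return -EAGAIN;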
int drm_ttm_remap_bound(drm_ttm_t *ttm)
int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
int ret = 0;
list_for_each_entry(v_entry, &ttm->vma_list, head) {
ret = drm_ttm_map_bound(v_entry->vma);
if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
list_for_each_entry(v_entry, &bo->vma_list, head) {
ret = drm_bo_map_bound(v_entry->vma);
if (ret)
break;
}
}
drm_ttm_unlock_mm(ttm);
return ret;
}
void drm_ttm_finish_unmap(drm_ttm_t *ttm)
void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
return;
list_for_each_entry(v_entry, &ttm->vma_list, head) {
list_for_each_entry(v_entry, &bo->vma_list, head) {
v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
drm_ttm_unlock_mm(ttm);
}
#endif
#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
p->ary[0] = idp->id_free;
idp->id_free = p;
idp->id_free_cnt++;
}
static void free_layer(struct idr *idp, struct idr_layer *p)
{
unsigned long flags;
/*
* Depends on the return element being zeroed.
*/
spin_lock_irqsave(&idp->lock, flags);
__free_layer(idp, p);
spin_unlock_irqrestore(&idp->lock, flags);
}
/**
* idr_for_each - iterate through all stored pointers
* @idp: idr handle
* @fn: function to be called for each pointer
* @data: data passed back to callback function
*
* Iterate over the pointers registered with the given idr. The
* callback function will be called for each pointer currently
* registered, passing the id, the pointer and the data pointer passed
* to this function. It is not safe to modify the idr tree while in
* the callback, so functions such as idr_get_new and idr_remove are
* not allowed.
*
* We check the return of @fn each time. If it returns anything other
* than 0, we break out and return that value.
*
* The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
*/
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data)
{
int n, id, max, error = 0;
struct idr_layer *p;
struct idr_layer *pa[MAX_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = idp->top;
max = 1 << n;
id = 0;
while (id < max) {
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
}
if (p) {
error = fn(id, (void *)p, data);
if (error)
break;
}
id += 1 << n;
while (n < fls(id)) {
n += IDR_BITS;
p = *--paa;
}
}
return error;
}
EXPORT_SYMBOL(idr_for_each);
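
As a usage sketch (not part of this diff): driving the idr_for_each() backport above with a free callback, the shape the drm_context.c and drm_drawable.c hunks below adopt. The object type, the idr field, and the DRM_MEM_DRIVER area are stand-ins:

	/* Hypothetical callback: frees one object stored under an id. */
	static int my_obj_free(int id, void *p, void *data)
	{
		struct my_obj *obj = p;

		drm_free(obj, sizeof(*obj), DRM_MEM_DRIVER);
		return 0;	/* non-zero would abort the walk */
	}

	/* Caller serializes against idr_get_new()/idr_remove(), per the comment above. */
	idr_for_each(&dev->my_idr, my_obj_free, NULL);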
/**
* idr_remove_all - remove all ids from the given idr tree
* @idp: idr handle
*
* idr_destroy() only frees up unused, cached idp_layers, but this
* function will remove all id mappings and leave all idp_layers
* unused.
*
* A typical clean-up sequence for objects stored in an idr tree will
* use idr_for_each() to free all objects, if necessary, then
* idr_remove_all() to remove all ids, and idr_destroy() to free
* up the cached idr_layers.
*/
void idr_remove_all(struct idr *idp)
{
int n, id, max, error = 0;
struct idr_layer *p;
struct idr_layer *pa[MAX_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
p = idp->top;
max = 1 << n;
id = 0;
while (id < max && !error) {
while (n > IDR_BITS && p) {
n -= IDR_BITS;
*paa++ = p;
p = p->ary[(id >> n) & IDR_MASK];
}
id += 1 << n;
while (n < fls(id)) {
if (p) {
memset(p, 0, sizeof *p);
free_layer(idp, p);
}
n += IDR_BITS;
p = *--paa;
}
}
idp->top = NULL;
idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
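
The clean-up sequence the comment above describes, written out end to end; idr_destroy() is the stock kernel primitive, and my_obj_free is the hypothetical callback from the previous sketch:

	idr_for_each(&dev->my_idr, my_obj_free, NULL);	/* 1. free the stored objects */
	idr_remove_all(&dev->my_idr);			/* 2. drop every id mapping */
	idr_destroy(&dev->my_idr);			/* 3. release the cached idr_layers */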
#endif


@ -31,7 +31,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <asm/agp.h>
#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_
@ -57,6 +56,12 @@
#define module_param(name, type, perm)
#endif
/* older kernels had different irq args */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
#undef DRM_IRQ_ARGS
#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs
#endif
#ifndef list_for_each_safe
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
@ -80,92 +85,6 @@
pos = n, n = list_entry(n->member.next, typeof(*n), member))
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
static inline struct page *vmalloc_to_page(void *vmalloc_addr)
{
unsigned long addr = (unsigned long)vmalloc_addr;
struct page *page = NULL;
pgd_t *pgd = pgd_offset_k(addr);
pmd_t *pmd;
pte_t *ptep, pte;
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, addr);
if (!pmd_none(*pmd)) {
preempt_disable();
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
if (pte_present(pte))
page = pte_page(pte);
pte_unmap(ptep);
preempt_enable();
}
}
return page;
}
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
#define down_write down
#define up_write up
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#define DRM_PCI_DEV(pdev) &pdev->dev
#else
#define DRM_PCI_DEV(pdev) NULL
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
static inline unsigned iminor(struct inode *inode)
{
return MINOR(inode->i_rdev);
}
#define old_encode_dev(x) (x)
struct drm_sysfs_class;
struct class_simple;
struct device;
#define pci_dev_put(x) do {} while (0)
#define pci_get_subsys pci_find_subsys
static inline struct class_device *DRM(sysfs_device_add) (struct drm_sysfs_class
* cs, dev_t dev,
struct device *
device,
const char *fmt,
...) {
return NULL;
}
static inline void DRM(sysfs_device_remove) (dev_t dev) {
}
static inline void DRM(sysfs_destroy) (struct drm_sysfs_class * cs) {
}
static inline struct drm_sysfs_class *DRM(sysfs_create) (struct module * owner,
char *name) {
return NULL;
}
#ifndef pci_pretty_name
#define pci_pretty_name(x) x->name
#endif
struct drm_device;
static inline int radeon_create_i2c_busses(struct drm_device *dev)
{
return 0;
};
static inline void radeon_delete_i2c_busses(struct drm_device *dev)
{
};
#endif
#ifndef __user
#define __user
#endif
@ -178,22 +97,29 @@ static inline void radeon_delete_i2c_busses(struct drm_device *dev)
#define __GFP_COMP 0
#endif
#ifndef REMAP_PAGE_RANGE_5_ARGS
#define DRM_RPR_ARG(vma)
#else
#define DRM_RPR_ARG(vma) vma,
#if !defined(IRQF_SHARED)
#define IRQF_SHARED SA_SHIRQ
#endif
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
return remap_page_range(DRM_RPR_ARG(vma) from,
return remap_page_range(vma, from,
pfn << PAGE_SHIFT,
size,
pgprot);
}
static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
{
void *addr;
addr = kmalloc(size * nmemb, flags);
if (addr != NULL)
memset((void *)addr, 0, size * nmemb);
return addr;
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
@ -215,10 +141,6 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
#define __x86_64__
#endif
#ifndef pci_pretty_name
#define pci_pretty_name(dev) ""
#endif
/* sysfs __ATTR macro */
#ifndef __ATTR
#define __ATTR(_name,_mode,_show,_store) { \
@ -228,14 +150,31 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#define vmalloc_user(_size) ({void * tmp = vmalloc(_size); \
if (tmp) memset(tmp, 0, _size); \
(tmp);})
#endif
#ifndef list_for_each_entry_safe_reverse
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
#endif
#include <linux/mm.h>
#include <asm/page.h>
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
#define DRM_FULL_MM_COMPAT
#endif
/*
@ -253,16 +192,9 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
/*
* These are similar to the current kernel gatt pages allocator, only that we
* want a struct page pointer instead of a virtual address. This allows for pages
* that are not in the kernel linear map.
*/
#define drm_alloc_gatt_pages(order) ({ \
void *_virt = alloc_gatt_pages(order); \
((_virt) ? virt_to_page(_virt) : NULL);})
#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order)
#ifndef GFP_DMA32
#define GFP_DMA32 0
#endif
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
@ -280,18 +212,14 @@ extern int drm_map_page_into_agp(struct page *page);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
struct fault_data;
extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
#ifndef DRM_FULL_MM_COMPAT
/*
* Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
* For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
@ -306,19 +234,20 @@ struct fault_data {
int type;
};
extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
#endif
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
!defined(DRM_FULL_MM_COMPAT)
extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
#endif /* ndef DRM_FULL_MM_COMPAT */
#ifdef DRM_ODD_MM_COMPAT
struct drm_ttm;
struct drm_buffer_object;
/*
@ -326,13 +255,13 @@ struct drm_ttm;
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
extern int drm_ttm_add_vma(struct drm_ttm * ttm,
extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
@ -342,12 +271,12 @@ extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
* schedule() and try again.
*/
extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
/*
* Unlock all relevant mmap_sems for a ttm.
*/
extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
/*
* If the ttm was bound to the aperture, this function shall be called
@ -357,7 +286,7 @@ extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
* releases the mmap_sems for this ttm.
*/
extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
/*
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
@ -366,14 +295,23 @@ extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
* releases the mmap_sems for this ttm.
*/
extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
/*
* Remap a vma for a bound ttm. Call with the ttm mutex held and
* the relevant mmap_sem locked.
*/
extern int drm_ttm_map_bound(struct vm_area_struct *vma);
extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
/* fixme when functions are upstreamed */
#define DRM_IDR_COMPAT_FN
#ifdef DRM_IDR_COMPAT_FN
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
void idr_remove_all(struct idr *idp);
#endif
#endif


@ -53,25 +53,21 @@
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
* in drm_device::context_sareas, while holding the drm_device::struct_mutex
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
{
if (ctx_handle < 0)
goto failed;
if (!dev->ctx_bitmap)
goto failed;
struct drm_ctx_sarea_list *ctx;
if (ctx_handle < DRM_MAX_CTXBITMAP) {
mutex_lock(&dev->struct_mutex);
clear_bit(ctx_handle, dev->ctx_bitmap);
dev->context_sareas[ctx_handle] = NULL;
mutex_unlock(&dev->struct_mutex);
return;
}
failed:
ctx = idr_find(&dev->ctx_idr, ctx_handle);
if (ctx) {
idr_remove(&dev->ctx_idr, ctx_handle);
drm_free(ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
} else
DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
mutex_unlock(&dev->struct_mutex);
return;
}
@ -81,62 +77,34 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
* Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
* drm_device::context_sareas to accommodate the new entry while holding the
* Allocate a new context id from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
static int drm_ctxbitmap_next(drm_device_t * dev)
{
int bit;
int new_id;
int ret;
struct drm_ctx_sarea_list *new_ctx;
if (!dev->ctx_bitmap)
new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
if (!new_ctx)
return -1;
again:
if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
return -ENOMEM;
}
mutex_lock(&dev->struct_mutex);
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit < DRM_MAX_CTXBITMAP) {
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
if ((bit + 1) > dev->max_context) {
dev->max_context = (bit + 1);
if (dev->context_sareas) {
drm_map_t **ctx_sareas;
ret = idr_get_new_above(&dev->ctx_idr, new_ctx, DRM_RESERVED_CONTEXTS, &new_id);
if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
goto again;
}
ctx_sareas = drm_realloc(dev->context_sareas,
(dev->max_context -
1) *
sizeof(*dev->
context_sareas),
dev->max_context *
sizeof(*dev->
context_sareas),
DRM_MEM_MAPS);
if (!ctx_sareas) {
clear_bit(bit, dev->ctx_bitmap);
mutex_unlock(&dev->struct_mutex);
return -1;
}
dev->context_sareas = ctx_sareas;
dev->context_sareas[bit] = NULL;
} else {
/* max_context == 1 at this point */
dev->context_sareas =
drm_alloc(dev->max_context *
sizeof(*dev->context_sareas),
DRM_MEM_MAPS);
if (!dev->context_sareas) {
clear_bit(bit, dev->ctx_bitmap);
mutex_unlock(&dev->struct_mutex);
return -1;
}
dev->context_sareas[bit] = NULL;
}
}
mutex_unlock(&dev->struct_mutex);
return bit;
}
mutex_unlock(&dev->struct_mutex);
return -1;
return new_id;
}
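
For reference, the allocation idiom above in its generic form; the -EAGAIN path re-runs idr_pre_get() because another caller may have consumed the preallocated layer once the mutex was dropped. Everything except the idr calls is a placeholder:

	int id, ret;

again:
	if (idr_pre_get(&some_idr, GFP_KERNEL) == 0)
		return -ENOMEM;			/* could not preallocate a layer */

	mutex_lock(&some_mutex);
	ret = idr_get_new_above(&some_idr, object, DRM_RESERVED_CONTEXTS, &id);
	if (ret == -EAGAIN) {
		mutex_unlock(&some_mutex);
		goto again;			/* preallocation consumed; retry */
	}
	mutex_unlock(&some_mutex);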
/**
@ -144,31 +112,20 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
*
* \param dev DRM device.
*
* Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
* the drm_device::struct_mutex lock.
* Initialise the drm_device::ctx_idr
*/
int drm_ctxbitmap_init(drm_device_t * dev)
{
int i;
int temp;
idr_init(&dev->ctx_idr);
return 0;
}
mutex_lock(&dev->struct_mutex);
dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
DRM_MEM_CTXBITMAP);
if (dev->ctx_bitmap == NULL) {
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
}
memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
dev->context_sareas = NULL;
dev->max_context = -1;
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
static int drm_ctx_sarea_free(int id, void *p, void *data)
{
struct drm_ctx_sarea_list *ctx_entry = p;
drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
return 0;
}
@ -177,17 +134,14 @@ int drm_ctxbitmap_init(drm_device_t * dev)
*
* \param dev DRM device.
*
* Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
* the drm_device::struct_mutex lock.
* Free all idr members using the drm_ctx_sarea_free() helper function
* while holding the drm_device::struct_mutex lock.
*/
void drm_ctxbitmap_cleanup(drm_device_t * dev)
{
mutex_lock(&dev->struct_mutex);
if (dev->context_sareas)
drm_free(dev->context_sareas,
sizeof(*dev->context_sareas) *
dev->max_context, DRM_MEM_MAPS);
drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
idr_for_each(&dev->ctx_idr, drm_ctx_sarea_free, NULL);
idr_remove_all(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
@ -206,7 +160,7 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev)
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
* Gets the map from drm_device::context_sareas with the handle specified and
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
int drm_getsareactx(struct inode *inode, struct file *filp,
@ -218,22 +172,24 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
drm_ctx_priv_map_t request;
drm_map_t *map;
drm_map_list_t *_entry;
struct drm_ctx_sarea_list *ctx_sarea;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
if (dev->max_context < 0
|| request.ctx_id >= (unsigned)dev->max_context) {
ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
if (!ctx_sarea) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
map = ctx_sarea->map;
map = dev->context_sareas[request.ctx_id];
mutex_unlock(&dev->struct_mutex);
request.handle = NULL;
list_for_each_entry(_entry, &dev->maplist->head,head) {
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request.handle =
(void *)(unsigned long)_entry->user_token;
@ -258,7 +214,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
* \return zero on success or a negative number on failure.
*
* Searches the mapping specified in \p arg and update the entry in
* drm_device::context_sareas with it.
* drm_device::ctx_idr with it.
*/
int drm_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
@ -268,15 +224,14 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list = NULL;
struct list_head *list;
struct drm_ctx_sarea_list *ctx_sarea;
if (copy_from_user(&request,
(drm_ctx_priv_map_t __user *) arg, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request.handle)
goto found;
@ -289,11 +244,14 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
map = r_list->map;
if (!map)
goto bad;
if (dev->max_context < 0)
mutex_lock(&dev->struct_mutex);
ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
if (!ctx_sarea)
goto bad;
if (request.ctx_id >= (unsigned)dev->max_context)
goto bad;
dev->context_sareas[request.ctx_id] = map;
ctx_sarea->map = map;
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -449,7 +407,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
ctx_entry->tag = priv;
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist->head);
list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
@ -575,10 +533,10 @@ int drm_rmctx(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist->head)) {
if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx.handle) {
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);

linux-core/drm_drawable.c (new file, 212 lines)

@ -0,0 +1,212 @@
/**
* \file drm_drawable.c
* IOCTLs for drawables
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
* \author Michel Dänzer <michel@tungstengraphics.com>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
/**
* Allocate drawable ID and memory to store information about it.
*/
int drm_adddraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
unsigned long irqflags;
struct drm_drawable_list *draw_info;
drm_draw_t draw;
int new_id = 0;
int ret;
draw_info = drm_calloc(1, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
if (!draw_info)
return -ENOMEM;
again:
if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
return -ENOMEM;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
ret = idr_get_new_above(&dev->drw_idr, draw_info, 1, &new_id);
if (ret == -EAGAIN) {
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
goto again;
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
draw.handle = new_id;
DRM_DEBUG("%d\n", draw.handle);
DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
return 0;
}
/**
* Free drawable ID and memory to store information about it.
*/
int drm_rmdraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_draw_t draw;
unsigned long irqflags;
struct drm_drawable_list *draw_info;
DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
sizeof(draw));
draw_info = idr_find(&dev->drw_idr, draw.handle);
if (!draw_info) {
DRM_DEBUG("No such drawable %d\n", draw.handle);
return -EINVAL;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
idr_remove(&dev->drw_idr, draw.handle);
drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("%d\n", draw.handle);
return 0;
}
int drm_update_drawable_info(DRM_IOCTL_ARGS) {
DRM_DEVICE;
drm_update_draw_t update;
unsigned long irqflags;
drm_drawable_info_t *info;
drm_clip_rect_t *rects;
struct drm_drawable_list *draw_info;
int err;
DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
sizeof(update));
draw_info = idr_find(&dev->drw_idr, update.handle);
if (!draw_info) {
DRM_ERROR("No such drawable %d\n", update.handle);
return DRM_ERR(EINVAL);
}
info = &draw_info->info;
switch (update.type) {
case DRM_DRAWABLE_CLIPRECTS:
if (update.num != info->num_rects) {
rects = drm_alloc(update.num * sizeof(drm_clip_rect_t),
DRM_MEM_BUFS);
} else
rects = info->rects;
if (update.num && !rects) {
DRM_ERROR("Failed to allocate cliprect memory\n");
err = DRM_ERR(ENOMEM);
goto error;
}
if (update.num && DRM_COPY_FROM_USER(rects,
(drm_clip_rect_t __user *)
(unsigned long)update.data,
update.num *
sizeof(*rects))) {
DRM_ERROR("Failed to copy cliprects from userspace\n");
err = DRM_ERR(EFAULT);
goto error;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
if (rects != info->rects) {
drm_free(info->rects, info->num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
}
info->rects = rects;
info->num_rects = update.num;
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Updated %d cliprects for drawable %d\n",
info->num_rects, update.handle);
break;
default:
DRM_ERROR("Invalid update type %d\n", update.type);
return DRM_ERR(EINVAL);
}
return 0;
error:
if (rects != info->rects)
drm_free(rects, update.num * sizeof(drm_clip_rect_t),
DRM_MEM_BUFS);
return err;
}
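
The userspace counterpart, sketched from the field names visible above (handle/type/num/data) and the DRM_IOCTL_UPDATE_DRAW entry in the drm_drv.c hunk further down; fd and draw_handle are assumed to come from opening the device and from the add-draw ioctl, and no libdrm wrapper is assumed:

	drm_update_draw_t update;
	drm_clip_rect_t rect = { .x1 = 0, .y1 = 0, .x2 = 640, .y2 = 480 };

	update.handle = draw_handle;
	update.type = DRM_DRAWABLE_CLIPRECTS;
	update.num = 1;
	update.data = (unsigned long)&rect;

	if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update) != 0)
		perror("DRM_IOCTL_UPDATE_DRAW");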
/**
* Caller must hold the drawable spinlock!
*/
drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
struct drm_drawable_list *draw_info;
draw_info = idr_find(&dev->drw_idr, id);
if (!draw_info) {
DRM_DEBUG("No such drawable %d\n", id);
return NULL;
}
return &draw_info->info;
}
EXPORT_SYMBOL(drm_get_drawable_info);
static int drm_drawable_free(int idr, void *p, void *data)
{
struct drm_drawable_list *drw_entry = p;
drm_free(drw_entry->info.rects, drw_entry->info.num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
return 0;
}
void drm_drawable_free_all(drm_device_t *dev)
{
idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
idr_remove_all(&dev->drw_idr);
}
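
And how a driver is meant to consume the lookup, honoring the "caller must hold the drawable spinlock" rule above; emit_cliprects() is a hypothetical driver helper:

	drm_drawable_info_t *info;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->drw_lock, irqflags);
	info = drm_get_drawable_info(dev, handle);
	if (info) {
		/* rects/num_rects are only stable while drw_lock is held */
		emit_cliprects(dev, info->rects, info->num_rects);
	}
	spin_unlock_irqrestore(&dev->drw_lock, irqflags);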


@ -15,8 +15,6 @@
* #define DRIVER_DESC "Matrox G200/G400"
* #define DRIVER_DATE "20001127"
*
* #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls )
*
* #define drm_x mga_##x
* \endcode
*/
@ -127,7 +125,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};
#define DRIVER_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
@ -142,16 +140,17 @@ static drm_ioctl_desc_t drm_ioctls[] = {
int drm_lastclose(drm_device_t * dev)
{
drm_magic_entry_t *pt, *next;
drm_map_list_t *r_list;
drm_vma_entry_t *vma, *vma_next;
drm_map_list_t *r_list, *list_t;
drm_vma_entry_t *vma, *vma_temp;
int i;
DRM_DEBUG("\n");
if (drm_bo_driver_finish(dev)) {
DRM_ERROR("DRM memory manager still busy. "
"System is unstable. Please reboot.\n");
}
/*
* We can't do much about this function failing.
*/
drm_bo_driver_finish(dev);
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
@ -167,18 +166,9 @@ int drm_lastclose(drm_device_t * dev)
drm_irq_uninstall(dev);
/* Free drawable information memory */
for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
i++) {
drm_drawable_info_t *info = drm_get_drawable_info(dev, i);
if (info) {
drm_free(info->rects, info->num_rects *
sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
drm_free(info, sizeof(*info), DRM_MEM_BUFS);
}
}
mutex_lock(&dev->struct_mutex);
drm_drawable_free_all(dev);
del_timer(&dev->timer);
if (dev->unique) {
@ -199,19 +189,17 @@ int drm_lastclose(drm_device_t * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
drm_agp_mem_t *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_agp_release(dev);
@ -225,20 +213,14 @@ int drm_lastclose(drm_device_t * dev)
}
/* Clear vma list (only built for debugging) */
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
if (dev->maplist) {
while (!list_empty(&dev->maplist->head)) {
struct list_head *list = dev->maplist->head.next;
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
drm_rmmap_locked(dev, r_list->map);
}
r_list = NULL;
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@ -373,13 +355,9 @@ static void drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
}
if (!drm_fb_loaded)
pci_disable_device(dev->pdev);
@ -441,67 +419,37 @@ void drm_exit(struct drm_driver *driver)
EXPORT_SYMBOL(drm_exit);
/** File operations structure */
static struct file_operations drm_stub_fops = {
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
.open = drm_stub_open
};
static int drm_create_memory_caches(void)
{
drm_cache.mm = kmem_cache_create("drm_mm_node_t",
sizeof(drm_mm_node_t),
0,
SLAB_HWCACHE_ALIGN,
NULL,NULL);
if (!drm_cache.mm)
return -ENOMEM;
drm_cache.fence_object= kmem_cache_create("drm_fence_object_t",
sizeof(drm_fence_object_t),
0,
SLAB_HWCACHE_ALIGN,
NULL,NULL);
if (!drm_cache.fence_object)
return -ENOMEM;
return 0;
}
static void drm_free_mem_cache(kmem_cache_t *cache,
const char *name)
{
if (!cache)
return;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
if (kmem_cache_destroy(cache)) {
DRM_ERROR("Warning! DRM is leaking %s memory.\n",
name);
}
#else
kmem_cache_destroy(cache);
#endif
}
static void drm_free_memory_caches(void )
{
drm_free_mem_cache(drm_cache.fence_object, "fence object");
drm_cache.fence_object = NULL;
drm_free_mem_cache(drm_cache.mm, "memory manager block");
drm_cache.mm = NULL;
}
static int __init drm_core_init(void)
{
int ret;
struct sysinfo si;
unsigned long avail_memctl_mem;
unsigned long max_memctl_mem;
si_meminfo(&si);
drm_init_memctl(si.totalram/2, si.totalram*3/4);
ret = drm_create_memory_caches();
if (ret)
goto err_p1;
/*
* AGP only allows low / DMA32 memory ATM.
*/
avail_memctl_mem = si.totalram - si.totalhigh;
/*
* Avoid overflows
*/
max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
if (avail_memctl_mem >= max_memctl_mem)
avail_memctl_mem = max_memctl_mem;
drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);
ret = -ENOMEM;
drm_cards_limit =
@ -539,13 +487,11 @@ err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
drm_free_memory_caches();
return ret;
}
static void __exit drm_core_exit(void)
{
drm_free_memory_caches();
remove_proc_entry("dri", NULL);
drm_sysfs_destroy(drm_class);
@ -622,21 +568,20 @@ int drm_ioctl(struct inode *inode, struct file *filp,
current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
priv->authenticated);
if (nr >= DRIVER_IOCTL_COUNT &&
(nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
else if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
ioctl = &drm_ioctls[nr];
else
goto err_i1;
func = ioctl->func;
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) /* Local override? */
/* is there a local override? */
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
func = dev->driver->dma_ioctl;
if (!func) {
@ -656,3 +601,17 @@ err_i1:
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
drm_map_list_t *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
(entry->map->flags & _DRM_CONTAINS_LOCK)) {
return entry->map;
}
}
return NULL;
}
EXPORT_SYMBOL(drm_getsarea);
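
drm_getsarea() just scans dev->maplist for the first _DRM_SHM map flagged _DRM_CONTAINS_LOCK. The expected caller is a driver init path, roughly as below; dev_priv is driver-specific and stands in for wherever the driver caches the map:

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		return -EINVAL;		/* a real driver tears down first */
	}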


@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -11,6 +11,10 @@
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@ -19,11 +23,6 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@ -35,21 +34,40 @@
* Typically called by the IRQ handler.
*/
void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
void drm_fence_handler(drm_device_t * dev, uint32_t class,
uint32_t sequence, uint32_t type)
{
int wake = 0;
uint32_t diff;
uint32_t relevant;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
struct list_head *list, *prev;
drm_fence_object_t *fence;
struct list_head *head;
drm_fence_object_t *fence, *next;
int found = 0;
int is_exe = (type & DRM_FENCE_TYPE_EXE);
int ge_last_exe;
if (list_empty(&fm->ring))
diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
fc->pending_exe_flush = 0;
diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
ge_last_exe = diff < driver->wrap_diff;
if (ge_last_exe)
fc->pending_flush &= ~type;
if (is_exe && ge_last_exe) {
fc->last_exe_flush = sequence;
}
if (list_empty(&fc->ring))
return;
list_for_each_entry(fence, &fm->ring, ring) {
list_for_each_entry(fence, &fc->ring, ring) {
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff > driver->wrap_diff) {
found = 1;
@ -57,11 +75,11 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
}
}
list = (found) ? fence->ring.prev : fm->ring.prev;
prev = list->prev;
head = (found) ? &fence->ring : &fc->ring;
for (; list != &fm->ring; list = prev, prev = list->prev) {
fence = list_entry(list, drm_fence_object_t, ring);
list_for_each_entry_safe_reverse(fence, next, head, ring) {
if (&fence->ring == &fc->ring)
break;
type |= fence->native_type;
relevant = type & fence->type;
@ -78,7 +96,7 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
~(fence->signaled | fence->submitted_flush);
if (relevant) {
fm->pending_flush |= relevant;
fc->pending_flush |= relevant;
fence->submitted_flush = fence->flush_mask;
}
@ -87,11 +105,10 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
fence->base.hash.key);
list_del_init(&fence->ring);
}
}
if (wake) {
DRM_WAKEUP(&fm->fence_queue);
DRM_WAKEUP(&fc->fence_queue);
}
}
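
The masked subtractions above are the usual wrap-safe ordering test on a hardware sequence counter. Isolated, for a hypothetical 32-bit counter with sequence_mask 0xffffffff and wrap_diff 0x80000000:

	/* "a is at or after b", tolerating wrap-around; same shape as
	 * diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
	 * ge_last_exe = diff < driver->wrap_diff;                         */
	static inline int seq_after_eq(uint32_t a, uint32_t b)
	{
		return ((a - b) & 0xffffffffU) < 0x80000000U;
	}

	/* seq_after_eq(0x00000002, 0xfffffffe) == 1: the masked difference is 4,
	 * far below 0x80000000, so a is correctly seen as newer despite the wrap. */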
@ -112,13 +129,15 @@ void drm_fence_usage_deref_locked(drm_device_t * dev,
{
drm_fence_manager_t *fm = &dev->fm;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
if (atomic_dec_and_test(&fence->usage)) {
drm_fence_unring(dev, &fence->ring);
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
fence->base.hash.key);
atomic_dec(&fm->count);
drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
fence);
BUG_ON(!list_empty(&fence->base.list));
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
}
}
@ -132,8 +151,8 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev,
if (atomic_read(&fence->usage) == 0) {
drm_fence_unring(dev, &fence->ring);
atomic_dec(&fm->count);
drm_ctl_cache_free(drm_cache.fence_object,
sizeof(*fence), fence);
BUG_ON(!list_empty(&fence->base.list));
drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
}
mutex_unlock(&dev->struct_mutex);
}
@ -149,7 +168,7 @@ static void drm_fence_object_destroy(drm_file_t * priv,
drm_fence_usage_deref_locked(dev, fence);
}
static int fence_signaled(drm_device_t * dev, volatile
int drm_fence_object_signaled(drm_device_t * dev,
drm_fence_object_t * fence,
uint32_t mask, int poke_flush)
{
@ -159,7 +178,7 @@ static int fence_signaled(drm_device_t * dev, volatile
drm_fence_driver_t *driver = dev->driver->fence_driver;
if (poke_flush)
driver->poke_flush(dev);
driver->poke_flush(dev, fence->class);
read_lock_irqsave(&fm->lock, flags);
signaled =
(fence->type & mask & fence->signaled) == (fence->type & mask);
@ -168,52 +187,29 @@ static int fence_signaled(drm_device_t * dev, volatile
return signaled;
}
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
drm_fence_driver_t * driver, uint32_t sequence)
{
uint32_t diff;
if (!fm->pending_exe_flush) {
volatile struct list_head *list;
/*
* Last_exe_flush is invalid. Find oldest sequence.
*/
/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
list = &fm->ring;
if (list->next == &fm->ring) {
return;
} else {
drm_fence_object_t *fence =
list_entry(list->next, drm_fence_object_t, ring);
fm->last_exe_flush = (fence->sequence - 1) &
driver->sequence_mask;
}
diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
if (diff >= driver->wrap_diff)
return;
fm->exe_flush_sequence = sequence;
fm->pending_exe_flush = 1;
if (!fc->pending_exe_flush) {
fc->exe_flush_sequence = sequence;
fc->pending_exe_flush = 1;
} else {
diff =
(sequence - fm->exe_flush_sequence) & driver->sequence_mask;
(sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (diff < driver->wrap_diff) {
fm->exe_flush_sequence = sequence;
fc->exe_flush_sequence = sequence;
}
}
}
int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
uint32_t type)
{
return ((fence->signaled & type) == type);
}
int drm_fence_object_flush(drm_device_t * dev,
volatile drm_fence_object_t * fence, uint32_t type)
drm_fence_object_t * fence,
uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
unsigned long flags;
@ -228,16 +224,16 @@ int drm_fence_object_flush(drm_device_t * dev,
if (fence->submitted_flush == fence->signaled) {
if ((fence->type & DRM_FENCE_TYPE_EXE) &&
!(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
drm_fence_flush_exe(fm, driver, fence->sequence);
drm_fence_flush_exe(fc, driver, fence->sequence);
fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
} else {
fm->pending_flush |= (fence->flush_mask &
fc->pending_flush |= (fence->flush_mask &
~fence->submitted_flush);
fence->submitted_flush = fence->flush_mask;
}
}
write_unlock_irqrestore(&fm->lock, flags);
driver->poke_flush(dev);
driver->poke_flush(dev, fence->class);
return 0;
}
@ -246,24 +242,35 @@ int drm_fence_object_flush(drm_device_t * dev,
* wrapped around and reused.
*/
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t old_sequence;
unsigned long flags;
drm_fence_object_t *fence;
uint32_t diff;
write_lock_irqsave(&fm->lock, flags);
old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
fc->pending_exe_flush = 1;
fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
}
write_unlock_irqrestore(&fm->lock, flags);
mutex_lock(&dev->struct_mutex);
read_lock_irqsave(&fm->lock, flags);
if (fm->ring.next == &fm->ring) {
if (list_empty(&fc->ring)) {
read_unlock_irqrestore(&fm->lock, flags);
mutex_unlock(&dev->struct_mutex);
return;
}
old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
atomic_inc(&fence->usage);
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
@ -276,11 +283,44 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
EXPORT_SYMBOL(drm_fence_flush_old);
int drm_fence_object_wait(drm_device_t * dev,
volatile drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
static int drm_fence_lazy_wait(drm_device_t *dev,
drm_fence_object_t *fence,
int ignore_signals,
uint32_t mask)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
int signaled;
unsigned long _end = jiffies + 3*DRM_HZ;
int ret = 0;
do {
DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
(signaled = drm_fence_object_signaled(dev, fence, mask, 1)));
if (signaled)
return 0;
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (drm_fence_object_signaled(dev, fence, mask, 0))
return 0;
if (time_after_eq(jiffies, _end))
ret = -EBUSY;
if (ret) {
if (ret == -EBUSY) {
DRM_ERROR("Fence timeout. "
"GPU lockup or fence driver was "
"taken down.\n");
}
return ((ret == -EINTR) ? -EAGAIN : ret);
}
return 0;
}
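
Putting the pieces together, an in-kernel consumer of these helpers looks roughly like this; class 0 and an EXE-only mask are illustrative, and error handling is trimmed:

	drm_fence_object_t *fence;
	int ret;

	ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
				      DRM_FENCE_FLAG_EMIT, &fence);
	if (ret)
		return ret;

	/* lazy wait, honor signals, wait for execution to complete */
	ret = drm_fence_object_wait(dev, fence, 1, 0, DRM_FENCE_TYPE_EXE);

	drm_fence_usage_deref_unlocked(dev, fence);
	return ret;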
int drm_fence_object_wait(drm_device_t * dev,
drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
{
drm_fence_driver_t *driver = dev->driver->fence_driver;
int ret = 0;
unsigned long _end;
@ -292,7 +332,7 @@ int drm_fence_object_wait(drm_device_t * dev,
return -EINVAL;
}
if (fence_signaled(dev, fence, mask, 0))
if (drm_fence_object_signaled(dev, fence, mask, 0))
return 0;
_end = jiffies + 3 * DRM_HZ;
@ -301,44 +341,29 @@ int drm_fence_object_wait(drm_device_t * dev,
if (lazy && driver->lazy_capable) {
do {
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence, mask, 1));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
if (ret) {
if (ret == -EBUSY) {
DRM_ERROR("Fence timeout. "
"GPU lockup or fence driver was "
"taken down.\n");
}
return ((ret == -EINTR) ? -EAGAIN : ret);
}
} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
driver->lazy_capable) {
/*
* We use IRQ wait for EXE fence if available to gain
* CPU in some cases.
*/
do {
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
fence_signaled(dev, fence,
DRM_FENCE_TYPE_EXE, 1));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
if (ret)
return ((ret == -EINTR) ? -EAGAIN : ret);
return ret;
} else {
if (driver->has_irq(dev, fence->class,
DRM_FENCE_TYPE_EXE)) {
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
DRM_FENCE_TYPE_EXE);
if (ret)
return ret;
}
if (fence_signaled(dev, fence, mask, 0))
if (driver->has_irq(dev, fence->class,
mask & ~DRM_FENCE_TYPE_EXE)) {
ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
mask);
if (ret)
return ret;
}
}
if (drm_fence_object_signaled(dev, fence, mask, 0))
return 0;
/*
@ -350,7 +375,7 @@ int drm_fence_object_wait(drm_device_t * dev,
#endif
do {
schedule();
signaled = fence_signaled(dev, fence, mask, 1);
signaled = drm_fence_object_signaled(dev, fence, mask, 1);
} while (!signaled && !time_after_eq(jiffies, _end));
if (!signaled)
@ -360,33 +385,38 @@ int drm_fence_object_wait(drm_device_t * dev,
}
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
uint32_t fence_flags, uint32_t type)
uint32_t fence_flags, uint32_t class, uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
unsigned long flags;
uint32_t sequence;
uint32_t native_type;
int ret;
drm_fence_unring(dev, &fence->ring);
ret = driver->emit(dev, fence_flags, &sequence, &native_type);
ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
if (ret)
return ret;
write_lock_irqsave(&fm->lock, flags);
fence->class = class;
fence->type = type;
fence->flush_mask = 0x00;
fence->submitted_flush = 0x00;
fence->signaled = 0x00;
fence->sequence = sequence;
fence->native_type = native_type;
list_add_tail(&fence->ring, &fm->ring);
if (list_empty(&fc->ring))
fc->last_exe_flush = sequence - 1;
list_add_tail(&fence->ring, &fc->ring);
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
uint32_t type,
uint32_t fence_flags,
drm_fence_object_t * fence)
{
@ -400,7 +430,7 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
fence->class = 0;
fence->class = class;
fence->type = type;
fence->flush_mask = 0;
fence->submitted_flush = 0;
@ -408,7 +438,8 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
fence->sequence = 0;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
ret = drm_fence_object_emit(dev, fence, fence_flags, type);
ret = drm_fence_object_emit(dev, fence, fence_flags,
fence->class, type);
}
return ret;
}
@ -421,29 +452,29 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(priv, &fence->base, shareable);
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
goto out;
atomic_inc(&fence->usage);
fence->base.type = drm_fence_type;
fence->base.remove = &drm_fence_object_destroy;
DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
return 0;
out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(drm_device_t * dev, uint32_t type,
int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
unsigned flags, drm_fence_object_t ** c_fence)
{
drm_fence_object_t *fence;
int ret;
drm_fence_manager_t *fm = &dev->fm;
fence = drm_ctl_cache_alloc(drm_cache.fence_object,
sizeof(*fence), GFP_KERNEL);
fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, type, flags, fence);
ret = drm_fence_object_init(dev, class, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
return ret;
@ -459,22 +490,30 @@ EXPORT_SYMBOL(drm_fence_object_create);
void drm_fence_manager_init(drm_device_t * dev)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *class;
drm_fence_driver_t *fed = dev->driver->fence_driver;
int i;
fm->lock = RW_LOCK_UNLOCKED;
rwlock_init(&fm->lock);
write_lock(&fm->lock);
INIT_LIST_HEAD(&fm->ring);
fm->pending_flush = 0;
DRM_INIT_WAITQUEUE(&fm->fence_queue);
fm->initialized = 0;
if (fed) {
if (!fed)
goto out_unlock;
fm->initialized = 1;
fm->num_classes = fed->num_classes;
BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
for (i=0; i<fm->num_classes; ++i) {
class = &fm->class[i];
INIT_LIST_HEAD(&class->ring);
class->pending_flush = 0;
DRM_INIT_WAITQUEUE(&class->fence_queue);
}
atomic_set(&fm->count, 0);
for (i = 0; i < fed->no_types; ++i) {
fm->fence_types[i] = &fm->ring;
}
}
out_unlock:
write_unlock(&fm->lock);
}
@ -521,7 +560,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
case drm_fence_create:
if (arg.flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, filp);
ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
ret = drm_fence_object_create(dev, arg.class,
arg.type, arg.flags, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(priv, fence,
@ -531,13 +571,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
drm_fence_usage_deref_unlocked(dev, fence);
return ret;
}
/*
* usage > 0. No need to lock dev->struct_mutex;
*/
atomic_inc(&fence->usage);
arg.handle = fence->base.hash.key;
break;
case drm_fence_destroy:
mutex_lock(&dev->struct_mutex);
@ -584,7 +619,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
arg.type);
break;
case drm_fence_buffers:
if (!dev->bm.initialized) {
@ -601,7 +637,6 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
DRM_FENCE_FLAG_SHAREABLE);
if (ret)
return ret;
atomic_inc(&fence->usage);
arg.handle = fence->base.hash.key;
break;
default:


@ -46,7 +46,7 @@ static int drm_setup(drm_device_t * dev)
drm_local_map_t *map;
int i;
int ret;
int sareapage;
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
@ -57,8 +57,8 @@ static int drm_setup(drm_device_t * dev)
dev->magicfree.next = NULL;
/* prebuild the SAREA */
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
sareapage = max(SAREA_MAX, PAGE_SIZE);
i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
@ -79,13 +79,6 @@ static int drm_setup(drm_device_t * dev)
drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&dev->magicfree);
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
if (dev->ctxlist == NULL)
return -ENOMEM;
memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
INIT_LIST_HEAD(&dev->ctxlist->head);
dev->vmalist = NULL;
dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
@ -154,10 +147,13 @@ int drm_open(struct inode *inode, struct file *filp)
spin_lock(&dev->count_lock);
if (!dev->open_count++) {
spin_unlock(&dev->count_lock);
return drm_setup(dev);
retcode = drm_setup(dev);
goto out;
}
spin_unlock(&dev->count_lock);
}
out:
mutex_lock(&dev->struct_mutex);
BUG_ON((dev->dev_mapping != NULL) &&
(dev->dev_mapping != inode->i_mapping));
@ -265,6 +261,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->user_objects);
INIT_LIST_HEAD(&priv->refd_objects);
@ -288,19 +285,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
if (!dev->file_last) {
priv->next = NULL;
priv->prev = NULL;
dev->file_first = priv;
dev->file_last = priv;
/* first opener automatically becomes master */
if (list_empty(&dev->filelist))
priv->master = 1;
} else {
priv->next = NULL;
priv->prev = dev->file_last;
dev->file_last->next = priv;
dev->file_last = priv;
}
list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
#ifdef __alpha__
@ -379,19 +367,11 @@ static void drm_object_release(struct file *filp) {
head = &priv->user_objects;
}
for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
drm_ht_remove(&priv->refd_object_hash[i]);
}
}
/**
* Release file.
*
@ -426,39 +406,52 @@ int drm_release(struct inode *inode, struct file *filp)
current->pid, (long)old_encode_dev(priv->head->device),
dev->open_count);
if (dev->driver->reclaim_buffers_locked) {
unsigned long _end = jiffies + DRM_HZ*3;
do {
retcode = drm_kernel_take_hw_lock(filp);
} while(retcode && !time_after_eq(jiffies,_end));
if (!retcode) {
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
if (drm_i_have_hw_lock(filp)) {
dev->driver->reclaim_buffers_locked(dev, filp);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
} else {
unsigned long _end=jiffies + 3*DRM_HZ;
int locked = 0;
drm_idlelock_take(&dev->lock);
/*
* FIXME: This is not a good solution. We should perhaps associate the
* DRM lock with a process context, and check whether the current process
* holds the lock. Then we can run reclaim buffers locked anyway.
* Wait for a while.
*/
DRM_ERROR("Reclaim buffers locked deadlock.\n");
DRM_ERROR("This is probably a single thread having multiple\n");
DRM_ERROR("DRM file descriptors open either dying or "
"closing file descriptors\n");
DRM_ERROR("while having the lock. I will not reclaim buffers.\n");
DRM_ERROR("Locking context is 0x%08x\n",
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
do{
spin_lock(&dev->lock.spinlock);
locked = dev->lock.idle_has_lock;
spin_unlock(&dev->lock.spinlock);
if (locked)
break;
schedule();
} while (!time_after_eq(jiffies, _end));
if (!locked) {
DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
"\tdriver to use reclaim_buffers_idlelocked() instead.\n"
"\tI will go on reclaiming the buffers anyway.\n");
}
} else if (drm_i_have_hw_lock(filp)) {
dev->driver->reclaim_buffers_locked(dev, filp);
drm_idlelock_release(&dev->lock);
}
}
if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
drm_idlelock_take(&dev->lock);
dev->driver->reclaim_buffers_idlelocked(dev, filp);
drm_idlelock_release(&dev->lock);
}
if (drm_i_have_hw_lock(filp)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
drm_lock_free(dev, &dev->lock.hw_lock->lock,
drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}
@ -472,10 +465,10 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->ctxlist_mutex);
if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->tag == priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
@ -495,22 +488,12 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->struct_mutex);
drm_object_release(filp);
if (priv->remove_auth_on_close == 1) {
drm_file_t *temp = dev->file_first;
while (temp) {
drm_file_t *temp;
list_for_each_entry(temp, &dev->filelist, lhead)
temp->authenticated = 0;
temp = temp->next;
}
}
if (priv->prev) {
priv->prev->next = priv->next;
} else {
dev->file_first = priv->next;
}
if (priv->next) {
priv->next->prev = priv->prev;
} else {
dev->file_last = priv->prev;
}
list_del(&priv->lhead);
mutex_unlock(&dev->struct_mutex);
if (dev->driver->postclose)


@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm_core.h"


@ -138,12 +138,12 @@ static int drm_set_busid(drm_device_t * dev)
{
int len;
if (dev->unique != NULL)
return EBUSY;
return -EBUSY;
dev->unique_len = 40;
dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
if (dev->unique == NULL)
return ENOMEM;
return -ENOMEM;
len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
@ -156,7 +156,7 @@ static int drm_set_busid(drm_device_t * dev)
dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 2,
DRM_MEM_DRIVER);
if (dev->devname == NULL)
return ENOMEM;
return -ENOMEM;
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);
@ -199,7 +199,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
}
i = 0;
list_for_each(list, &dev->maplist->head) {
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, drm_map_list_t, head);
break;
@ -252,12 +252,18 @@ int drm_getclient(struct inode *inode, struct file *filp,
return -EFAULT;
idx = client.idx;
mutex_lock(&dev->struct_mutex);
for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
if (!pt) {
if (list_empty(&dev->filelist)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
i = 0;
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx)
break;
}
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;
@ -337,13 +343,13 @@ int drm_setversion(DRM_IOCTL_ARGS)
retv.drm_dd_major = dev->driver->major;
retv.drm_dd_minor = dev->driver->minor;
if (copy_to_user(argp, &retv, sizeof(sv)))
if (copy_to_user(argp, &retv, sizeof(retv)))
return -EFAULT;
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
return EINVAL;
return -EINVAL;
if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
dev->if_version = max(if_version, dev->if_version);
if (sv.drm_di_minor >= 1) {
@ -357,7 +363,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
if (sv.drm_dd_major != -1) {
if (sv.drm_dd_major != dev->driver->major ||
sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor)
return EINVAL;
return -EINVAL;
if (dev->driver->set_version)
dev->driver->set_version(dev, &sv);


@ -118,10 +118,9 @@ static int drm_irq_install(drm_device_t * dev)
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->tasklet_lock);
INIT_LIST_HEAD(&dev->vbl_sigs.head);
INIT_LIST_HEAD(&dev->vbl_sigs2.head);
INIT_LIST_HEAD(&dev->vbl_sigs);
INIT_LIST_HEAD(&dev->vbl_sigs2);
dev->vbl_pending = 0;
}
@ -131,7 +130,7 @@ static int drm_irq_install(drm_device_t * dev)
/* Install handler */
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = SA_SHIRQ;
sh_flags = IRQF_SHARED;
ret = request_irq(dev->irq, dev->driver->irq_handler,
sh_flags, dev->devname, dev);
@ -291,7 +290,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig;
@ -301,7 +300,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
* for the same vblank sequence number; nothing to be done in
* that case
*/
list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
list_for_each_entry(vbl_sig, vbl_sigs, head) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current) {
@ -335,7 +334,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
list_add_tail(&vbl_sig->head, vbl_sigs);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@ -378,20 +377,18 @@ void drm_vbl_send_signals(drm_device_t * dev)
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
struct list_head *list, *tmp;
drm_vbl_sig_t *vbl_sig;
drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig, *tmp;
struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
list_for_each_safe(list, tmp, &vbl_sigs->head) {
vbl_sig = list_entry(list, drm_vbl_sig_t, head);
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
list_del(list);
list_del(&vbl_sig->head);
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
@@ -422,7 +419,7 @@ static void drm_locked_tasklet_func(unsigned long data)
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
if (!dev->locked_tasklet_func ||
!drm_lock_take(&dev->lock.hw_lock->lock,
!drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT)) {
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
return;
@@ -433,7 +430,7 @@ static void drm_locked_tasklet_func(unsigned long data)
dev->locked_tasklet_func(dev);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
drm_lock_free(&dev->lock,
DRM_KERNEL_CONTEXT);
dev->locked_tasklet_func = NULL;


@@ -35,12 +35,6 @@
#include "drmP.h"
#if 0
static int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock,
unsigned int context);
#endif
static int drm_notifier(void *priv);
/**
@@ -83,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
return -EINVAL;
add_wait_queue(&dev->lock.lock_queue, &entry);
spin_lock(&dev->lock.spinlock);
dev->lock.user_waiters++;
spin_unlock(&dev->lock.spinlock);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
@@ -90,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
if (drm_lock_take(&dev->lock, lock.context)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -104,6 +101,9 @@ int drm_lock(struct inode *inode, struct file *filp,
break;
}
}
spin_lock(&dev->lock.spinlock);
dev->lock.user_waiters--;
spin_unlock(&dev->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
@@ -184,8 +184,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
lock.context)) {
if (drm_lock_free(&dev->lock, lock.context)) {
/* FIXME: Should really bail out here. */
}
}
@@ -203,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
int drm_lock_take(drm_lock_data_t *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
else {
new = context | _DRM_LOCK_HELD |
((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
_DRM_LOCK_CONT : 0);
}
prev = cmpxchg(lock, old, new);
} while (prev != old);
spin_unlock(&lock_data->spinlock);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
@@ -224,14 +231,15 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
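/*
 * A worked example of the lock-word encoding, assuming the usual drm.h
 * definitions (_DRM_LOCK_HELD 0x80000000, _DRM_LOCK_CONT 0x40000000,
 * context number in the low bits):
 *
 *   free lock:  old = 0
 *               new = 5 | _DRM_LOCK_HELD      (context 5 takes it;
 *               _DRM_LOCK_CONT is added only if other waiters exist)
 *
 *   held lock:  old = 5 | _DRM_LOCK_HELD
 *               new = old | _DRM_LOCK_CONT    (mark contention, caller sleeps)
 *
 * _DRM_LOCKING_CONTEXT(new) strips the two flag bits and recovers the 5,
 * which is why the success test above checks the context bits and
 * _DRM_LOCK_HELD separately.
 */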
#if 0
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
@@ -244,13 +252,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
static int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock,
static int drm_lock_transfer(drm_lock_data_t *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
dev->lock.filp = NULL;
lock_data->filp = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
@@ -258,7 +266,6 @@ static int drm_lock_transfer(drm_device_t * dev,
} while (prev != old);
return 1;
}
#endif
/**
* Free lock.
@@ -271,10 +278,19 @@ static int drm_lock_transfer(drm_device_t * dev,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context)
int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock(&lock_data->spinlock);
if (lock_data->kernel_waiters != 0) {
drm_lock_transfer(lock_data, 0);
lock_data->idle_has_lock = 1;
spin_unlock(&lock_data->spinlock);
return 1;
}
spin_unlock(&lock_data->spinlock);
do {
old = *lock;
@@ -287,7 +303,7 @@ int drm_lock_free(drm_device_t * dev,
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
wake_up_interruptible(&dev->lock.lock_queue);
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
@@ -322,11 +338,59 @@ static int drm_notifier(void *priv)
return 0;
}
/*
* Can be used by drivers to take the hardware lock if necessary.
* (Waiting for idle before reclaiming buffers etc.)
/**
* This function returns immediately. It takes the hw lock with the kernel
* context if the lock is free; otherwise the kernel context is queued with
* the highest priority for when the lock is eventually released.
*
* This guarantees that the kernel will _eventually_ get the lock _unless_
* it is held by a blocked process. (In the latter case an explicit wait for
* the hardware lock would deadlock, which is why the "idlelock" was
* invented.)
*
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
void drm_idlelock_take(drm_lock_data_t *lock_data)
{
int ret = 0;
spin_lock(&lock_data->spinlock);
lock_data->kernel_waiters++;
if (!lock_data->idle_has_lock) {
spin_unlock(&lock_data->spinlock);
ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
spin_lock(&lock_data->spinlock);
if (ret == 1)
lock_data->idle_has_lock = 1;
}
spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(drm_lock_data_t *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
spin_lock(&lock_data->spinlock);
if (--lock_data->kernel_waiters == 0) {
if (lock_data->idle_has_lock) {
do {
old = *lock;
prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
} while (prev != old);
wake_up_interruptible(&lock_data->lock_queue);
lock_data->idle_has_lock = 0;
}
}
spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
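/*
 * A minimal sketch of how a driver might use the idlelock pair while
 * draining the GPU, e.g. before reclaiming buffers. Only the two
 * drm_idlelock_* calls are real; my_driver_wait_idle() and
 * my_engine_busy() are hypothetical driver helpers.
 */
static void my_driver_wait_idle(drm_device_t * dev)
{
	drm_idlelock_take(&dev->lock);

	/* User space can no longer be handed the heavyweight lock, so
	 * the engine can be drained without new submissions sneaking in. */
	while (my_engine_busy(dev))
		msleep(1);

	drm_idlelock_release(&dev->lock);
}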
int drm_i_have_hw_lock(struct file *filp)
{
DRM_DEVICE;
@@ -337,50 +401,3 @@ int drm_i_have_hw_lock(struct file *filp)
}
EXPORT_SYMBOL(drm_i_have_hw_lock);
int drm_kernel_take_hw_lock(struct file *filp)
{
DRM_DEVICE;
int ret = 0;
unsigned long _end = jiffies + 3*DRM_HZ;
if (!drm_i_have_hw_lock(filp)) {
DECLARE_WAITQUEUE(entry, current);
add_wait_queue(&dev->lock.lock_queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
ret = -EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
if (time_after_eq(jiffies,_end)) {
ret = -EBUSY;
break;
}
schedule_timeout(1);
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
}
return ret;
}
EXPORT_SYMBOL(drm_kernel_take_hw_lock);


@@ -47,7 +47,7 @@ static struct {
static inline size_t drm_size_align(size_t size) {
register size_t tmpSize = 4;
size_t tmpSize = 4;
if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
@@ -95,12 +95,13 @@ void drm_query_memctl(drm_u64_t *cur_used,
EXPORT_SYMBOL(drm_query_memctl);
void drm_init_memctl(size_t p_low_threshold,
size_t p_high_threshold)
size_t p_high_threshold,
size_t unit_size)
{
spin_lock(&drm_memctl.lock);
drm_memctl.cur_used = 0;
drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT;
drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT;
drm_memctl.low_threshold = p_low_threshold * unit_size;
drm_memctl.high_threshold = p_high_threshold * unit_size;
spin_unlock(&drm_memctl.lock);
}
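/*
 * With the new unit_size argument, callers that used to express the two
 * thresholds in pages keep the old behaviour by passing PAGE_SIZE as the
 * unit, e.g. (the argument names here are illustrative):
 *
 *	drm_init_memctl(low_pages, high_pages, PAGE_SIZE);
 */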
@@ -134,13 +135,7 @@ int drm_mem_info(char *buf, char **start, off_t offset,
/** Wrapper around kmalloc() */
void *drm_calloc(size_t nmemb, size_t size, int area)
{
void *addr;
addr = kmalloc(size * nmemb, GFP_KERNEL);
if (addr != NULL)
memset((void *)addr, 0, size * nmemb);
return addr;
return kcalloc(nmemb, size, GFP_KERNEL);
}
EXPORT_SYMBOL(drm_calloc);
@@ -218,6 +213,49 @@ void drm_free_pages(unsigned long address, int order, int area)
}
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
unsigned long *phys_addr_map, i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (!agpmem)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(num_pages * sizeof(struct page *));
if (!page_map)
return NULL;
phys_addr_map =
agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
for (i = 0; i < num_pages; ++i)
page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
return addr;
}
/** Wrapper around agp_allocate_memory() */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
@@ -248,5 +286,36 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
return NULL;
}
#endif /* agp */
#endif /* debug_memory */
void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
{
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL_GPL(drm_core_ioremap);
void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
{
if (!map->handle || !map->size)
return;
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
iounmap(map->handle);
}
EXPORT_SYMBOL_GPL(drm_core_ioremapfree);
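/*
 * A minimal usage sketch of the pair above, assuming "map" has already
 * been set up on the device's maplist; my_map_registers() is hypothetical:
 */
static int my_map_registers(struct drm_device *dev, struct drm_map *map)
{
	/* Picks agp_remap() or ioremap() as appropriate for the chipset. */
	drm_core_ioremap(map, dev);
	if (!map->handle)
		return -ENOMEM;

	/* ... access the mapping through map->handle ... */

	drm_core_ioremapfree(map, dev);
	return 0;
}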


@@ -43,7 +43,7 @@
*/
/* Need the 4-argument version of vmap(). */
#if __OS_HAS_AGP && defined(VMAP_4_ARGS)
#if __OS_HAS_AGP
#include <linux/vmalloc.h>
@@ -57,177 +57,6 @@
# endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
#ifndef pte_offset_kernel
# define pte_offset_kernel(dir, address) pte_offset(dir, address)
#endif
#ifndef pte_pfn
# define pte_pfn(pte) (pte_page(pte) - mem_map)
#endif
#ifndef pfn_to_page
# define pfn_to_page(pfn) (mem_map + (pfn))
#endif
#endif
/*
* Find the drm_map that covers the range [offset, offset+size).
*/
static inline drm_map_t *drm_lookup_map(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
struct list_head *list;
drm_map_list_t *r_list;
drm_map_t *map;
list_for_each(list, &dev->maplist->head) {
r_list = (drm_map_list_t *) list;
map = r_list->map;
if (!map)
continue;
if (map->offset <= offset
&& (offset + size) <= (map->offset + map->size))
return map;
}
return NULL;
}
static inline void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
unsigned long *phys_addr_map, i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
void *addr;
size = PAGE_ALIGN(size);
#ifdef __alpha__
offset -= dev->hose->mem_space->start;
#endif
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
break;
if (!agpmem)
return NULL;
/*
* OK, we're mapping AGP space on a chipset/platform on which memory accesses by
* the CPU do not get remapped by the GART. We fix this by using the kernel's
* page-table instead (that's probably faster anyhow...).
*/
/* note: use vmalloc() because num_pages could be large... */
page_map = vmalloc(num_pages * sizeof(struct page *));
if (!page_map)
return NULL;
phys_addr_map =
agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
for (i = 0; i < num_pages; ++i)
page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
return addr;
}
static inline unsigned long drm_follow_page(void *vaddr)
{
pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10)
pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr);
#else
pud_t *pud = pud_offset(pgd, (unsigned long) vaddr);
pmd_t *pmd = pmd_offset(pud, (unsigned long) vaddr);
#endif
pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
return pte_pfn(*ptep) << PAGE_SHIFT;
}
#else /* __OS_HAS_AGP */
static inline drm_map_t *drm_lookup_map(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
return NULL;
}
static inline void *agp_remap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
return NULL;
}
static inline unsigned long drm_follow_page(void *vaddr)
{
return 0;
}
#endif
#ifndef DEBUG_MEMORY
static inline void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
#if defined(VMAP_4_ARGS)
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
drm_map_t *map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP)
return agp_remap(offset, size, dev);
}
#endif
return ioremap(offset, size);
}
static inline void *drm_ioremap_nocache(unsigned long offset,
unsigned long size, drm_device_t * dev)
{
#if defined(VMAP_4_ARGS)
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
drm_map_t *map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP)
return agp_remap(offset, size, dev);
}
#endif
return ioremap_nocache(offset, size);
}
static inline void drm_ioremapfree(void *pt, unsigned long size,
drm_device_t * dev)
{
#if defined(VMAP_4_ARGS)
/*
* This is a bit ugly. It would be much cleaner if the DRM API used separate
* routines for handling mappings in the AGP space. Hopefully this can be done in
* a future revision of the interface...
*/
if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
&& ((unsigned long)pt >= VMALLOC_START
&& (unsigned long)pt < VMALLOC_END)) {
unsigned long offset;
drm_map_t *map;
offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
map = drm_lookup_map(offset, size, dev);
if (map && map->type == _DRM_AGP) {
vunmap(pt);
return;
}
}
#endif
iounmap(pt);
}
#else
extern void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev);
extern void *drm_ioremap_nocache(unsigned long offset,
unsigned long size, drm_device_t * dev);
extern void drm_ioremapfree(void *pt, unsigned long size,
drm_device_t * dev);
#endif


@@ -289,79 +289,6 @@ void drm_free_pages(unsigned long address, int order, int area)
}
}
void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
EXPORT_SYMBOL(drm_ioremap);
void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
drm_device_t * dev)
{
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
EXPORT_SYMBOL(drm_ioremap_nocache);
void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev)
{
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
drm_ioremapfree(pt, size, dev);
spin_lock(&drm_mem_lock);
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
EXPORT_SYMBOL(drm_ioremapfree);
#if __OS_HAS_AGP
DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)


@@ -275,74 +275,6 @@ void drm_free_pages (unsigned long address, int order, int area) {
}
}
void *drm_ioremap (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
if (!size) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Mapping 0 bytes at 0x%08lx\n", offset);
return NULL;
}
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
spin_unlock(&drm_mem_lock);
return NULL;
}
spin_lock(&drm_mem_lock);
++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
spin_unlock(&drm_mem_lock);
return pt;
}
void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
int alloc_count;
int free_count;
if (!pt)
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Attempt to free NULL pointer\n");
else
drm_ioremapfree(pt, size, dev);
spin_lock(&drm_mem_lock);
drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
#if __OS_HAS_AGP
DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {


@@ -49,7 +49,7 @@ unsigned long drm_mm_tail_space(drm_mm_t *mm)
struct list_head *tail_node;
drm_mm_node_t *entry;
tail_node = mm->root_node.ml_entry.prev;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free)
return 0;
@@ -62,7 +62,7 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
struct list_head *tail_node;
drm_mm_node_t *entry;
tail_node = mm->root_node.ml_entry.prev;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free)
return -ENOMEM;
@@ -82,8 +82,7 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
GFP_KERNEL);
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -92,8 +91,8 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
child->start = start;
child->mm = mm;
list_add_tail(&child->ml_entry, &mm->root_node.ml_entry);
list_add_tail(&child->fl_entry, &mm->root_node.fl_entry);
list_add_tail(&child->ml_entry, &mm->ml_entry);
list_add_tail(&child->fl_entry, &mm->fl_entry);
return 0;
}
@@ -104,7 +103,7 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
struct list_head *tail_node;
drm_mm_node_t *entry;
tail_node = mm->root_node.ml_entry.prev;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
if (!entry->free) {
return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
@@ -119,8 +118,7 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
drm_mm_node_t *child;
child = (drm_mm_node_t *)
drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
GFP_KERNEL);
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -139,8 +137,6 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
return child;
}
drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
unsigned long size, unsigned alignment)
{
@@ -150,7 +146,7 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
unsigned tmp = 0;
if (alignment)
tmp = size % alignment;
tmp = parent->start % alignment;
if (tmp) {
align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
@@ -164,12 +160,8 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
return parent;
} else {
child = drm_mm_split_at_start(parent, size);
if (!child) {
if (align_splitoff)
drm_mm_put_block(align_splitoff);
return NULL;
}
}
if (align_splitoff)
drm_mm_put_block(align_splitoff);
@@ -185,9 +177,8 @@ void drm_mm_put_block(drm_mm_node_t * cur)
{
drm_mm_t *mm = cur->mm;
drm_mm_node_t *list_root = &mm->root_node;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &list_root->ml_entry;
struct list_head *root_head = &mm->ml_entry;
drm_mm_node_t *prev_node = NULL;
drm_mm_node_t *next_node;
@@ -207,9 +198,8 @@ void drm_mm_put_block(drm_mm_node_t * cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
drm_ctl_cache_free(drm_cache.mm,
sizeof(*next_node),
next_node);
drm_ctl_free(next_node, sizeof(*next_node),
DRM_MEM_MM);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@@ -219,19 +209,20 @@ void drm_mm_put_block(drm_mm_node_t * cur)
}
if (!merged) {
cur->free = 1;
list_add(&cur->fl_entry, &list_root->fl_entry);
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
EXPORT_SYMBOL(drm_mm_put_block);
drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->root_node.fl_entry;
const struct list_head *free_stack = &mm->fl_entry;
drm_mm_node_t *entry;
drm_mm_node_t *best;
unsigned long best_size;
@@ -244,8 +235,11 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
entry = list_entry(list, drm_mm_node_t, fl_entry);
wasted = 0;
if (entry->size < size)
continue;
if (alignment) {
register unsigned tmp = size % alignment;
register unsigned tmp = entry->start % alignment;
if (tmp)
wasted += alignment - tmp;
}
@@ -266,15 +260,15 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
int drm_mm_clean(drm_mm_t * mm)
{
struct list_head *head = &mm->root_node.ml_entry;
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->root_node.ml_entry);
INIT_LIST_HEAD(&mm->root_node.fl_entry);
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
return drm_mm_create_tail_node(mm, start, size);
}
@@ -283,20 +277,20 @@ EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(drm_mm_t * mm)
{
struct list_head *bnode = mm->root_node.fl_entry.next;
struct list_head *bnode = mm->fl_entry.next;
drm_mm_node_t *entry;
entry = list_entry(bnode, drm_mm_node_t, fl_entry);
if (entry->ml_entry.next != &mm->root_node.ml_entry ||
entry->fl_entry.next != &mm->root_node.fl_entry) {
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
EXPORT_SYMBOL(drm_mm_takedown);
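/*
 * A minimal sketch of the allocator lifecycle using the interface above;
 * the managed range and the sizes are illustrative, and my_mm_smoke_test()
 * is hypothetical:
 */
static int my_mm_smoke_test(void)
{
	drm_mm_t mm;
	drm_mm_node_t *node;
	int ret;

	ret = drm_mm_init(&mm, 0, 1024);	/* manage offsets 0..1023 */
	if (ret)
		return ret;

	/* Find a free hole of 256 units, aligned to 16, first fit. */
	node = drm_mm_search_free(&mm, 256, 16, 0);
	if (node)
		node = drm_mm_get_block(node, 256, 16);
	if (node)
		drm_mm_put_block(node);	/* merges back into the free list */

	drm_mm_takedown(&mm);
	return 0;
}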


@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,6 +11,10 @@
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@@ -19,12 +23,10 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
@@ -34,6 +36,8 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
drm_device_t *dev = priv->head->dev;
int ret;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
atomic_set(&item->refcount, 1);
item->shareable = shareable;
item->owner = priv;
@@ -54,6 +58,8 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
int ret;
drm_user_object_t *item;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, key, &hash);
if (ret) {
return NULL;
@@ -86,6 +92,8 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
{
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
if (item->owner != priv) {
DRM_ERROR("Cannot destroy object not owned by you.\n");
return -EINVAL;
@@ -123,6 +131,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
drm_ref_object_t *item;
drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
if (!referenced_object->shareable && priv != referenced_object->owner) {
DRM_ERROR("Not allowed to reference this object\n");
return -EINVAL;
@@ -179,6 +188,7 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
drm_hash_item_t *hash;
int ret;
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
(unsigned long)referenced_object, &hash);
if (ret)
@@ -211,6 +221,7 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
drm_ref_t unref_action;
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
unref_action = item->unref_action;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(ht, &item->hash);
@@ -238,11 +249,17 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
{
drm_device_t *dev = priv->head->dev;
drm_user_object_t *uo;
drm_hash_item_t *hash;
int ret;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, user_token);
if (!uo || (uo->type != type)) {
ret = drm_ht_find_item(&dev->object_hash, user_token, &hash);
if (ret) {
DRM_ERROR("Could not find user object to reference.\n");
goto out_err;
}
uo = drm_hash_entry(hash, drm_user_object_t, hash);
if (uo->type != type) {
ret = -EINVAL;
goto out_err;
}
@@ -251,7 +268,6 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
goto out_err;
mutex_unlock(&dev->struct_mutex);
*object = uo;
DRM_ERROR("Referenced an object\n");
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);
@@ -279,7 +295,6 @@ int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
}
drm_remove_ref_object(priv, ro);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("Unreferenced an object\n");
return 0;
out_err:
mutex_unlock(&dev->struct_mutex);

linux-core/drm_objects.h (new file, 484 lines)

@@ -0,0 +1,484 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H
struct drm_device;
/***************************************************
* User space objects. (drm_object.c)
*/
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
typedef enum {
drm_fence_type,
drm_buffer_type,
drm_ttm_type
/*
* Add other user space object types here.
*/
} drm_object_type_t;
/*
* A user object is a structure that helps the drm hand out user handles
* to kernel-internal objects and keep track of those objects, so that
* they can be destroyed, for example when the user space process exits.
* It is designed to be accessible through a 32-bit user space handle.
*/
typedef struct drm_user_object {
drm_hash_item_t hash;
struct list_head list;
drm_object_type_t type;
atomic_t refcount;
int shareable;
drm_file_t *owner;
void (*ref_struct_locked) (drm_file_t * priv,
struct drm_user_object * obj,
drm_ref_t ref_action);
void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
drm_ref_t unref_action);
void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
} drm_user_object_t;
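/*
 * A minimal sketch of how a driver embeds drm_user_object_t in its own
 * object and registers it; my_object_t, my_object_remove() and
 * my_object_register() are hypothetical, and the caller is assumed to
 * fill in base.type with the matching drm_object_type_t:
 */
typedef struct my_object {
	drm_user_object_t base;
	/* driver-private state ... */
} my_object_t;

static void my_object_remove(drm_file_t * priv, drm_user_object_t * obj)
{
	my_object_t *mo = drm_user_object_entry(obj, my_object_t, base);
	drm_ctl_free(mo, sizeof(*mo), DRM_MEM_DRIVER);
}

/* Caller holds dev->struct_mutex, as drm_add_user_object() requires. */
static int my_object_register(drm_file_t * priv, my_object_t * mo,
			      int shareable)
{
	mo->base.remove = my_object_remove;
	return drm_add_user_object(priv, &mo->base, shareable);
}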
/*
* A ref object is a structure used to keep track of references to user
* objects, so that those references can be destroyed, for example when
* the user space process exits. It is designed to be accessible using
* a pointer to the _user_ object.
*/
typedef struct drm_ref_object {
drm_hash_item_t hash;
struct list_head list;
atomic_t refcount;
drm_ref_t unref_action;
} drm_ref_object_t;
/**
* Must be called with the struct_mutex held.
*/
extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
int shareable);
/**
* Must be called with the struct_mutex held.
*/
extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
uint32_t key);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_user_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);
/*
* Must be called with the struct_mutex held. May temporarily release it.
*/
extern int drm_add_ref_object(drm_file_t * priv,
drm_user_object_t * referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
*/
drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
drm_user_object_t * referenced_object,
drm_ref_t ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object. You may not
* release the struct_mutex before calling drm_remove_ref_object.
* This function may temporarily release the struct_mutex.
*/
extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
drm_object_type_t type,
drm_user_object_t ** object);
extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
drm_object_type_t type);
/***************************************************
* Fence objects. (drm_fence.c)
*/
typedef struct drm_fence_object {
drm_user_object_t base;
atomic_t usage;
/*
* The below three fields are protected by the fence manager spinlock.
*/
struct list_head ring;
int class;
uint32_t native_type;
uint32_t type;
uint32_t signaled;
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
} drm_fence_object_t;
#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00
typedef struct drm_fence_class_manager {
struct list_head ring;
uint32_t pending_flush;
wait_queue_head_t fence_queue;
int pending_exe_flush;
uint32_t last_exe_flush;
uint32_t exe_flush_sequence;
} drm_fence_class_manager_t;
typedef struct drm_fence_manager {
int initialized;
rwlock_t lock;
drm_fence_class_manager_t class[_DRM_FENCE_CLASSES];
uint32_t num_classes;
atomic_t count;
} drm_fence_manager_t;
typedef struct drm_fence_driver {
uint32_t num_classes;
uint32_t wrap_diff;
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
int (*has_irq) (struct drm_device * dev, uint32_t class,
uint32_t flags);
int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
uint32_t * breadcrumb, uint32_t * native_type);
void (*poke_flush) (struct drm_device * dev, uint32_t class);
} drm_fence_driver_t;
extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
uint32_t sequence, uint32_t type);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
uint32_t sequence);
extern int drm_fence_object_flush(struct drm_device *dev,
drm_fence_object_t * fence, uint32_t type);
extern int drm_fence_object_signaled(struct drm_device *dev,
drm_fence_object_t * fence,
uint32_t type, int flush);
extern void drm_fence_usage_deref_locked(struct drm_device *dev,
drm_fence_object_t * fence);
extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
drm_fence_object_t * fence);
extern int drm_fence_object_wait(struct drm_device *dev,
drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t class,
drm_fence_object_t ** c_fence);
extern int drm_fence_add_user_object(drm_file_t * priv,
drm_fence_object_t * fence, int shareable);
extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
/**************************************************
*TTMs
*/
/*
* The ttm backend GTT interface (in our case AGP).
* Any similar type of device (PCIE?) need only implement these
* functions to be usable with the "TTM" interface.
* The AGP backend implementation lives in drm_agpsupport.c and
* basically maps these calls to the available agpgart functions.
* Each drm device driver gets an additional function pointer that
* creates these types, so that the device can choose the correct
* aperture (multiple AGP apertures, etc.).
* Most device drivers will let this point to the standard AGP
* implementation.
*/
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
struct drm_ttm_backend;
typedef struct drm_ttm_backend_func {
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend,
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_func_t;
typedef struct drm_ttm_backend {
uint32_t flags;
int mem_type;
drm_ttm_backend_func_t *func;
} drm_ttm_backend_t;
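/*
 * A minimal sketch of a backend function table for a hypothetical "my"
 * device; the real AGP implementation in drm_agpsupport.c follows the
 * same pattern, one function per drm_ttm_backend_func_t slot:
 */
static int my_be_needs_ub_cache_adjust(struct drm_ttm_backend * backend)
{
	return 1;	/* unbound pages must be flipped back to cached */
}

/* populate, clear, bind, unbind and destroy are implemented likewise. */

static drm_ttm_backend_func_t my_backend_func = {
	.needs_ub_cache_adjust = my_be_needs_ub_cache_adjust,
	/* .populate = ..., .clear = ..., .bind = ...,
	 * .unbind = ..., .destroy = ..., */
};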
typedef struct drm_ttm {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
unsigned long aper_offset;
atomic_t vma_count;
struct drm_device *dev;
int destroy;
uint32_t mapping_offset;
drm_ttm_backend_t *be;
enum {
ttm_bound,
ttm_evicted,
ttm_unbound,
ttm_unpopulated,
} state;
} drm_ttm_t;
extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern void drm_ttm_unbind(drm_ttm_t * ttm);
extern void drm_ttm_evict(drm_ttm_t * ttm);
extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
* which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
* when the last vma exits.
*/
extern int drm_destroy_ttm(drm_ttm_t * ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
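/*
 * DRM_FLAG_MASKED copies exactly the _mask bits of _new into _old and
 * leaves the other bits untouched. Worked example: _old = 0x4, _new = 0x1,
 * _mask = 0x1 gives _old = 0x5: the low bit comes from _new, the high
 * bits from _old.
 */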
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
* Page flags.
*/
#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#define DRM_TTM_PAGE_VMALLOC 0x10
/***************************************************
* Buffer objects. (drm_bo.c, drm_bo_move.c)
*/
typedef struct drm_bo_mem_reg {
drm_mm_node_t *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t flags;
uint32_t mask;
} drm_bo_mem_reg_t;
typedef struct drm_buffer_object {
struct drm_device *dev;
drm_user_object_t base;
/*
* If there is a possibility that the usage variable is zero,
* then dev->struct_mutex should be locked before incrementing it.
*/
atomic_t usage;
unsigned long buffer_start;
drm_bo_type_t type;
unsigned long offset;
atomic_t mapped;
drm_bo_mem_reg_t mem;
struct list_head lru;
struct list_head ddestroy;
uint32_t fence_type;
uint32_t fence_class;
drm_fence_object_t *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
struct mutex mutex;
/* For pinned buffers */
drm_mm_node_t *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
/* For vm */
drm_ttm_t *ttm;
drm_map_list_t map_list;
uint32_t memory_type;
unsigned long bus_offset;
uint32_t vm_flags;
void *iomap;
#ifdef DRM_ODD_MM_COMPAT
/* dev->struct_mutex only protected. */
struct list_head vma_list;
struct list_head p_mm_list;
#endif
} drm_buffer_object_t;
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
typedef struct drm_mem_type_manager {
int has_type;
int use_type;
drm_mm_t manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;
uint32_t drm_bus_maptype;
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
} drm_mem_type_manager_t;
#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
typedef struct drm_buffer_manager {
struct mutex init_mutex;
struct mutex evict_mutex;
int nice_mode;
int initialized;
drm_file_t *last_to_validate;
drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
struct work_struct wq;
#else
struct delayed_work wq;
#endif
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
} drm_buffer_manager_t;
typedef struct drm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device * dev);
int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type);
int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
int (*init_mem_type) (struct drm_device * dev, uint32_t type,
drm_mem_type_manager_t * man);
uint32_t(*evict_mask) (struct drm_buffer_object *bo);
int (*move) (struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
} drm_bo_driver_t;
/*
* buffer objects (drm_bo.c)
*/
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
drm_bo_mem_reg_t * mem,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
drm_fence_object_t * fence,
drm_fence_object_t ** used_fence);
extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
int no_wait);
extern int drm_bo_mem_space(drm_buffer_object_t * bo,
drm_bo_mem_reg_t * mem, int no_wait);
extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
int no_wait, int move_unfenced);
/*
* Buffer object memory move helpers.
* drm_bo_move.c
*/
extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
int evict,
int no_wait, drm_bo_mem_reg_t * new_mem);
extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
int evict,
int no_wait,
uint32_t fence_class,
uint32_t fence_type,
uint32_t fence_flags,
drm_bo_mem_reg_t * new_mem);
#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex) \
BUG_ON(!mutex_is_locked(_mutex) || \
((_mutex)->owner != current_thread_info()))
#else
#define DRM_ASSERT_LOCKED(_mutex)
#endif
#endif


@@ -56,7 +56,7 @@
drm_device_t *dev = priv->head->dev
/** IRQ handler arguments and return type and values */
#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs
#define DRM_IRQ_ARGS int irq, void *arg
/** backwards compatibility with old irq return values */
#ifndef IRQ_HANDLED
typedef void irqreturn_t;
@@ -66,13 +66,8 @@ typedef void irqreturn_t;
/** AGP types */
#if __OS_HAS_AGP
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,70)
#define DRM_AGP_MEM agp_memory
#define DRM_AGP_KERN agp_kern_info
#else
#define DRM_AGP_MEM struct agp_memory
#define DRM_AGP_KERN struct agp_kern_info
#endif
#else
/* define some dummy types for non AGP supporting kernels */
struct no_agp_kern {
@@ -98,9 +93,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define MTRR_TYPE_WRCOMB 1
#endif
/** Task queue handler arguments */
#define DRM_TASKQUEUE_ARGS void *arg
/** For data going into the kernel through the ioctl argument */
#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \
if ( copy_from_user(&arg1, arg2, arg3) ) \
@@ -127,24 +119,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
/**
* Get the pointer to the SAREA.
*
* Searches the SAREA on the mapping lists and points drm_device::sarea to it.
*/
#define DRM_GETSAREA() \
do { \
drm_map_list_t *entry; \
list_for_each_entry( entry, &dev->maplist->head, head ) { \
if ( entry->map && \
entry->map->type == _DRM_SHM && \
(entry->map->flags & _DRM_CONTAINS_LOCK) ) { \
dev_priv->sarea = entry->map; \
break; \
} \
} \
} while (0)
#define DRM_HZ HZ
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \


@@ -51,10 +51,8 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
#ifdef DRM_DEBUG_MEMORY
int area = DRM_MEM_DMA;


@@ -75,7 +75,7 @@ static struct drm_proc_list {
#endif
};
#define DRM_PROC_ENTRIES (sizeof(drm_proc_list)/sizeof(drm_proc_list[0]))
#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
/**
* Initialize the DRI proc filesystem for a device.
@@ -211,7 +211,6 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int len = 0;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
@@ -231,9 +230,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
if (dev->maplist != NULL)
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
@@ -452,19 +449,23 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Object accounting:\n\n");
if (fm->initialized) {
DRM_PROC_PRINT("Number of active fence objects: %d.\n\n",
DRM_PROC_PRINT("Number of active fence objects: %d.\n",
atomic_read(&fm->count));
} else {
DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n");
DRM_PROC_PRINT("Fence objects are not supported by this driver\n");
}
if (bm->initialized) {
DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
atomic_read(&bm->count));
}
DRM_PROC_PRINT("Memory accounting:\n\n");
if (bm->initialized) {
DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
} else {
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
}
drm_query_memctl(&used_mem, &low_mem, &high_mem);
@@ -531,7 +532,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
*eof = 0;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
for (priv = dev->file_first; priv; priv = priv->next) {
list_for_each_entry(priv, &dev->filelist, lhead) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
@@ -584,7 +585,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
list_for_each_entry(pt, &dev->vmalist, head) {
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",

linux-core/drm_sarea.h (new symbolic link)

@@ -0,0 +1 @@
../shared-core/drm_sarea.h


@@ -161,6 +161,7 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
return 0;
}
EXPORT_SYMBOL(drm_sman_set_manager);
static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
unsigned long owner)


@@ -47,31 +47,35 @@ MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
MODULE_PARM_DESC(debug, "Enable debug output");
module_param_named(cards_limit, drm_cards_limit, int, S_IRUGO);
module_param_named(debug, drm_debug, int, S_IRUGO|S_IWUGO);
module_param_named(cards_limit, drm_cards_limit, int, 0444);
module_param_named(debug, drm_debug, int, 0600);
drm_head_t **drm_heads;
struct drm_sysfs_class *drm_class;
struct proc_dir_entry *drm_proc_root;
drm_cache_t drm_cache =
{ .mm = NULL,
.fence_object = NULL
};
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
int retcode;
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
spin_lock_init(&dev->lock.spinlock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->bm.init_mutex);
mutex_init(&dev->bm.evict_mutex);
idr_init(&dev->drw_idr);
dev->pdev = pdev;
dev->pci_device = pdev->device;
@@ -83,28 +87,20 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
dev->irq = pdev->irq;
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
return -ENOMEM;
}
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
return -ENOMEM;
}
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
if (dev->maplist == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&dev->maplist->head);
/* the DRM has 6 counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
@@ -249,9 +245,9 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if ((ret = drm_get_head(dev, &dev->primary)))
goto err_g1;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary.minor, pci_pretty_name(dev->pdev));
driver->date, dev->primary.minor);
return 0;


@@ -162,7 +162,7 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
memset(s_dev, 0x00, sizeof(*s_dev));
s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
s_dev->class_dev.dev = DRM_PCI_DEV(head->dev->pdev);
s_dev->class_dev.dev = &head->dev->pdev->dev;
s_dev->class_dev.class = &cs->class;
snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);


@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,6 +11,10 @@
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@@ -19,12 +23,10 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
@@ -39,62 +41,63 @@ static void drm_ttm_cache_flush(void)
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
/*
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
static void *ttm_alloc(unsigned long size, int type)
static void ttm_alloc_pages(drm_ttm_t * ttm)
{
void *ret = NULL;
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
if (drm_alloc_memctl(size))
return NULL;
return;
if (size <= PAGE_SIZE) {
ret = drm_alloc(size, type);
ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
}
if (!ret) {
ret = vmalloc(size);
if (!ttm->pages) {
ttm->pages = vmalloc_user(size);
if (ttm->pages)
ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
}
if (!ret) {
if (!ttm->pages) {
drm_free_memctl(size);
}
return ret;
}
static void ttm_free(void *pointer, unsigned long size, int type)
static void ttm_free_pages(drm_ttm_t * ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
if ((unsigned long)pointer >= VMALLOC_START &&
(unsigned long)pointer <= VMALLOC_END) {
vfree(pointer);
if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
vfree(ttm->pages);
ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
} else {
drm_free(pointer, size, type);
drm_free(ttm->pages, size, DRM_MEM_TTM);
}
drm_free_memctl(size);
ttm->pages = NULL;
}
/*
* Unmap all vma pages from vmas mapping this ttm.
*/
static int unmap_vma_pages(drm_ttm_t * ttm)
static struct page *drm_ttm_alloc_page(void)
{
drm_device_t *dev = ttm->dev;
loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
struct page *page;
#ifdef DRM_ODD_MM_COMPAT
int ret;
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
if (drm_alloc_memctl(PAGE_SIZE)) {
return NULL;
}
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return NULL;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_finish_unmap(ttm);
#endif
return 0;
return page;
}
/*
@@ -130,7 +133,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
if (do_tlbflush)
flush_agp_mappings();
DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
return 0;
}
@@ -149,21 +152,9 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
if (!ttm)
return 0;
if (atomic_read(&ttm->vma_count) > 0) {
ttm->destroy = 1;
DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
return -EBUSY;
}
DRM_DEBUG("Destroying a ttm\n");
#ifdef DRM_TTM_ODD_COMPAT
BUG_ON(!list_empty(&ttm->vma_list));
BUG_ON(!list_empty(&ttm->p_mm_list));
#endif
be = ttm->be;
if (be) {
be->destroy(be);
be->func->destroy(be);
ttm->be = NULL;
}
@@ -188,58 +179,50 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
DRM_ERROR("Erroneous map count. "
"Leaking page mappings.\n");
}
/*
* End debugging.
*/
drm_free_gatt_pages(*cur_page, 0);
__free_page(*cur_page);
drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
}
}
ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages),
DRM_MEM_TTM);
ttm->pages = NULL;
ttm_free_pages(ttm);
}
drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
return 0;
}
struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
{
struct page *p;
drm_buffer_manager_t *bm = &ttm->dev->bm;
p = ttm->pages[index];
if (!p) {
p = drm_ttm_alloc_page();
if (!p)
return NULL;
ttm->pages[index] = p;
++bm->cur_pages;
}
return p;
}
static int drm_ttm_populate(drm_ttm_t * ttm)
{
struct page *page;
unsigned long i;
drm_buffer_manager_t *bm;
drm_ttm_backend_t *be;
if (ttm->state != ttm_unpopulated)
return 0;
bm = &ttm->dev->bm;
be = ttm->be;
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
page = drm_ttm_get_page(ttm, i);
if (!page)
return -ENOMEM;
}
page = drm_alloc_gatt_pages(0);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return -ENOMEM;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
ttm->pages[i] = page;
++bm->cur_pages;
}
}
be->populate(be, ttm->num_pages, ttm->pages);
be->func->populate(be, ttm->num_pages, ttm->pages);
ttm->state = ttm_unbound;
return 0;
}
@@ -248,7 +231,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
* Initialize a ttm.
*/
static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
{
drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
drm_ttm_t *ttm;
@@ -260,11 +243,6 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
if (!ttm)
return NULL;
#ifdef DRM_ODD_MM_COMPAT
INIT_LIST_HEAD(&ttm->p_mm_list);
INIT_LIST_HEAD(&ttm->vma_list);
#endif
ttm->dev = dev;
atomic_set(&ttm->vma_count, 0);
@@ -277,14 +255,12 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
* Account also for AGP module memory usage.
*/
ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
DRM_MEM_TTM);
ttm_alloc_pages(ttm);
if (!ttm->pages) {
drm_destroy_ttm(ttm);
DRM_ERROR("Failed allocating page table\n");
return NULL;
}
memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
ttm->be = bo_driver->create_ttm_backend_entry(dev);
if (!ttm->be) {
drm_destroy_ttm(ttm);
@@ -299,52 +275,37 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
* Unbind a ttm region from the aperture.
*/
int drm_evict_ttm(drm_ttm_t * ttm)
void drm_ttm_evict(drm_ttm_t * ttm)
{
drm_ttm_backend_t *be = ttm->be;
int ret;
switch (ttm->state) {
case ttm_bound:
if (be->needs_ub_cache_adjust(be)) {
ret = unmap_vma_pages(ttm);
if (ret) {
return ret;
}
}
be->unbind(be);
break;
default:
break;
if (ttm->state == ttm_bound) {
ret = be->func->unbind(be);
BUG_ON(ret);
}
ttm->state = ttm_evicted;
return 0;
}
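/*
 * An evicted ttm may still have its pages set non-cached; restore the
 * caching attributes before the ttm is considered unbound again.
 */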
void drm_fixup_ttm_caching(drm_ttm_t * ttm)
void drm_ttm_fixup_caching(drm_ttm_t * ttm)
{
if (ttm->state == ttm_evicted) {
drm_ttm_backend_t *be = ttm->be;
if (be->needs_ub_cache_adjust(be)) {
if (be->func->needs_ub_cache_adjust(be)) {
drm_set_caching(ttm, 0);
}
ttm->state = ttm_unbound;
}
}
int drm_unbind_ttm(drm_ttm_t * ttm)
void drm_ttm_unbind(drm_ttm_t * ttm)
{
int ret = 0;
if (ttm->state == ttm_bound)
ret = drm_evict_ttm(ttm);
drm_ttm_evict(ttm);
if (ret)
return ret;
drm_fixup_ttm_caching(ttm);
return 0;
drm_ttm_fixup_caching(ttm);
}
int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
@@ -361,28 +322,15 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
be = ttm->be;
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && !cached) {
ret = unmap_vma_pages(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && !cached) {
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
#ifdef DRM_ODD_MM_COMPAT
else if (ttm->state == ttm_evicted && !cached) {
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
}
#endif
if ((ret = be->bind(be, aper_offset, cached))) {
if ((ret = be->func->bind(be, aper_offset, cached))) {
ttm->state = ttm_evicted;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_ub_cache_adjust(be))
drm_ttm_unlock_mm(ttm);
#endif
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
@@ -390,130 +338,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
ttm->aper_offset = aper_offset;
ttm->state = ttm_bound;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_ub_cache_adjust(be)) {
ret = drm_ttm_remap_bound(ttm);
if (ret)
return ret;
}
#endif
return 0;
}
/*
* dev->struct_mutex locked.
*/
static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
{
drm_map_list_t *list = &object->map_list;
drm_local_map_t *map;
if (list->user_token)
drm_ht_remove_item(&dev->map_hash, &list->hash);
if (list->file_offset_node) {
drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
map = list->map;
if (map) {
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
if (ttm) {
if (drm_destroy_ttm(ttm) != -EBUSY) {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
} else {
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
}
void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
drm_ttm_object_remove(dev, to);
}
}
void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
{
if (atomic_dec_and_test(&to->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&to->usage) == 0)
drm_ttm_object_remove(dev, to);
mutex_unlock(&dev->struct_mutex);
}
}
/*
* Create a ttm and add it to the drm book-keeping.
* dev->struct_mutex locked.
*/
int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
uint32_t flags, drm_ttm_object_t ** ttm_object)
{
drm_ttm_object_t *object;
drm_map_list_t *list;
drm_local_map_t *map;
drm_ttm_t *ttm;
object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
if (!object)
return -ENOMEM;
object->flags = flags;
list = &object->map_list;
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
if (!list->map) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map = list->map;
ttm = drm_init_ttm(dev, size);
if (!ttm) {
DRM_ERROR("Could not create ttm\n");
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
map->offset = (unsigned long)ttm;
map->type = _DRM_TTM;
map->flags = _DRM_REMOVABLE;
map->size = ttm->num_pages * PAGE_SIZE;
map->handle = (void *)object;
/*
* Add a one-page "hole" to the block size to avoid the mm subsystem
* merging vmas.
* FIXME: Is this really needed?
*/
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
ttm->num_pages + 1, 0, 0);
if (!list->file_offset_node) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
ttm->num_pages + 1, 0);
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
ttm->mapping_offset = list->hash.key;
atomic_set(&object->usage, 1);
*ttm_object = object;
return 0;
}
EXPORT_SYMBOL(drm_bind_ttm);


@@ -1,145 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_TTM_H
#define _DRM_TTM_H
#define DRM_HAS_TTM
/*
 * The backend GART interface (in our case AGP).  Any similar type of
 * device (PCIE?) needs only to implement these functions to be usable
 * with the TTM interface.  The AGP backend implementation lives in
 * drm_agpsupport.c and basically maps these calls to the available
 * agpgart functions.  Each drm device driver gets an additional function
 * pointer that creates these types, so that the device can choose the
 * correct aperture (multiple AGP apertures, etc.).  Most device drivers
 * will let this point to the standard AGP implementation.
 */
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
#define DRM_BE_FLAG_CBA 0x00000004
typedef struct drm_ttm_backend {
unsigned long aperture_base;
void *private;
uint32_t flags;
uint32_t drm_map_type;
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend,
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_t;
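/*
 * Example: the i915 driver simply plugs in the generic AGP backend; its
 * create_ttm_backend_entry hook just returns drm_agp_init_ttm() (see the
 * i915 buffer code later in this diff).  A hypothetical PCIE backend
 * would fill in the same function table with its own populate/bind/unbind
 * implementations.
 */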
typedef struct drm_ttm {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
unsigned long aper_offset;
atomic_t vma_count;
struct drm_device *dev;
int destroy;
uint32_t mapping_offset;
drm_ttm_backend_t *be;
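/*
 * State transitions: ttm_unpopulated -> ttm_unbound once the pages have
 * been allocated and handed to the backend; ttm_unbound -> ttm_bound when
 * the backend binds them into the aperture; ttm_bound -> ttm_evicted on
 * unbind; and ttm_evicted -> ttm_unbound once caching has been fixed up.
 */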
enum {
ttm_bound,
ttm_evicted,
ttm_unbound,
ttm_unpopulated,
} state;
#ifdef DRM_ODD_MM_COMPAT
struct list_head vma_list;
struct list_head p_mm_list;
#endif
} drm_ttm_t;
typedef struct drm_ttm_object {
atomic_t usage;
uint32_t flags;
drm_map_list_t map_list;
} drm_ttm_object_t;
extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
uint32_t flags,
drm_ttm_object_t ** ttm_object);
extern void drm_ttm_object_deref_locked(struct drm_device *dev,
drm_ttm_object_t * to);
extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
drm_ttm_object_t * to);
extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
uint32_t handle,
int check_owner);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern int drm_unbind_ttm(drm_ttm_t * ttm);
/*
* Evict a ttm region. Keeps the aperture caching policy.
*/
extern int drm_evict_ttm(drm_ttm_t * ttm);
extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
* this, which calls this function only if no vmas reference the ttm anymore.
* Otherwise it is called when the last vma exits.
*/
extern int drm_destroy_ttm(drm_ttm_t * ttm);
extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
{
return (drm_ttm_t *) to->map_list.map->offset;
}
#define DRM_MASK_VAL(dest, mask, val) \
(dest) = ((dest) & ~(mask)) | ((val) & (mask));
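/*
 * E.g. DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached)
 * updates only the DRM_TTM_PAGE_UNCACHED bit of the flags, leaving all
 * other bits untouched.
 */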
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
* Page flags.
*/
#define DRM_TTM_PAGE_UNCACHED 0x01
#define DRM_TTM_PAGE_USED 0x02
#define DRM_TTM_PAGE_BOUND 0x04
#define DRM_TTM_PAGE_PRESENT 0x08
#endif


@@ -41,9 +41,9 @@
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static void drm_vm_ttm_close(struct vm_area_struct *vma);
static int drm_vm_ttm_open(struct vm_area_struct *vma);
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
@@ -122,7 +122,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
/*
* It's AGP memory - find the real physical page to map
*/
for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
list_for_each_entry(agpmem, &dev->agp->memory, head) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
@@ -159,95 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \
LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
static
#endif
struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
unsigned long pfn;
int err;
pgprot_t pgprot;
if (!map) {
data->type = VM_FAULT_OOM;
return NULL;
}
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
return NULL;
}
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
/*
* Perhaps retry here?
*/
mutex_lock(&dev->struct_mutex);
drm_fixup_ttm_caching(ttm);
bm = &dev->bm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
data->type = VM_FAULT_OOM;
goto out;
}
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
if (!page) {
drm_free_memctl(PAGE_SIZE);
data->type = VM_FAULT_OOM;
goto out;
}
++bm->cur_pages;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
SetPageReserved(page);
#endif
}
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
/*
* FIXME: Check can't map aperture flag.
*/
pfn = ttm->aper_offset + page_offset +
(ttm->be->aperture_base >> PAGE_SHIFT);
pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
} else {
pfn = page_to_pfn(page);
pgprot = vma->vm_page_prot;
}
err = vm_insert_pfn(vma, address, pfn, pgprot);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out:
mutex_unlock(&dev->struct_mutex);
return NULL;
}
#endif
/**
* \c nopage method for shared virtual memory.
*
@@ -269,13 +180,13 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!map)
return NOPAGE_OOM; /* Nothing allocated */
return NOPAGE_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start;
i = (unsigned long)map->handle + offset;
page = vmalloc_to_page((void *)i);
if (!page)
return NOPAGE_OOM;
return NOPAGE_SIGBUS;
get_page(page);
DRM_DEBUG("shm_nopage 0x%lx\n", address);
@@ -294,10 +205,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vma_entry_t *pt, *prev, *next;
drm_vma_entry_t *pt, *temp;
drm_map_t *map;
drm_map_list_t *r_list;
struct list_head *list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -307,19 +217,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
map = vma->vm_private_data;
mutex_lock(&dev->struct_mutex);
for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
next = pt->next;
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
}
/* We were the only map that was found */
@@ -328,9 +231,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
* we delete this mappings information.
*/
found_maps = 0;
list = &dev->maplist->head;
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map == map)
found_maps++;
}
@@ -348,7 +249,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
map->size);
DRM_DEBUG("mtrr_del = %d\n", retcode);
}
drm_ioremapfree(map->handle, map->size, dev);
iounmap(map->handle);
break;
case _DRM_SHM:
vfree(map->handle);
@@ -396,7 +297,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!dma->pagelist)
return NOPAGE_OOM; /* Nothing allocated */
return NOPAGE_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
page_nr = offset >> PAGE_SHIFT;
@@ -435,7 +336,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
if (!entry->pagelist)
return NOPAGE_OOM; /* Nothing allocated */
return NOPAGE_SIGBUS; /* Nothing allocated */
offset = address - vma->vm_start;
map_offset = map->offset - (unsigned long)dev->sg->virtual;
@@ -446,8 +347,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
return page;
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
@@ -481,34 +380,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
}
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
{
return drm_do_vm_nopage(vma, address);
}
static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
{
return drm_do_vm_shm_nopage(vma, address);
}
static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
{
return drm_do_vm_dma_nopage(vma, address);
}
static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address, int unused)
{
return drm_do_vm_sg_nopage(vma, address);
}
#endif
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
.nopage = drm_vm_nopage,
@@ -537,20 +408,6 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static struct vm_operations_struct drm_vm_ttm_ops = {
.nopage = drm_vm_ttm_nopage,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
#else
static struct vm_operations_struct drm_vm_ttm_ops = {
.fault = drm_vm_ttm_fault,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
#endif
/**
* \c open method for shared virtual memory.
*
@@ -559,7 +416,7 @@ static struct vm_operations_struct drm_vm_ttm_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
static void drm_vm_open(struct vm_area_struct *vma)
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
@@ -571,36 +428,20 @@ static void drm_vm_open(struct vm_area_struct *vma)
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
mutex_lock(&dev->struct_mutex);
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
mutex_unlock(&dev->struct_mutex);
list_add(&vma_entry->head, &dev->vmalist);
}
}
static int drm_vm_ttm_open(struct vm_area_struct *vma) {
drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm;
static void drm_vm_open(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vm_open(vma);
mutex_lock(&dev->struct_mutex);
ttm = (drm_ttm_t *) map->offset;
atomic_inc(&ttm->vma_count);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_add_vma(ttm, vma);
#endif
drm_vm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
return 0;
}
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
{
drm_vm_ttm_open(vma);
}
/**
@@ -615,20 +456,16 @@ static void drm_vm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
drm_vma_entry_t *pt, *prev;
drm_vma_entry_t *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
if (prev) {
prev->next = pt->next;
} else {
dev->vmalist = pt->next;
}
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
@@ -637,34 +474,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
}
static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
drm_ttm_t *ttm;
drm_device_t *dev;
int ret;
drm_vm_close(vma);
if (map) {
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_ttm_delete_vma(ttm, vma);
#endif
if (atomic_dec_and_test(&ttm->vma_count)) {
if (ttm->destroy) {
ret = drm_destroy_ttm(ttm);
BUG_ON(ret);
drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
mutex_unlock(&dev->struct_mutex);
}
return;
}
/**
* mmap DMA memory.
*
@@ -682,7 +491,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
drm_device_dma_t *dma;
unsigned long length = vma->vm_end - vma->vm_start;
lock_kernel();
dev = priv->head->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -690,13 +498,10 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
unlock_kernel();
return -EINVAL;
}
unlock_kernel();
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
@@ -712,15 +517,10 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
}
vma->vm_ops = &drm_vm_dma_ops;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
drm_vm_open_locked(vma);
return 0;
}
@@ -753,7 +553,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
* according to the mapping type and remaps the pages. Finally sets the file
* pointer and calls vm_open().
*/
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
@@ -771,6 +571,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
* the AGP mapped at physical address 0
* --BenH.
*/
if (!vma->vm_pgoff
#if __OS_HAS_AGP
&& (!dev->agp
@@ -779,13 +580,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
)
return drm_mmap_dma(filp, vma);
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff , &hash)) {
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
DRM_ERROR("Could not find map\n");
return -EINVAL;
}
map = drm_hash_entry(hash,drm_map_list_t, hash)->map;
map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
@@ -829,16 +629,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >>PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
#else
if (remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
#endif
return -EAGAIN;
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%lx\n",
@@ -859,46 +655,234 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = (void *)map;
/* Don't let this area swap. Change when
DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED;
#else
vma->vm_flags |= VM_RESERVED;
#endif
break;
case _DRM_SCATTER_GATHER:
vma->vm_ops = &drm_vm_sg_ops;
vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED;
#else
vma->vm_flags |= VM_RESERVED;
#endif
break;
case _DRM_TTM: {
vma->vm_ops = &drm_vm_ttm_ops;
vma->vm_private_data = (void *) map;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#ifdef DRM_ODD_MM_COMPAT
mutex_lock(&dev->struct_mutex);
drm_ttm_map_bound(vma);
mutex_unlock(&dev->struct_mutex);
#endif
if (drm_vm_ttm_open(vma))
return -EAGAIN;
return 0;
}
case _DRM_TTM:
return drm_bo_mmap_locked(vma, filp, map);
default:
return -EINVAL; /* This should never happen. */
}
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open(vma);
drm_vm_open_locked(vma);
return 0;
}
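/*
 * drm_mmap() below is the public entry point; it merely wraps
 * drm_mmap_locked() with dev->struct_mutex held.
 */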
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_mmap_locked(filp, vma);
mutex_unlock(&dev->struct_mutex);
return ret;
}
EXPORT_SYMBOL(drm_mmap);
/**
* buffer object vm functions.
*/
/**
* \c Pagefault method for buffer objects.
*
* \param vma Virtual memory area.
* \param address File offset.
* \return Error or refault. The pfn is manually inserted.
*
* It's important that pfns are inserted while holding the bo->mutex lock;
* otherwise we might race with unmap_mapping_range(), which is always
* called with the bo->mutex lock held.
*
* We're modifying the page attribute bits of the vma->vm_page_prot field
* while holding the mmap_sem in read mode only, never in write mode.
* These bits are not used by the mm subsystem code, and we consider them
* protected by the bo->mutex lock.
*/
#ifdef DRM_FULL_MM_COMPAT
static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
drm_ttm_t *ttm;
drm_device_t *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
unsigned long ret = NOPFN_REFAULT;
if (address > vma->vm_end)
return NOPFN_SIGBUS;
err = mutex_lock_interruptible(&bo->mutex);
if (err)
return NOPFN_REFAULT;
err = drm_bo_wait(bo, 0, 0, 0);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
}
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable.
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
uint32_t new_mask = bo->mem.mask |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
}
}
dev = bo->dev;
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
ret = NOPFN_SIGBUS;
goto out_unlock;
}
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
} else {
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
ret = NOPFN_OOM;
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
drm_io_prot(_DRM_TTM, vma);
}
err = vm_insert_pfn(vma, address, pfn);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
goto out_unlock;
}
out_unlock:
mutex_unlock(&bo->mutex);
return ret;
}
#endif
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_vm_open_locked(vma);
atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_add_vma(bo, vma);
#endif
}
/**
* \c vma open method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_open(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_device_t *dev = bo->dev;
mutex_lock(&dev->struct_mutex);
drm_bo_vm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
}
/**
* \c vma close method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_close(struct vm_area_struct *vma)
{
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
drm_device_t *dev = bo->dev;
drm_vm_close(vma);
if (bo) {
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_delete_vma(bo, vma);
#endif
drm_bo_usage_deref_locked(bo);
mutex_unlock(&dev->struct_mutex);
}
return;
}
static struct vm_operations_struct drm_bo_vm_ops = {
#ifdef DRM_FULL_MM_COMPAT
.nopfn = drm_bo_vm_nopfn,
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
.nopfn = drm_bo_vm_nopfn,
#else
.nopage = drm_bo_vm_nopage,
#endif
#endif
.open = drm_bo_vm_open,
.close = drm_bo_vm_close,
};
/**
* mmap buffer object memory.
*
* \param vma virtual memory area.
* \param filp file pointer.
* \param map The buffer object drm map.
* \return zero on success or a negative number on failure.
*/
int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map)
{
vma->vm_ops = &drm_bo_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
vma->vm_flags |= VM_PFNMAP;
#endif
drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_map_bound(vma);
#endif
return 0;
}


@@ -39,12 +39,6 @@
#include "i810_drm.h"
#include "i810_drv.h"
#ifdef DO_MUNMAP_4_ARGS
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1)
#else
#define DO_MUNMAP(m, a, l) do_munmap(m, a, l)
#endif
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
#define I810_BUF_HARDWARE 0
@@ -131,13 +125,13 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
VM_OFFSET(vma) >> PAGE_SHIFT,
vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
static struct file_operations i810_buffer_fops = {
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
@@ -186,7 +180,7 @@ static int i810_unmap_buffer(drm_buf_t * buf)
return -EINVAL;
down_write(&current->mm->mmap_sem);
retcode = DO_MUNMAP(current->mm,
retcode = do_munmap(current->mm,
(unsigned long)buf_priv->virtual,
(size_t) buf->total);
up_write(&current->mm->mmap_sem);
@@ -244,8 +238,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
(drm_i810_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start) {
drm_ioremapfree((void *)dev_priv->ring.virtual_start,
dev_priv->ring.Size, dev);
drm_core_ioremapfree(&dev_priv->ring.map, dev);
}
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -261,9 +254,9 @@ static int i810_dma_cleanup(drm_device_t * dev)
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total)
drm_ioremapfree(buf_priv->kernel_virtual,
buf->total, dev);
drm_core_ioremapfree(&buf_priv->map, dev);
}
}
return 0;
@@ -336,8 +329,15 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
*buf_priv->in_use = I810_BUF_FREE;
buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
buf->total, dev);
buf_priv->map.offset = buf->bus_address;
buf_priv->map.size = buf->total;
buf_priv->map.type = _DRM_AGP;
buf_priv->map.flags = 0;
buf_priv->map.mtrr = 0;
drm_core_ioremap(&buf_priv->map, dev);
buf_priv->kernel_virtual = buf_priv->map.handle;
}
return 0;
}
@@ -346,12 +346,10 @@ static int i810_dma_initialize(drm_device_t * dev,
drm_i810_private_t * dev_priv,
drm_i810_init_t * init)
{
struct list_head *list;
drm_map_list_t *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {
@@ -388,18 +386,24 @@ static int i810_dma_initialize(drm_device_t * dev,
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
init->ring_start,
init->ring_size, dev);
dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = _DRM_AGP;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
if (dev_priv->ring.virtual_start == NULL) {
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
return DRM_ERR(ENOMEM);
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->w = init->w;


@@ -61,6 +61,7 @@ typedef struct drm_i810_buf_priv {
int currently_mapped;
void *virtual;
void *kernel_virtual;
drm_local_map_t map;
} drm_i810_buf_priv_t;
typedef struct _drm_i810_ring_buffer {
@@ -72,6 +73,7 @@ typedef struct _drm_i810_ring_buffer {
int head;
int tail;
int space;
drm_local_map_t map;
} drm_i810_ring_buffer_t;
typedef struct drm_i810_private {

File diff suppressed because it is too large


@@ -1,342 +0,0 @@
#ifndef _I830_DRM_H_
#define _I830_DRM_H_
/* WARNING: These defines must be the same as what the Xserver uses.
* If you change them, you must change the defines in the Xserver.
*
* KW: Actually, you can't ever change them because doing so would
* break backwards compatibility.
*/
#ifndef _I830_DEFINES_
#define _I830_DEFINES_
#define I830_DMA_BUF_ORDER 12
#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
#define I830_DMA_BUF_NR 256
#define I830_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define I830_NR_TEX_REGIONS 64
#define I830_LOG_MIN_TEX_REGION_SIZE 16
/* KW: These aren't correct but someone set them to two and then
* released the module. Now we can't change them as doing so would
* break backwards compatibility.
*/
#define I830_TEXTURE_COUNT 2
#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
#define I830_UPLOAD_CTX 0x1
#define I830_UPLOAD_BUFFERS 0x2
#define I830_UPLOAD_CLIPRECTS 0x4
#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
#define I830_UPLOAD_TEX0 0x10000
#define I830_UPLOAD_TEX1 0x20000
#define I830_UPLOAD_TEX2 0x40000
#define I830_UPLOAD_TEX3 0x80000
#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
#define I830_UPLOAD_TEX_MASK 0xf0000
#define I830_UPLOAD_TEXBLEND0 0x100000
#define I830_UPLOAD_TEXBLEND1 0x200000
#define I830_UPLOAD_TEXBLEND2 0x400000
#define I830_UPLOAD_TEXBLEND3 0x800000
#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
#define I830_UPLOAD_STIPPLE 0x8000000
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
*/
/* Destbuffer state
* - backbuffer linear offset and pitch -- invariant in the current dri
* - zbuffer linear offset and pitch -- also invariant
* - drawing origin in back and depth buffers.
*
* Keep the depth/back buffer state here to accommodate private buffers
* in the future.
*/
#define I830_DESTREG_CBUFADDR 0
#define I830_DESTREG_DBUFADDR 1
#define I830_DESTREG_DV0 2
#define I830_DESTREG_DV1 3
#define I830_DESTREG_SENABLE 4
#define I830_DESTREG_SR0 5
#define I830_DESTREG_SR1 6
#define I830_DESTREG_SR2 7
#define I830_DESTREG_DR0 8
#define I830_DESTREG_DR1 9
#define I830_DESTREG_DR2 10
#define I830_DESTREG_DR3 11
#define I830_DESTREG_DR4 12
#define I830_DEST_SETUP_SIZE 13
/* Context state
*/
#define I830_CTXREG_STATE1 0
#define I830_CTXREG_STATE2 1
#define I830_CTXREG_STATE3 2
#define I830_CTXREG_STATE4 3
#define I830_CTXREG_STATE5 4
#define I830_CTXREG_IALPHAB 5
#define I830_CTXREG_STENCILTST 6
#define I830_CTXREG_ENABLES_1 7
#define I830_CTXREG_ENABLES_2 8
#define I830_CTXREG_AA 9
#define I830_CTXREG_FOGCOLOR 10
#define I830_CTXREG_BLENDCOLR0 11
#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
#define I830_CTXREG_VF 13
#define I830_CTXREG_VF2 14
#define I830_CTXREG_MCSB0 15
#define I830_CTXREG_MCSB1 16
#define I830_CTX_SETUP_SIZE 17
/* 1.3: Stipple state
*/
#define I830_STPREG_ST0 0
#define I830_STPREG_ST1 1
#define I830_STP_SETUP_SIZE 2
/* Texture state (per tex unit)
*/
#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
#define I830_TEXREG_MI1 1
#define I830_TEXREG_MI2 2
#define I830_TEXREG_MI3 3
#define I830_TEXREG_MI4 4
#define I830_TEXREG_MI5 5
#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
#define I830_TEX_SETUP_SIZE 10
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0S0 1
#define I830_TEXREG_TM0S1 2
#define I830_TEXREG_TM0S2 3
#define I830_TEXREG_TM0S3 4
#define I830_TEXREG_TM0S4 5
#define I830_TEXREG_NOP0 6 /* noop */
#define I830_TEXREG_NOP1 7 /* noop */
#define I830_TEXREG_NOP2 8 /* noop */
#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
#define __I830_TEX_SETUP_SIZE 10
#define I830_FRONT 0x1
#define I830_BACK 0x2
#define I830_DEPTH 0x4
#endif /* _I830_DEFINES_ */
typedef struct _drm_i830_init {
enum {
I830_INIT_DMA = 0x01,
I830_CLEANUP_DMA = 0x02
} func;
unsigned int mmio_offset;
unsigned int buffers_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
unsigned int back_pitch;
unsigned int depth_pitch;
unsigned int cpp;
} drm_i830_init_t;
/* Warning: If you change the SAREA structure you must change the Xserver
* structure as well */
typedef struct _drm_i830_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_i830_tex_region_t;
typedef struct _drm_i830_sarea {
unsigned int ContextState[I830_CTX_SETUP_SIZE];
unsigned int BufferState[I830_DEST_SETUP_SIZE];
unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
unsigned int Palette[2][256];
unsigned int dirty;
unsigned int nbox;
drm_clip_rect_t boxes[I830_NR_SAREA_CLIPRECTS];
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
* age different to the one you set, then you are mistaken and
* it has been stolen by another client. If global texAge
* hasn't changed, there is no need to walk the list.
*
* These regions can be used as a proxy for the fine-grained
* texture information of other clients - by maintaining them
* in the same lru which is used to age their own textures,
* clients have an approximate lru for the whole of global
* texture space, and can make informed decisions as to which
* areas to kick out. There is no need to choose whether to
* kick out your own texture or someone else's - simply eject
* them all in LRU order.
*/
drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
/* Last element is a sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
/* Here's the state for texunits 2,3:
*/
unsigned int TexState2[I830_TEX_SETUP_SIZE];
unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
unsigned int TexBlendStateWordsUsed2;
unsigned int TexState3[I830_TEX_SETUP_SIZE];
unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
unsigned int TexBlendStateWordsUsed3;
unsigned int StippleState[I830_STP_SETUP_SIZE];
} drm_i830_sarea_t;
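/*
 * Illustrative client-side use of texList (a sketch, not part of this
 * interface): after uploading into region r, a client records the global
 * texAge in texList[r].age and keeps a private copy.  Before reusing r
 * it rechecks, e.g.
 *
 *	if (sarea->texList[r].age != my_age[r])
 *		handle_stolen_region(r);	(hypothetical client code)
 *
 * If the global texAge is unchanged, the whole walk can be skipped.
 */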
/* Flags for perf_boxes
*/
#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
#define I830_BOX_FLIP 0x2 /* populated by kernel */
#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
/* I830 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I830_INIT 0x00
#define DRM_I830_VERTEX 0x01
#define DRM_I830_CLEAR 0x02
#define DRM_I830_FLUSH 0x03
#define DRM_I830_GETAGE 0x04
#define DRM_I830_GETBUF 0x05
#define DRM_I830_SWAP 0x06
#define DRM_I830_COPY 0x07
#define DRM_I830_DOCOPY 0x08
#define DRM_I830_FLIP 0x09
#define DRM_I830_IRQ_EMIT 0x0a
#define DRM_I830_IRQ_WAIT 0x0b
#define DRM_I830_GETPARAM 0x0c
#define DRM_I830_SETPARAM 0x0d
#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
typedef struct _drm_i830_clear {
int clear_color;
int clear_depth;
int flags;
unsigned int clear_colormask;
unsigned int clear_depthmask;
} drm_i830_clear_t;
/* These may be placeholders if we have more cliprects than
* I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
* false, indicating that the buffer will be dispatched again with a
* new set of cliprects.
*/
typedef struct _drm_i830_vertex {
int idx; /* buffer index */
int used; /* nr bytes in use */
int discard; /* client is finished with the buffer? */
} drm_i830_vertex_t;
typedef struct _drm_i830_copy_t {
int idx; /* buffer index */
int used; /* nr bytes in use */
void __user *address; /* Address to copy from */
} drm_i830_copy_t;
typedef struct drm_i830_dma {
void __user *virtual;
int request_idx;
int request_size;
int granted;
} drm_i830_dma_t;
/* 1.3: Userspace can request & wait on irq's:
*/
typedef struct drm_i830_irq_emit {
int __user *irq_seq;
} drm_i830_irq_emit_t;
typedef struct drm_i830_irq_wait {
int irq_seq;
} drm_i830_irq_wait_t;
/* 1.3: New ioctl to query kernel params:
*/
#define I830_PARAM_IRQ_ACTIVE 1
typedef struct drm_i830_getparam {
int param;
int __user *value;
} drm_i830_getparam_t;
/* 1.3: New ioctl to set kernel params:
*/
#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
typedef struct drm_i830_setparam {
int param;
int value;
} drm_i830_setparam_t;
#endif /* _I830_DRM_H_ */


@@ -1,290 +0,0 @@
/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
*
*/
#ifndef _I830_DRV_H_
#define _I830_DRV_H_
/* General customization:
*/
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "i830"
#define DRIVER_DESC "Intel 830M"
#define DRIVER_DATE "20021108"
/* Interface history:
*
* 1.1: Original.
* 1.2: ?
* 1.3: New irq emit/wait ioctls.
* New pageflip ioctl.
* New getparam ioctl.
* State for texunits 3&4 in sarea.
* New (alternative) layout for texture state.
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 2
/* The driver will work either way: IRQs save CPU time when waiting for
* the card, but are subject to subtle interactions between the BIOS,
* hardware and the driver.
*/
/* XXX: Add vblank support? */
#define USE_IRQS 0
typedef struct drm_i830_buf_priv {
u32 *in_use;
int my_use_idx;
int currently_mapped;
void __user *virtual;
void *kernel_virtual;
} drm_i830_buf_priv_t;
typedef struct _drm_i830_ring_buffer {
int tail_mask;
unsigned long Start;
unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
} drm_i830_ring_buffer_t;
typedef struct drm_i830_private {
drm_map_t *sarea_map;
drm_map_t *mmio_map;
drm_i830_sarea_t *sarea_priv;
drm_i830_ring_buffer_t ring;
void *hw_status_page;
unsigned long counter;
dma_addr_t dma_status_page;
drm_buf_t *mmap_buffer;
u32 front_di1, back_di1, zi1;
int back_offset;
int depth_offset;
int front_offset;
int w, h;
int pitch;
int back_pitch;
int depth_pitch;
unsigned int cpp;
int do_boxes;
int dma_used;
int current_page;
int page_flipping;
wait_queue_head_t irq_queue;
atomic_t irq_received;
atomic_t irq_emitted;
int use_mi_batchbuffer_start;
} drm_i830_private_t;
extern drm_ioctl_desc_t i830_ioctls[];
extern int i830_max_ioctl;
/* i830_irq.c */
extern int i830_irq_emit(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int i830_irq_wait(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
extern void i830_driver_irq_preinstall(drm_device_t * dev);
extern void i830_driver_irq_postinstall(drm_device_t * dev);
extern void i830_driver_irq_uninstall(drm_device_t * dev);
extern int i830_driver_load(struct drm_device *, unsigned long flags);
extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp);
extern void i830_driver_lastclose(drm_device_t * dev);
extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev,
struct file *filp);
extern int i830_driver_dma_quiescent(drm_device_t * dev);
extern int i830_driver_device_is_agp(drm_device_t * dev);
#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
#define I830_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
#define I830_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
#define I830_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
printk("BEGIN_LP_RING(%d) in %s\n", \
n, __FUNCTION__); \
if (dev_priv->ring.space < n*4) \
i830_wait_ring(dev, n*4, __FUNCTION__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
ringmask = dev_priv->ring.tail_mask; \
virt = dev_priv->ring.virtual_start; \
} while (0)
#define OUT_RING(n) do { \
if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outcount++; \
outring += 4; \
outring &= ringmask; \
} while (0)
#define ADVANCE_LP_RING() do { \
if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I830_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
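/*
 * Typical emit sequence (see i830_emit_irq() later in this diff):
 *
 *	BEGIN_LP_RING(2);
 *	OUT_RING(0);
 *	OUT_RING(GFX_OP_USER_INTERRUPT);
 *	ADVANCE_LP_RING();
 */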
extern int i830_wait_ring(drm_device_t * dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
#define LOAD_TEXTURE_MAP0 (1<<11)
#define INST_PARSER_CLIENT 0x00000000
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
#define I830REG_HWSTAM 0x02098
#define I830REG_INT_IDENTITY_R 0x020a4
#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0xFFFFF000
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x001FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)
#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
#define CMD_3D (0x3<<29)
#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
#define BR00_BITBLT_CLIENT 0x40000000
#define BR00_OP_COLOR_BLT 0x10000000
#define BR00_OP_SRC_COPY_BLT 0x10C00000
#define BR13_SOLID_PATTERN 0x80000000
#define BUF_3D_ID_COLOR_BACK (0x3<<24)
#define BUF_3D_ID_DEPTH (0x7<<24)
#define BUF_3D_USE_FENCE (1<<23)
#define BUF_3D_PITCH(x) (((x)/4)<<2)
#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
#define MAP_PALETTE_BOTH (1<<11)
#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
#define XY_COLOR_BLT_WRITE_RGB (1<<20)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
#define MI_BATCH_BUFFER ((0x30<<23)|1)
#define MI_BATCH_BUFFER_START (0x31<<23)
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
#endif


@@ -1,198 +0,0 @@
/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
*
* Copyright 2002 Tungsten Graphics, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Keith Whitwell <keith@tungstengraphics.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
u16 temp;
temp = I830_READ16(I830REG_INT_IDENTITY_R);
DRM_DEBUG("%x\n", temp);
if (!(temp & 2))
return IRQ_NONE;
I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
atomic_inc(&dev_priv->irq_received);
wake_up_interruptible(&dev_priv->irq_queue);
return IRQ_HANDLED;
}
static int i830_emit_irq(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_emitted);
BEGIN_LP_RING(2);
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
ADVANCE_LP_RING();
return atomic_read(&dev_priv->irq_emitted);
}
static int i830_wait_irq(drm_device_t * dev, int irq_nr)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
unsigned long end = jiffies + HZ * 3;
int ret = 0;
DRM_DEBUG("%s\n", __FUNCTION__);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
return 0;
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
add_wait_queue(&dev_priv->irq_queue, &entry);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (atomic_read(&dev_priv->irq_received) >= irq_nr)
break;
if ((signed)(end - jiffies) <= 0) {
DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
I830_READ16(I830REG_INT_IDENTITY_R),
I830_READ16(I830REG_INT_MASK_R),
I830_READ16(I830REG_INT_ENABLE_R),
I830_READ16(I830REG_HWSTAM));
ret = -EBUSY; /* Lockup? Missed irq? */
break;
}
schedule_timeout(HZ * 3);
if (signal_pending(current)) {
ret = -EINTR;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev_priv->irq_queue, &entry);
return ret;
}
/* Needs the lock as it touches the ring.
*/
int i830_irq_emit(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_emit_t emit;
int result;
LOCK_TEST_WITH_RETURN(dev, filp);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return -EINVAL;
}
if (copy_from_user
(&emit, (drm_i830_irq_emit_t __user *) arg, sizeof(emit)))
return -EFAULT;
result = i830_emit_irq(dev);
if (copy_to_user(emit.irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}
/* Doesn't need the hardware lock.
*/
int i830_irq_wait(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_irq_wait_t irqwait;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return -EINVAL;
}
if (copy_from_user(&irqwait, (drm_i830_irq_wait_t __user *) arg,
sizeof(irqwait)))
return -EFAULT;
return i830_wait_irq(dev, irqwait.irq_seq);
}
/* drm_dma.h hooks
*/
void i830_driver_irq_preinstall(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_HWSTAM, 0xffff);
I830_WRITE16(I830REG_INT_MASK_R, 0x0);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
atomic_set(&dev_priv->irq_received, 0);
atomic_set(&dev_priv->irq_emitted, 0);
init_waitqueue_head(&dev_priv->irq_queue);
}
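/* Enable only the user interrupt (bit 1), matching the bit tested in
 * i830_driver_irq_handler() above.
 */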
void i830_driver_irq_postinstall(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
}
void i830_driver_irq_uninstall(drm_device_t * dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
if (!dev_priv)
return;
I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
}

linux-core/i915_buffer.c

@@ -33,16 +33,15 @@
#include "i915_drm.h"
#include "i915_drv.h"
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
return drm_agp_init_ttm(dev, NULL);
return drm_agp_init_ttm(dev);
}
int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
{
*class = 0;
if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
@@ -64,3 +63,173 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
return i915_emit_mi_flush(dev, flush_cmd);
}
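/* Describe the memory pools backing buffer objects: cached, mappable
 * system memory (LOCAL), the dynamically bound AGP aperture (TT), and a
 * fixed AGP range (PRIV0).  Both AGP-backed types require AGP to be
 * enabled and an ioremap of the aperture.
 */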
int i915_init_mem_type(drm_device_t * dev, uint32_t type,
drm_mem_type_manager_t * man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CACHED;
man->drm_bus_maptype = 0;
break;
case DRM_BO_MEM_TT:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
break;
case DRM_BO_MEM_PRIV0:
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
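/* Choose where an evicted buffer goes: LOCAL and TT contents fall back
 * to local memory, while buffers in the fixed PRIV0 range are moved out
 * to cached TT space.
 */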
uint32_t i915_evict_mask(drm_buffer_object_t *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return DRM_BO_FLAG_MEM_LOCAL;
default:
return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
}
}
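/* Copy whole pages with the 2D blitter, one page per blit row, in
 * chunks of at most 2048 pages per XY_SRC_COPY command.  The direction
 * flag requests a backward copy so overlapping ranges stay correct.
 */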
static void i915_emit_copy_blit(drm_device_t * dev,
uint32_t src_offset,
uint32_t dst_offset,
uint32_t pages, int direction)
{
uint32_t cur_pages;
uint32_t stride = PAGE_SIZE;
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
if (!dev_priv)
return;
i915_kernel_lost_context(dev);
while (pages > 0) {
cur_pages = pages;
if (cur_pages > 2048)
cur_pages = 2048;
pages -= cur_pages;
BEGIN_LP_RING(6);
OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB);
OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
(1 << 25) | (direction ? (1 << 30) : 0));
OUT_RING((cur_pages << 16) | PAGE_SIZE);
OUT_RING(dst_offset);
OUT_RING(stride & 0xffff);
OUT_RING(src_offset);
ADVANCE_LP_RING();
}
return;
}
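/* Blit a buffer to its new location and fence the result.  A backward
 * copy is used when source and destination share a memory type and the
 * ranges may overlap.
 */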
static int i915_move_blit(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_bo_mem_reg_t *old_mem = &bo->mem;
int dir = 0;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->mm_node->start <
old_mem->mm_node->start + old_mem->mm_node->size)) {
dir = 1;
}
i915_emit_copy_blit(bo->dev,
old_mem->mm_node->start << PAGE_SHIFT,
new_mem->mm_node->start << PAGE_SHIFT,
new_mem->num_pages, dir);
i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
DRM_FENCE_TYPE_EXE |
DRM_I915_FENCE_TYPE_RW,
DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
}
/*
* Flip destination ttm into cached-coherent AGP,
* then blit and subsequently move out again.
*/
static int i915_move_flip(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_device_t *dev = bo->dev;
drm_bo_mem_reg_t tmp_mem;
int ret;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
if (ret)
return ret;
ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
if (ret)
goto out_cleanup;
ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
if (ret)
goto out_cleanup;
ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
return ret;
}
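/* Top-level move hook: system-memory sources are memcpy'd, moves back
 * to system memory go through the flip-and-blit path, and everything
 * else is blitted directly.  Each accelerated path falls back to
 * drm_bo_move_memcpy() on failure.
 */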
int i915_move(drm_buffer_object_t * bo,
int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
drm_bo_mem_reg_t *old_mem = &bo->mem;
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
if (i915_move_flip(bo, evict, no_wait, new_mem))
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else {
if (i915_move_blit(bo, evict, no_wait, new_mem))
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
return 0;
}

linux-core/i915_dma.c Symbolic link

@@ -0,0 +1 @@
../shared-core/i915_dma.c

linux-core/i915_drm.h Symbolic link

@@ -0,0 +1 @@
../shared-core/i915_drm.h

linux-core/i915_drv.c

@@ -40,22 +40,32 @@ static struct pci_device_id pciidlist[] = {
#ifdef I915_HAVE_FENCE
static drm_fence_driver_t i915_fence_driver = {
.no_types = 2,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 29),
.sequence_mask = 0xffffffffU,
.num_classes = 1,
.wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
.flush_diff = (1U << (BREADCRUMB_BITS - 2)),
.sequence_mask = BREADCRUMB_MASK,
.lazy_capable = 1,
.emit = i915_fence_emit_sequence,
.poke_flush = i915_poke_flush,
.has_irq = i915_fence_has_irq,
};
#endif
#ifdef I915_HAVE_BUFFER
static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
static drm_bo_driver_t i915_bo_driver = {
.iomap = {NULL, NULL},
.cached = {1, 1},
.mem_type_prio = i915_mem_prios,
.mem_busy_prio = i915_busy_prios,
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
.fence_type = i915_fence_types,
.invalidate_caches = i915_invalidate_caches
.invalidate_caches = i915_invalidate_caches,
.init_mem_type = i915_init_mem_type,
.evict_mask = i915_evict_mask,
.move = i915_move,
};
#endif
@@ -69,6 +79,7 @@ static struct drm_driver driver = {
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
DRIVER_IRQ_VBL2,
.load = i915_driver_load,
.firstopen = i915_driver_firstopen,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.device_is_agp = i915_driver_device_is_agp,

linux-core/i915_drv.h Symbolic link

@@ -0,0 +1 @@
../shared-core/i915_drv.h

linux-core/i915_fence.c

@@ -42,36 +42,34 @@ static void i915_perform_flush(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[0];
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t flush_flags = 0;
uint32_t flush_sequence = 0;
uint32_t i_status;
uint32_t diff;
uint32_t sequence;
int rwflush;
if (!dev_priv)
return;
if (fm->pending_exe_flush) {
if (fc->pending_exe_flush) {
sequence = READ_BREADCRUMB(dev_priv);
/*
* First update fences with the current breadcrumb.
*/
diff = sequence - fm->last_exe_flush;
diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
if (diff < driver->wrap_diff && diff != 0) {
drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
}
diff = sequence - fm->exe_flush_sequence;
if (diff < driver->wrap_diff) {
fm->pending_exe_flush = 0;
if (dev_priv->fence_irq_on) {
if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
i915_user_irq_off(dev_priv);
dev_priv->fence_irq_on = 0;
}
} else if (!dev_priv->fence_irq_on) {
} else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
@@ -84,17 +82,18 @@ static void i915_perform_flush(drm_device_t * dev)
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
drm_fence_handler(dev, 0, flush_sequence, flush_flags);
}
}
if (fm->pending_flush && !dev_priv->flush_pending) {
rwflush = fc->pending_flush & DRM_I915_FENCE_TYPE_RW;
if (rwflush && !dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fm->pending_flush;
dev_priv->flush_flags = fc->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
dev_priv->flush_pending = 1;
fm->pending_flush = 0;
fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}
if (dev_priv->flush_pending) {
@@ -104,13 +103,13 @@ static void i915_perform_flush(drm_device_t * dev)
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
drm_fence_handler(dev, 0, flush_sequence, flush_flags);
}
}
}
void i915_poke_flush(drm_device_t * dev)
void i915_poke_flush(drm_device_t * dev, uint32_t class)
{
drm_fence_manager_t *fm = &dev->fm;
unsigned long flags;
@@ -120,7 +119,7 @@ void i915_poke_flush(drm_device_t * dev)
write_unlock_irqrestore(&fm->lock, flags);
}
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -144,3 +143,15 @@ void i915_fence_handler(drm_device_t * dev)
i915_perform_flush(dev);
write_unlock(&fm->lock);
}
int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags)
{
/*
* We have an irq that tells us when we have a new breadcrumb.
*/
if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
return 1;
return 0;
}

linux-core/i915_ioc32.c

@@ -30,7 +30,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"

linux-core/i915_irq.c Symbolic link

@@ -0,0 +1 @@
../shared-core/i915_irq.c

linux-core/i915_mem.c Symbolic link

@@ -0,0 +1 @@
../shared-core/i915_mem.c

linux-core/linux Symbolic link

@@ -0,0 +1 @@
.

linux-core/mach64_dma.c Symbolic link

@@ -0,0 +1 @@
../shared-core/mach64_dma.c

linux-core/mach64_drm.h Symbolic link

@@ -0,0 +1 @@
../shared-core/mach64_drm.h

linux-core/mach64_drv.h Symbolic link

@@ -0,0 +1 @@
../shared-core/mach64_drv.h

linux-core/mach64_irq.c Symbolic link

@@ -0,0 +1 @@
../shared-core/mach64_irq.c

linux-core/mach64_state.c Symbolic link

@@ -0,0 +1 @@
../shared-core/mach64_state.c

linux-core/mga_dma.c Symbolic link

@@ -0,0 +1 @@
../shared-core/mga_dma.c

linux-core/mga_drm.h Symbolic link

@@ -0,0 +1 @@
../shared-core/mga_drm.h

linux-core/mga_drv.h Symbolic link

@@ -0,0 +1 @@
../shared-core/mga_drv.h

linux-core/mga_ioc32.c

@@ -32,7 +32,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"

linux-core/mga_irq.c Symbolic link

@@ -0,0 +1 @@
../shared-core/mga_irq.c

linux-core/mga_state.c Symbolic link

@@ -0,0 +1 @@
../shared-core/mga_state.c

linux-core/mga_ucode.h Symbolic link

@@ -0,0 +1 @@
../shared-core/mga_ucode.h

linux-core/mga_warp.c Symbolic link

@@ -0,0 +1 @@
../shared-core/mga_warp.c

linux-core/nouveau_drm.h Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_drm.h

linux-core/nouveau_drv.c

@@ -1,8 +1,5 @@
/* i830_drv.c -- I810 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
/*
* Copyright 2005 Stephane Marchesin.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -23,51 +20,39 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Abraham vd Merwe <abraham@2d3d.co.za>
* Keith Whitwell <keith@tungstengraphics.com>
*/
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
#include "i830_drv.h"
#include "nouveau_drv.h"
#include "drm_pciids.h"
static struct pci_device_id pciidlist[] = {
i830_PCI_IDS
nouveau_PCI_IDS
};
extern drm_ioctl_desc_t nouveau_ioctls[];
extern int nouveau_max_ioctl;
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
#if USE_IRQS
.driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
#endif
.dev_priv_size = sizeof(drm_i830_buf_priv_t),
.load = i830_driver_load,
.lastclose = i830_driver_lastclose,
.preclose = i830_driver_preclose,
.device_is_agp = i830_driver_device_is_agp,
.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
.dma_quiescent = i830_driver_dma_quiescent,
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.load = nouveau_load,
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
.unload = nouveau_unload,
.preclose = nouveau_preclose,
.irq_preinstall = nouveau_irq_preinstall,
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
.irq_handler = nouveau_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
#if USE_IRQS
.irq_preinstall = i830_driver_irq_preinstall,
.irq_postinstall = i830_driver_irq_postinstall,
.irq_uninstall = i830_driver_irq_uninstall,
.irq_handler = i830_driver_irq_handler,
#endif
.ioctls = i830_ioctls,
.ioctls = nouveau_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -76,6 +61,9 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
.compat_ioctl = nouveau_compat_ioctl,
#endif
},
.pci_driver = {
.name = DRIVER_NAME,
@@ -97,20 +85,19 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return drm_get_dev(pdev, ent, &driver);
}
static int __init i830_init(void)
static int __init nouveau_init(void)
{
driver.num_ioctls = i830_max_ioctl;
driver.num_ioctls = nouveau_max_ioctl;
return drm_init(&driver, pciidlist);
}
static void __exit i830_exit(void)
static void __exit nouveau_exit(void)
{
drm_exit(&driver);
}
module_init(i830_init);
module_exit(i830_exit);
module_init(nouveau_init);
module_exit(nouveau_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);

linux-core/nouveau_drv.h Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_drv.h

linux-core/nouveau_fifo.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_fifo.c

linux-core/nouveau_ioc32.c

@@ -0,0 +1,72 @@
/**
 * \file nouveau_ioc32.c
 *
 * 32-bit ioctl compatibility routines for the nouveau DRM.
*
* \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
*
*
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Egbert Eich 2003,2004
* Copyright (C) Dave Airlie 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
* \param filp file pointer.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*/
long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
#if 0
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
unlock_kernel();
return ret;
}

linux-core/nouveau_irq.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_irq.c

linux-core/nouveau_mem.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_mem.c

linux-core/nouveau_object.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_object.c

linux-core/nouveau_reg.h Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_reg.h

linux-core/nouveau_state.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nouveau_state.c

linux-core/nv04_fb.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nv04_fb.c

linux-core/nv04_graph.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nv04_graph.c

linux-core/nv04_mc.c Symbolic link

@@ -0,0 +1 @@
../shared-core/nv04_mc.c
