Merge remote branch 'origin/master' into modesetting-gem

Conflicts:

	libdrm/Makefile.am
	libdrm/dri_bufmgr.h
	linux-core/drm_irq.c
	linux-core/drm_sysfs.c
	linux-core/drm_ttm.c
	shared-core/i915_dma.c
	shared-core/i915_irq.c
	shared-core/nouveau_drv.h
	shared-core/radeon_cp.c
commit 972f657265
Author: Dave Airlie
Date:   2008-09-26 15:37:21 +10:00

63 changed files with 8908 additions and 2680 deletions

.gitignore

@ -59,6 +59,7 @@ tests/getstats
tests/getversion
tests/lock
tests/gem_basic
tests/gem_flink
tests/gem_mmap
tests/gem_readwrite
tests/openclose


@ -39,26 +39,86 @@
#define ATI_PCIE_WRITE 0x4
#define ATI_PCIE_READ 0x8
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
static void
drm_ati_alloc_pcigart_table_cb(void *arg, bus_dma_segment_t *segs,
int nsegs, int error)
{
dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size,
PAGE_SIZE,
gart_info->table_mask);
if (dev->sg->dmah == NULL)
struct drm_dma_handle *dmah = arg;
if (error != 0)
return;
KASSERT(nsegs == 1,
("drm_ati_alloc_pcigart_table_cb: bad dma segment count"));
dmah->busaddr = segs[0].ds_addr;
}
static int
drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
struct drm_dma_handle *dmah;
int flags, ret;
dmah = malloc(sizeof(struct drm_dma_handle), M_DRM, M_ZERO | M_NOWAIT);
if (dmah == NULL)
return ENOMEM;
DRM_UNLOCK();
ret = bus_dma_tag_create(NULL, PAGE_SIZE, 0, /* tag, align, boundary */
gart_info->table_mask, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
gart_info->table_size, 1, /* maxsize, nsegs */
gart_info->table_size, /* maxsegsize */
BUS_DMA_ALLOCNOW, NULL, NULL, /* flags, lockfunc, lockfuncargs */
&dmah->tag);
if (ret != 0) {
free(dmah, M_DRM);
return ENOMEM;
}
flags = BUS_DMA_NOWAIT | BUS_DMA_ZERO;
if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
flags |= BUS_DMA_NOCACHE;
ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr, flags, &dmah->map);
if (ret != 0) {
bus_dma_tag_destroy(dmah->tag);
free(dmah, M_DRM);
return ENOMEM;
}
DRM_LOCK();
ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
gart_info->table_size, drm_ati_alloc_pcigart_table_cb, dmah, 0);
if (ret != 0) {
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, M_DRM);
return ENOMEM;
}
dev->sg->dmah = dmah;
return 0;
}
static void drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
static void
drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
drm_pci_free(dev, dev->sg->dmah);
struct drm_dma_handle *dmah = dev->sg->dmah;
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, M_DRM);
dev->sg->dmah = NULL;
}
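The rewritten allocation path above swaps drm_pci_alloc() for raw busdma: create a tag constrained to gart_info->table_mask, allocate wired, zeroed memory, then load the map and let a callback capture the single segment's bus address. A rough, self-contained illustration of the same pattern, assuming a FreeBSD kernel context; the example_* names are made up for this sketch and error handling is trimmed:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

struct example_dma {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	void		*vaddr;
	bus_addr_t	busaddr;
};

/* bus_dmamap_load() reports the DMA segments through a callback. */
static void
example_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct example_dma *d = arg;

	if (error == 0 && nsegs == 1)
		d->busaddr = segs[0].ds_addr;	/* bus address of the buffer */
}

static int
example_dma_alloc(struct example_dma *d, bus_size_t size, bus_addr_t mask)
{
	/* One contiguous, page-aligned segment allocatable below `mask'. */
	if (bus_dma_tag_create(NULL, PAGE_SIZE, 0, mask, BUS_SPACE_MAXADDR,
	    NULL, NULL, size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &d->tag) != 0)
		return ENOMEM;
	if (bus_dmamem_alloc(d->tag, &d->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &d->map) != 0) {
		bus_dma_tag_destroy(d->tag);
		return ENOMEM;
	}
	return bus_dmamap_load(d->tag, d->map, d->vaddr, size,
	    example_dma_cb, d, 0);
}

static void
example_dma_free(struct example_dma *d)
{
	bus_dmamap_unload(d->tag, d->map);
	bus_dmamem_free(d->tag, d->vaddr, d->map);
	bus_dma_tag_destroy(d->tag);
}

The sketch unloads the map before freeing, which is the teardown order the busdma(9) manual describes; the driver's free path above relies on bus_dmamem_free() alone.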
int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
int
drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
/* we need to support large memory configurations */
if (dev->sg == NULL) {
@ -77,17 +137,17 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
return 1;
}
int drm_ati_pcigart_init(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
int
drm_ati_pcigart_init(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base;
dma_addr_t bus_address = 0;
dma_addr_t entry_addr;
int i, j, ret = 0;
int max_pages;
dma_addr_t entry_addr;
/* we need to support large memory configurations */
if (dev->sg == NULL) {
@ -129,12 +189,14 @@ int drm_ati_pcigart_init(struct drm_device *dev,
page_base = (u32) entry_addr & ATI_PCIGART_PAGE_MASK;
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
page_base |= (upper_32_bits(entry_addr) & 0xff) << 4;
page_base |=
(upper_32_bits(entry_addr) & 0xff) << 4;
page_base |= 0xc;
break;
case DRM_ATI_GART_PCIE:
page_base >>= 8;
page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;
page_base |=
(upper_32_bits(entry_addr) & 0xff) << 24;
page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
break;
default:
@ -147,8 +209,6 @@ int drm_ati_pcigart_init(struct drm_device *dev,
}
}
DRM_MEMORYBARRIER();
ret = 1;
done:
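The switch above packs each page's bus address into a 32-bit GART entry, with a different bit layout for the IGP and PCIE flavours. A small worked sketch of the PCIE case; it assumes ATI_PCIGART_PAGE_MASK is the 4 KB page mask (0xfffff000), as in the matching Linux implementation, and open-codes upper_32_bits():

#include <assert.h>
#include <stdint.h>

#define ATI_PCIGART_PAGE_MASK	0xfffff000u	/* assumed 4 KB page mask */
#define ATI_PCIE_WRITE		0x4
#define ATI_PCIE_READ		0x8

/* Pack a (possibly >4 GB) page bus address into one PCIE GART entry. */
static uint32_t
pcie_gart_entry(uint64_t entry_addr)
{
	uint32_t page_base = (uint32_t)entry_addr & ATI_PCIGART_PAGE_MASK;

	page_base >>= 8;			/* address bits 12..31 move to 4..23 */
	page_base |= ((uint32_t)(entry_addr >> 32) & 0xff) << 24; /* bits 32..39 */
	page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;	/* access flags */
	return page_base;
}

int
main(void)
{
	/* Page at 0x1_2345_6000: 0x23456000 >> 8 = 0x00234560, the high
	 * byte 0x01 lands in bits 24..31, then the R/W flags are ORed in. */
	assert(pcie_gart_entry(0x123456000ULL) == 0x0123456cu);
	return 0;
}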


@ -37,7 +37,7 @@
#if defined(_KERNEL) || defined(__KERNEL__)
struct drm_device;
typedef struct drm_file drm_file_t;
struct drm_file;
#include <sys/param.h>
#include <sys/queue.h>
@ -70,10 +70,10 @@ typedef struct drm_file drm_file_t;
#include <machine/pmap.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <sys/endian.h>
#include <sys/mman.h>
#if defined(__FreeBSD__)
#include <sys/rman.h>
#include <sys/memrange.h>
#if __FreeBSD_version >= 800004
@ -82,31 +82,9 @@ typedef struct drm_file drm_file_t;
#include <pci/agpvar.h>
#endif /* __FreeBSD_version >= 800004 */
#include <sys/agpio.h>
#if __FreeBSD_version >= 500000
#include <sys/mutex.h>
#include <dev/pci/pcivar.h>
#include <sys/selinfo.h>
#else /* __FreeBSD_version >= 500000 */
#include <pci/pcivar.h>
#include <sys/select.h>
#endif /* __FreeBSD_version < 500000 */
#elif defined(__NetBSD__)
#include <machine/mtrr.h>
#include <sys/vnode.h>
#include <sys/select.h>
#include <sys/device.h>
#include <sys/resourcevar.h>
#include <sys/lkm.h>
#include <sys/agpio.h>
#include <sys/ttycom.h>
#include <uvm/uvm.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
#elif defined(__OpenBSD__)
#include <sys/lkm.h>
#include <uvm/uvm.h>
#endif
#include <sys/bus.h>
#include "drm.h"
@ -114,13 +92,11 @@ typedef struct drm_file drm_file_t;
#include "drm_atomic.h"
#include "drm_internal.h"
#ifdef __FreeBSD__
#include <opt_drm.h>
#ifdef DRM_DEBUG
#undef DRM_DEBUG
#define DRM_DEBUG_DEFAULT_ON 1
#endif /* DRM_DEBUG */
#endif
#if defined(DRM_LINUX) && DRM_LINUX && !defined(__amd64__)
#include <sys/file.h>
@ -135,6 +111,17 @@ typedef struct drm_file drm_file_t;
#define DRM_LINUX 0
#endif
/* driver capabilities and requirements mask */
#define DRIVER_USE_AGP 0x1
#define DRIVER_REQUIRE_AGP 0x2
#define DRIVER_USE_MTRR 0x4
#define DRIVER_PCI_DMA 0x8
#define DRIVER_SG 0x10
#define DRIVER_HAVE_DMA 0x20
#define DRIVER_HAVE_IRQ 0x40
#define DRIVER_DMA_QUEUE 0x100
#define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
@ -183,12 +170,6 @@ MALLOC_DECLARE(M_DRM);
#define DRM_WAKEUP_INT(w) wakeup(w)
#define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0)
#if defined(__FreeBSD__) && __FreeBSD_version < 502109
#define bus_alloc_resource_any(dev, type, rid, flags) \
bus_alloc_resource(dev, type, rid, 0ul, ~0ul, 1, flags)
#endif
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
#define DRM_CURPROC curthread
#define DRM_STRUCTPROC struct thread
#define DRM_SPINTYPE struct mtx
@ -206,21 +187,6 @@ MALLOC_DECLARE(M_DRM);
#define DRM_LOCK() mtx_lock(&dev->dev_lock)
#define DRM_UNLOCK() mtx_unlock(&dev->dev_lock)
#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS)
#else /* __FreeBSD__ && __FreeBSD_version >= 500000 */
#define DRM_CURPROC curproc
#define DRM_STRUCTPROC struct proc
#define DRM_SPINTYPE struct simplelock
#define DRM_SPININIT(l,name)
#define DRM_SPINUNINIT(l)
#define DRM_SPINLOCK(l)
#define DRM_SPINUNLOCK(u)
#define DRM_SPINLOCK_ASSERT(l)
#define DRM_CURRENTPID curproc->p_pid
#define DRM_LOCK()
#define DRM_UNLOCK()
#define DRM_SYSCTL_HANDLER_ARGS SYSCTL_HANDLER_ARGS
#define spldrm() spltty()
#endif /* __NetBSD__ || __OpenBSD__ */
#define DRM_IRQ_ARGS void *arg
typedef void irqreturn_t;
@ -234,16 +200,8 @@ enum {
};
#define DRM_AGP_MEM struct agp_memory_info
#if defined(__FreeBSD__)
#define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1)
#elif defined(__NetBSD__)
#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, minor(_kdev))
#elif defined(__OpenBSD__)
#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, \
minor(_kdev)))->dv_cfdata->cf_driver->cd_devs[minor(_kdev)]
#endif
#if defined(__FreeBSD__)
#define PAGE_ALIGN(addr) round_page(addr)
/* DRM_SUSER returns true if the user is superuser */
#if __FreeBSD_version >= 700000
@ -255,20 +213,6 @@ enum {
#define DRM_MTRR_WC MDF_WRITECOMBINE
#define jiffies ticks
#else /* __FreeBSD__ */
#define CDEV_MAJOR 34
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/* DRM_SUSER returns true if the user is superuser */
#define DRM_SUSER(p) (suser(p->p_ucred, &p->p_acflag) == 0)
#define DRM_AGP_FIND_DEVICE() agp_find_device(0)
#define DRM_MTRR_WC MTRR_TYPE_WC
#define jiffies hardclock_ticks
typedef struct drm_device *device_t;
extern struct cfdriver drm_cd;
#endif /* !__FreeBSD__ */
/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
#ifndef PCIY_AGP
#define PCIY_AGP 0x02
@ -306,7 +250,6 @@ typedef u_int8_t u8;
"lock; addl $0,0(%%rsp)" : : : "memory");
#endif
#ifdef __FreeBSD__
#define DRM_READ8(map, offset) \
*(volatile u_int8_t *) (((unsigned long)(map)->handle) + (offset))
#define DRM_READ16(map, offset) \
@ -323,27 +266,6 @@ typedef u_int8_t u8;
#define DRM_VERIFYAREA_READ( uaddr, size ) \
(!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ))
#else /* __FreeBSD__ */
typedef vaddr_t vm_offset_t;
#define DRM_READ8(map, offset) \
bus_space_read_1( (map)->bst, (map)->bsh, (offset))
#define DRM_READ16(map, offset) \
bus_space_read_2( (map)->bst, (map)->bsh, (offset))
#define DRM_READ32(map, offset) \
bus_space_read_4( (map)->bst, (map)->bsh, (offset))
#define DRM_WRITE8(map, offset, val) \
bus_space_write_1((map)->bst, (map)->bsh, (offset), (val))
#define DRM_WRITE16(map, offset, val) \
bus_space_write_2((map)->bst, (map)->bsh, (offset), (val))
#define DRM_WRITE32(map, offset, val) \
bus_space_write_4((map)->bst, (map)->bsh, (offset), (val))
#define DRM_VERIFYAREA_READ( uaddr, size ) \
(!uvm_useracc((caddr_t)uaddr, size, VM_PROT_READ))
#endif /* !__FreeBSD__ */
#define DRM_COPY_TO_USER(user, kern, size) \
copyout(kern, user, size)
#define DRM_COPY_FROM_USER(kern, user, size) \
@ -352,13 +274,8 @@ typedef vaddr_t vm_offset_t;
copyin(arg2, arg1, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
copyout(arg2, arg1, arg3)
#if __FreeBSD_version > 500000
#define DRM_GET_USER_UNCHECKED(val, uaddr) \
((val) = fuword32(uaddr), 0)
#else
#define DRM_GET_USER_UNCHECKED(val, uaddr) \
((val) = fuword(uaddr), 0)
#endif
#define cpu_to_le32(x) htole32(x)
#define le32_to_cpu(x) le32toh(x)
@ -381,7 +298,6 @@ do { \
} \
} while (0)
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
@ -393,17 +309,6 @@ for ( ret = 0 ; !ret && !(condition) ; ) { \
mtx_unlock(&dev->irq_lock); \
DRM_LOCK(); \
}
#else
/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
int s = spldrm(); \
if (!(condition)) \
ret = -tsleep( &(queue), PZERO | PCATCH, \
"drmwtq", (timeout) ); \
splx(s); \
}
#endif
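DRM_WAIT_ON() is the wrapper drivers use to sleep until an interrupt-driven condition becomes true or a timeout expires; as the comment above notes, it hands back a negated errno because the shared, Linux-derived code expects -errno while the BSD ioctl layer returns positive values. A hedged sketch of a typical wait; the function and flag are illustrative, not taken from this diff:

/* Illustrative only: sleep until an interrupt handler sets *done (and
 * wakes the channel, e.g. via DRM_WAKEUP_INT(done)), or about three
 * seconds pass.  The caller is assumed to hold DRM_LOCK(); the macro
 * drops it around the sleep and tests the condition under dev->irq_lock. */
static int
example_wait_for_completion(struct drm_device *dev, volatile int *done)
{
	int ret;

	DRM_WAIT_ON(ret, *done, 3 * hz, *done != 0);
	return ret;	/* 0, or a negated errno such as -EINTR or -EWOULDBLOCK */
}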
#define DRM_ERROR(fmt, arg...) \
printf("error: [" DRM_NAME ":pid%d:%s] *ERROR* " fmt, \
@ -489,12 +394,8 @@ typedef struct drm_freelist {
typedef struct drm_dma_handle {
void *vaddr;
bus_addr_t busaddr;
#if defined(__FreeBSD__)
bus_dma_tag_t tag;
bus_dmamap_t map;
#elif defined(__NetBSD__)
bus_dma_segment_t seg;
#endif
} drm_dma_handle_t;
typedef struct drm_buf_entry {
@ -511,19 +412,19 @@ typedef struct drm_buf_entry {
typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
struct drm_file {
TAILQ_ENTRY(drm_file) link;
struct drm_device *dev;
int authenticated;
int master;
int minor;
pid_t pid;
uid_t uid;
int refs;
drm_magic_t magic;
unsigned long ioctl_count;
void *driver_priv;
};
typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */
struct drm_hw_lock *hw_lock; /* Hardware lock */
struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/
int lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */
@ -586,8 +487,8 @@ typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
typedef struct drm_local_map {
unsigned long offset; /* Physical address (0 for SAREA)*/
unsigned long size; /* Physical size (bytes) */
drm_map_type_t type; /* Type of memory mapped */
drm_map_flags_t flags; /* Flags */
enum drm_map_type type; /* Type of memory mapped */
enum drm_map_flags flags; /* Flags */
void *handle; /* User-space: "Handle" to pass to mmap */
/* Kernel-space: kernel-virtual address */
int mtrr; /* Boolean: MTRR used */
@ -608,6 +509,19 @@ typedef struct drm_vbl_sig {
int pid;
} drm_vbl_sig_t;
struct drm_vblank_info {
wait_queue_head_t queue; /* vblank wait queue */
atomic_t count; /* number of VBLANK interrupts */
/* (driver must alloc the right number of counters) */
struct drm_vbl_sig_list sigs; /* signal list to send on VBLANK */
atomic_t refcount; /* number of users of vblank interrupts */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
int enabled; /* so we don't call enable more than */
/* once per disable */
int inmodeset; /* Display driver is setting mode */
};
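This struct replaces the parallel per-CRTC arrays (vbl_queue, *_vblank_count, vblank_refcount, last_vblank, ...) deleted from struct drm_device further down; the driver now gets one drm_vblank_info per CRTC. A hedged sketch of how the array might be set up at IRQ-init time (the helper name is made up; drm_vbl_sig_list is assumed to be a TAILQ head declared elsewhere in this header):

/* Sketch only: allocate and initialize one drm_vblank_info per CRTC. */
static int
example_vblank_setup(struct drm_device *dev, int num_crtcs)
{
	int i;

	dev->vblank = malloc(num_crtcs * sizeof(*dev->vblank), M_DRM,
	    M_NOWAIT | M_ZERO);
	if (dev->vblank == NULL)
		return ENOMEM;

	dev->num_crtcs = num_crtcs;
	for (i = 0; i < num_crtcs; i++) {
		TAILQ_INIT(&dev->vblank[i].sigs);	/* pending signal list */
		atomic_set(&dev->vblank[i].count, 0);
		atomic_set(&dev->vblank[i].refcount, 0);
	}
	return 0;
}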
/* location of GART table */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
@ -637,9 +551,9 @@ struct drm_ati_pcigart_info {
struct drm_driver_info {
int (*load)(struct drm_device *, unsigned long flags);
int (*firstopen)(struct drm_device *);
int (*open)(struct drm_device *, drm_file_t *);
int (*open)(struct drm_device *, struct drm_file *);
void (*preclose)(struct drm_device *, struct drm_file *file_priv);
void (*postclose)(struct drm_device *, drm_file_t *);
void (*postclose)(struct drm_device *, struct drm_file *);
void (*lastclose)(struct drm_device *);
int (*unload)(struct drm_device *);
void (*reclaim_buffers_locked)(struct drm_device *,
@ -649,9 +563,9 @@ struct drm_driver_info {
void (*dma_ready)(struct drm_device *);
int (*dma_quiescent)(struct drm_device *);
int (*dma_flush_block_and_flush)(struct drm_device *, int context,
drm_lock_flags_t flags);
enum drm_lock_flags flags);
int (*dma_flush_unblock)(struct drm_device *, int context,
drm_lock_flags_t flags);
enum drm_lock_flags flags);
int (*context_ctor)(struct drm_device *dev, int context);
int (*context_dtor)(struct drm_device *dev, int context);
int (*kernel_context_switch)(struct drm_device *dev, int old,
@ -692,16 +606,7 @@ struct drm_driver_info {
const char *desc; /* Longer driver name */
const char *date; /* Date of last major changes. */
unsigned use_agp :1;
unsigned require_agp :1;
unsigned use_sg :1;
unsigned use_dma :1;
unsigned use_pci_dma :1;
unsigned use_dma_queue :1;
unsigned use_irq :1;
unsigned use_vbl_irq :1;
unsigned use_vbl_irq2 :1;
unsigned use_mtrr :1;
u32 driver_features;
};
/* Length for the array of resource pointers for drm_get_resource_*. */
@ -711,11 +616,7 @@ struct drm_driver_info {
* DRM device functions structure
*/
struct drm_device {
#if defined(__NetBSD__) || defined(__OpenBSD__)
struct device device; /* softc is an extension of struct device */
#endif
struct drm_driver_info driver;
struct drm_driver_info *driver;
drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */
u_int16_t pci_device; /* PCI device id */
@ -723,21 +624,17 @@ struct drm_device {
char *unique; /* Unique identifier: e.g., busid */
int unique_len; /* Length of unique field */
#ifdef __FreeBSD__
device_t device; /* Device instance from newbus */
#endif
struct cdev *devnode; /* Device number for mknod */
int if_version; /* Highest interface version set */
int flags; /* Flags to open(2) */
/* Locks */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
struct mtx vbl_lock; /* protects vblank operations */
struct mtx dma_lock; /* protects dev->dma */
struct mtx irq_lock; /* protects irq condition checks */
struct mtx dev_lock; /* protects everything else */
#endif
DRM_SPINTYPE drw_lock;
DRM_SPINTYPE tsk_lock;
@ -747,7 +644,7 @@ struct drm_device {
/* Performance counters */
unsigned long counters;
drm_stat_type_t types[15];
enum drm_stat_type types[15];
atomic_t counts[15];
/* Authentication */
@ -768,12 +665,8 @@ struct drm_device {
/* Context support */
int irq; /* Interrupt used by board */
int irq_enabled; /* True if the irq handler is enabled */
#ifdef __FreeBSD__
int irqrid; /* Interrupt used by board */
struct resource *irqr; /* Resource for interrupt used by board */
#elif defined(__NetBSD__) || defined(__OpenBSD__)
struct pci_attach_args pa;
#endif
void *irqh; /* Handle from bus_setup_intr */
/* Storage of resource pointers for drm_get_resource_* */
@ -787,27 +680,15 @@ struct drm_device {
atomic_t context_flag; /* Context swapping flag */
int last_context; /* Last current context */
int vblank_disable_allowed;
wait_queue_head_t *vbl_queue; /* vblank wait queue */
atomic_t *_vblank_count; /* number of VBLANK interrupts */
/* (driver must alloc the right number of counters) */
struct drm_vbl_sig_list *vbl_sigs; /* signal list to send on VBLANK */
atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
u32 *last_vblank; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
int *vblank_enabled; /* so we don't call enable more than */
/* once per disable */
int *vblank_inmodeset; /* Display driver is setting mode */
atomic_t vbl_signal_pending; /* number of signals pending on all crtcs */
struct callout vblank_disable_timer;
u32 max_vblank_count; /* size of vblank counter register */
struct drm_vblank_info *vblank; /* per crtc vblank info */
int num_crtcs;
#ifdef __FreeBSD__
struct sigio *buf_sigio; /* Processes waiting for SIGIO */
#elif defined(__NetBSD__)
pid_t buf_pgid;
#endif
/* Sysctl support */
struct drm_sysctl_info *sysctl;
@ -827,45 +708,39 @@ struct drm_device {
void (*locked_task_call)(struct drm_device *dev);
};
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
return ((dev->driver->driver_features & feature) ? 1 : 0);
}
#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
{
return drm_core_check_feature(dev, DRIVER_USE_AGP);
}
#else
#define drm_core_has_AGP(dev) (0)
#endif
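Together with the DRIVER_* mask defined earlier, these inlines replace the old per-driver bitfields (use_agp, use_sg, use_irq, ...): a driver advertises everything in one driver_features word and the core queries it with drm_core_check_feature(). A minimal sketch of the pattern; the values chosen are illustrative:

/* Driver side: advertise capabilities once, typically at attach time. */
static void
example_driver_configure(struct drm_driver_info *driver)
{
	driver->driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
	    DRIVER_USE_MTRR | DRIVER_HAVE_IRQ;
}

/* Core side: gate an optional path on a capability bit. */
static int
example_require_sg(struct drm_device *dev)
{
	if (!drm_core_check_feature(dev, DRIVER_SG))
		return EINVAL;	/* driver never registered SG support */
	return 0;
}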
extern int drm_debug_flag;
/* Device setup support (drm_drv.c) */
#ifdef __FreeBSD__
int drm_probe(device_t nbdev, drm_pci_id_list_t *idlist);
int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist);
void drm_close(void *data);
int drm_detach(device_t nbdev);
d_ioctl_t drm_ioctl;
d_open_t drm_open;
d_close_t drm_close;
d_read_t drm_read;
d_poll_t drm_poll;
d_mmap_t drm_mmap;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
int drm_probe(struct pci_attach_args *pa, drm_pci_id_list_t *idlist);
int drm_attach(struct pci_attach_args *pa, dev_t kdev, drm_pci_id_list_t *idlist);
dev_type_ioctl(drm_ioctl);
dev_type_open(drm_open);
dev_type_close(drm_close);
dev_type_read(drm_read);
dev_type_poll(drm_poll);
dev_type_mmap(drm_mmap);
#endif
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
/* File operations helpers (drm_fops.c) */
#ifdef __FreeBSD__
extern int drm_open_helper(struct cdev *kdev, int flags, int fmt,
DRM_STRUCTPROC *p,
struct drm_device *dev);
extern drm_file_t *drm_find_file_by_proc(struct drm_device *dev,
DRM_STRUCTPROC *p);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
extern int drm_open_helper(dev_t kdev, int flags, int fmt,
DRM_STRUCTPROC *p,
struct drm_device *dev);
extern drm_file_t *drm_find_file_by_proc(struct drm_device *dev,
DRM_STRUCTPROC *p);
#endif /* __NetBSD__ || __OpenBSD__ */
/* Memory management support (drm_memory.c) */
void drm_mem_init(void);
@ -875,6 +750,7 @@ void *drm_calloc(size_t nmemb, size_t size, int area);
void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
int area);
void drm_free(void *pt, size_t size, int area);
void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map);
void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map);
void drm_ioremapfree(drm_local_map_t *map);
int drm_mtrr_add(unsigned long offset, size_t size, int flags);
@ -889,13 +765,11 @@ void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
int drm_ctxbitmap_next(struct drm_device *dev);
/* Locking IOCTL support (drm_lock.c) */
int drm_lock_take(__volatile__ unsigned int *lock,
unsigned int context);
int drm_lock_transfer(struct drm_device *dev,
__volatile__ unsigned int *lock,
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context);
int drm_lock_transfer(struct drm_lock_data *lock_data,
unsigned int context);
int drm_lock_free(struct drm_device *dev,
__volatile__ unsigned int *lock,
int drm_lock_free(struct drm_lock_data *lock_data,
unsigned int context);
/* Buffer management support (drm_bufs.c) */
@ -907,11 +781,11 @@ void drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
int drm_order(unsigned long size);
int drm_addmap(struct drm_device *dev, unsigned long offset,
unsigned long size,
drm_map_type_t type, drm_map_flags_t flags,
enum drm_map_type type, enum drm_map_flags flags,
drm_local_map_t **map_ptr);
int drm_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request);
int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request);
int drm_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request);
int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request);
int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request);
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request);
/* DMA support (drm_dma.c) */
int drm_dma_setup(struct drm_device *dev);
@ -943,26 +817,24 @@ int drm_device_is_pcie(struct drm_device *dev);
drm_agp_head_t *drm_agp_init(void);
int drm_agp_acquire(struct drm_device *dev);
int drm_agp_release(struct drm_device *dev);
int drm_agp_info(struct drm_device * dev, drm_agp_info_t *info);
int drm_agp_enable(struct drm_device *dev, drm_agp_mode_t mode);
int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info);
int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
void *drm_agp_allocate_memory(size_t pages, u32 type);
int drm_agp_free_memory(void *handle);
int drm_agp_bind_memory(void *handle, off_t start);
int drm_agp_unbind_memory(void *handle);
int drm_agp_alloc(struct drm_device *dev, drm_agp_buffer_t *request);
int drm_agp_free(struct drm_device *dev, drm_agp_buffer_t *request);
int drm_agp_bind(struct drm_device *dev, drm_agp_binding_t *request);
int drm_agp_unbind(struct drm_device *dev, drm_agp_binding_t *request);
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
/* Scatter Gather Support (drm_scatter.c) */
void drm_sg_cleanup(drm_sg_mem_t *entry);
int drm_sg_alloc(struct drm_device *dev, drm_scatter_gather_t * request);
int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
#ifdef __FreeBSD__
/* sysctl support (drm_sysctl.h) */
extern int drm_sysctl_init(struct drm_device *dev);
extern int drm_sysctl_cleanup(struct drm_device *dev);
#endif /* __FreeBSD__ */
/* ATI PCIGART support (ati_pcigart.c) */
int drm_ati_pcigart_init(struct drm_device *dev,
@ -1000,7 +872,7 @@ int drm_noop(struct drm_device *dev, void *data,
int drm_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct drm_file *file_priv);
int drm_modctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_getctx(struct drm_device *dev, void *data,
@ -1026,6 +898,9 @@ int drm_update_draw(struct drm_device *dev, void *data,
struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
int handle);
/* Drawable support (drm_drawable.c) */
void drm_drawable_free_all(struct drm_device *dev);
/* Authentication IOCTL support (drm_auth.c) */
int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@ -1052,7 +927,8 @@ int drm_mapbufs(struct drm_device *dev, void *data,
int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv);
/* IRQ support (drm_irq.c) */
int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv);
int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_locked_tasklet(struct drm_device *dev,
@ -1087,10 +963,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
#define drm_core_ioremap_wc drm_core_ioremap
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void
drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
map->handle = drm_ioremap_wc(dev, map);
}
static __inline__ void
drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
map->handle = drm_ioremap(dev, map);


@ -35,20 +35,17 @@
#include "drmP.h"
#ifdef __FreeBSD__
#if __FreeBSD_version >= 800004
#include <dev/agp/agpreg.h>
#else /* __FreeBSD_version >= 800004 */
#include <pci/agpreg.h>
#endif /* __FreeBSD_version >= 800004 */
#include <dev/pci/pcireg.h>
#endif
/* Returns 1 if AGP or 0 if not. */
static int
drm_device_find_capability(struct drm_device *dev, int cap)
{
#ifdef __FreeBSD__
#if __FreeBSD_version >= 602102
return (pci_find_extcap(dev->device, cap, NULL) == 0);
@ -82,21 +79,17 @@ drm_device_find_capability(struct drm_device *dev, int cap)
return 0;
#endif
#else
/* XXX: fill me in for non-FreeBSD */
return 1;
#endif
}
int drm_device_is_agp(struct drm_device *dev)
{
if (dev->driver.device_is_agp != NULL) {
if (dev->driver->device_is_agp != NULL) {
int ret;
/* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
* AGP, 2 = fall back to PCI capability
*/
ret = (*dev->driver.device_is_agp)(dev);
ret = (*dev->driver->device_is_agp)(dev);
if (ret != DRM_MIGHT_BE_AGP)
return ret;
}
@ -109,7 +102,7 @@ int drm_device_is_pcie(struct drm_device *dev)
return (drm_device_find_capability(dev, PCIY_EXPRESS));
}
int drm_agp_info(struct drm_device * dev, drm_agp_info_t *info)
int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
{
struct agp_info *kern;
@ -135,13 +128,13 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int err;
drm_agp_info_t info;
struct drm_agp_info info;
err = drm_agp_info(dev, &info);
if (err != 0)
return err;
*(drm_agp_info_t *) data = info;
*(struct drm_agp_info *) data = info;
return 0;
}
@ -183,7 +176,7 @@ int drm_agp_release(struct drm_device * dev)
return 0;
}
int drm_agp_enable(struct drm_device *dev, drm_agp_mode_t mode)
int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
@ -198,14 +191,14 @@ int drm_agp_enable(struct drm_device *dev, drm_agp_mode_t mode)
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_mode_t mode;
struct drm_agp_mode mode;
mode = *(drm_agp_mode_t *) data;
mode = *(struct drm_agp_mode *) data;
return drm_agp_enable(dev, mode);
}
int drm_agp_alloc(struct drm_device *dev, drm_agp_buffer_t *request)
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
drm_agp_mem_t *entry;
void *handle;
@ -251,16 +244,16 @@ int drm_agp_alloc(struct drm_device *dev, drm_agp_buffer_t *request)
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_buffer_t request;
struct drm_agp_buffer request;
int retcode;
request = *(drm_agp_buffer_t *) data;
request = *(struct drm_agp_buffer *) data;
DRM_LOCK();
retcode = drm_agp_alloc(dev, &request);
DRM_UNLOCK();
*(drm_agp_buffer_t *) data = request;
*(struct drm_agp_buffer *) data = request;
return retcode;
}
@ -276,7 +269,7 @@ static drm_agp_mem_t * drm_agp_lookup_entry(struct drm_device *dev,
return NULL;
}
int drm_agp_unbind(struct drm_device *dev, drm_agp_binding_t *request)
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
drm_agp_mem_t *entry;
int retcode;
@ -301,10 +294,10 @@ int drm_agp_unbind(struct drm_device *dev, drm_agp_binding_t *request)
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_binding_t request;
struct drm_agp_binding request;
int retcode;
request = *(drm_agp_binding_t *) data;
request = *(struct drm_agp_binding *) data;
DRM_LOCK();
retcode = drm_agp_unbind(dev, &request);
@ -313,7 +306,7 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
return retcode;
}
int drm_agp_bind(struct drm_device *dev, drm_agp_binding_t *request)
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
drm_agp_mem_t *entry;
int retcode;
@ -342,10 +335,10 @@ int drm_agp_bind(struct drm_device *dev, drm_agp_binding_t *request)
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_binding_t request;
struct drm_agp_binding request;
int retcode;
request = *(drm_agp_binding_t *) data;
request = *(struct drm_agp_binding *) data;
DRM_LOCK();
retcode = drm_agp_bind(dev, &request);
@ -354,7 +347,7 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
return retcode;
}
int drm_agp_free(struct drm_device *dev, drm_agp_buffer_t *request)
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
drm_agp_mem_t *entry;
@ -387,10 +380,10 @@ int drm_agp_free(struct drm_device *dev, drm_agp_buffer_t *request)
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_buffer_t request;
struct drm_agp_buffer request;
int retcode;
request = *(drm_agp_buffer_t *) data;
request = *(struct drm_agp_buffer *) data;
DRM_LOCK();
retcode = drm_agp_free(dev, &request);


@ -33,65 +33,12 @@
typedef u_int32_t atomic_t;
#ifdef __FreeBSD__
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_subtract_int(p, 1)
#define atomic_add(n, p) atomic_add_int(p, n)
#define atomic_sub(n, p) atomic_subtract_int(p, n)
#else /* __FreeBSD__ */
/* FIXME */
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
#define atomic_inc(p) (*(p) += 1)
#define atomic_dec(p) (*(p) -= 1)
#define atomic_add(n, p) (*(p) += (n))
#define atomic_sub(n, p) (*(p) -= (n))
/* FIXME */
#define atomic_add_int(p, v) *(p) += v
#define atomic_subtract_int(p, v) *(p) -= v
#define atomic_set_int(p, bits) *(p) |= (bits)
#define atomic_clear_int(p, bits) *(p) &= ~(bits)
#endif /* !__FreeBSD__ */
#if !defined(__FreeBSD_version) || (__FreeBSD_version < 500000)
#if defined(__i386__)
/* The extra atomic functions from 5.0 haven't been merged to 4.x */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
{
int res = exp;
__asm __volatile (
" lock ; "
" cmpxchgl %1,%2 ; "
" setz %%al ; "
" movzbl %%al,%0 ; "
"1: "
"# atomic_cmpset_int"
: "+a" (res) /* 0 (result) */
: "r" (src), /* 1 */
"m" (*(dst)) /* 2 */
: "memory");
return (res);
}
#else /* __i386__ */
static __inline int
atomic_cmpset_int(__volatile__ int *dst, int old, int new)
{
int s = splhigh();
if (*dst==old) {
*dst = new;
splx(s);
return 1;
}
splx(s);
return 0;
}
#endif /* !__i386__ */
#endif /* !__FreeBSD_version || __FreeBSD_version < 500000 */
static __inline atomic_t
test_and_set_bit(int b, volatile void *p)
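The hunk ends just as test_and_set_bit() begins. With the 4.x compatibility shims removed, helpers like this are naturally built on the native atomic(9) primitives; a hedged sketch using atomic_cmpset_int(), not necessarily the body this tree actually uses:

static __inline int
example_test_and_set_bit(int b, volatile void *p)
{
	volatile u_int *q = (volatile u_int *)p;
	u_int mask = 1U << b;
	u_int old;

	/* Retry until the compare-and-set lands, then report the old bit. */
	do {
		old = *q;
	} while (atomic_cmpset_int(q, old, old | mask) == 0);

	return ((old & mask) != 0);
}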


@ -43,7 +43,7 @@ static int drm_hash_magic(drm_magic_t magic)
/**
* Returns the file private associated with the given magic number.
*/
static drm_file_t *drm_find_file(struct drm_device *dev, drm_magic_t magic)
static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
{
drm_magic_entry_t *pt;
int hash = drm_hash_magic(magic);
@ -63,7 +63,7 @@ static drm_file_t *drm_find_file(struct drm_device *dev, drm_magic_t magic)
* Inserts the given magic number into the hash table of used magic number
* lists.
*/
static int drm_add_magic(struct drm_device *dev, drm_file_t *priv,
static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
drm_magic_t magic)
{
int hash;
@ -75,7 +75,8 @@ static int drm_add_magic(struct drm_device *dev, drm_file_t *priv,
hash = drm_hash_magic(magic);
entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT);
if (!entry) return ENOMEM;
if (!entry)
return ENOMEM;
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
@ -117,11 +118,11 @@ static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
if (prev) {
prev->next = pt->next;
}
free(pt, M_DRM);
return 0;
}
}
free(pt, M_DRM);
return EINVAL;
}
@ -136,9 +137,9 @@ static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
static drm_magic_t sequence = 0;
drm_auth_t *auth = data;
struct drm_auth *auth = data;
/* Find unique magic */
/* Find unique magic */
if (file_priv->magic) {
auth->magic = file_priv->magic;
} else {
@ -167,8 +168,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_auth_t *auth = data;
drm_file_t *priv;
struct drm_auth *auth = data;
struct drm_file *priv;
DRM_DEBUG("%u\n", auth->magic);


@ -36,22 +36,6 @@
#include "drmP.h"
/*
* Compute order. Can be made faster.
*/
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
if ( size & ~(1 << order) )
++order;
return order;
}
/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
* drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual
* address for accessing them. Cleaned up at unload.
@ -102,7 +86,7 @@ unsigned long drm_get_resource_len(struct drm_device *dev,
int drm_addmap(struct drm_device * dev, unsigned long offset,
unsigned long size,
drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
drm_local_map_t *map;
int align;
@ -152,7 +136,7 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
* initialization necessary.
*/
map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
if ( !map ) {
if (!map) {
DRM_LOCK();
return ENOMEM;
}
@ -162,7 +146,7 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
map->type = type;
map->flags = flags;
switch ( map->type ) {
switch (map->type) {
case _DRM_REGISTERS:
map->handle = drm_ioremap(dev, map);
if (!(map->flags & _DRM_WRITE_COMBINING))
@ -174,15 +158,15 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
break;
case _DRM_SHM:
map->handle = malloc(map->size, M_DRM, M_NOWAIT);
DRM_DEBUG( "%lu %d %p\n",
map->size, drm_order(map->size), map->handle );
if ( !map->handle ) {
DRM_DEBUG("%lu %d %p\n",
map->size, drm_order(map->size), map->handle);
if (!map->handle) {
free(map, M_DRM);
DRM_LOCK();
return ENOMEM;
}
map->offset = (unsigned long)map->handle;
if ( map->flags & _DRM_CONTAINS_LOCK ) {
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
DRM_LOCK();
if (dev->lock.hw_lock != NULL) {
@ -274,7 +258,7 @@ done:
int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_map_t *request = data;
struct drm_map *request = data;
drm_local_map_t *map;
int err;
@ -355,7 +339,7 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_local_map_t *map;
drm_map_t *request = data;
struct drm_map *request = data;
DRM_LOCK();
TAILQ_FOREACH(map, &dev->maplist, link) {
@ -402,7 +386,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
}
}
static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
@ -426,20 +410,20 @@ static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size;
? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = dev->agp->base + request->agp_start;
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
/* Make sure buffers are located in AGP memory that we own */
/* Breaks MGA due to drm_alloc_agp not setting up entries for the
@ -465,7 +449,7 @@ static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
M_NOWAIT | M_ZERO);
if ( !entry->buflist ) {
if (!entry->buflist) {
return ENOMEM;
}
@ -474,7 +458,7 @@ static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
offset = 0;
while ( entry->buf_count < count ) {
while (entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
@ -488,7 +472,7 @@ static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_priv_size = dev->driver->buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
M_NOWAIT | M_ZERO);
if (buf->dev_private == NULL) {
@ -503,7 +487,7 @@ static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
byte_count += PAGE_SIZE << page_order;
}
DRM_DEBUG( "byte_count: %d\n", byte_count );
DRM_DEBUG("byte_count: %d\n", byte_count);
temp_buflist = realloc(dma->buflist,
(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
@ -515,15 +499,15 @@ static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
}
dma->buflist = temp_buflist;
for ( i = 0 ; i < entry->buf_count ; i++ ) {
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
request->count = entry->buf_count;
request->size = size;
@ -533,7 +517,7 @@ static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
return 0;
}
static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
drm_device_dma_t *dma = dev->dma;
int count;
@ -555,11 +539,11 @@ static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
order = drm_order(request->size);
size = 1 << order;
DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
request->count, request->size, size, order );
DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
request->count, request->size, size, order);
alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size;
? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
@ -578,25 +562,28 @@ static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
if (entry->buflist == NULL || entry->seglist == NULL ||
temp_pagelist == NULL) {
free(entry->buflist, M_DRM);
free(temp_pagelist, M_DRM);
free(entry->seglist, M_DRM);
free(entry->buflist, M_DRM);
return ENOMEM;
}
memcpy(temp_pagelist, dma->pagelist, dma->page_count *
sizeof(*dma->pagelist));
DRM_DEBUG( "pagelist: %d entries\n",
dma->page_count + (count << page_order) );
DRM_DEBUG("pagelist: %d entries\n",
dma->page_count + (count << page_order));
entry->buf_size = size;
entry->page_order = page_order;
byte_count = 0;
page_count = 0;
while ( entry->buf_count < count ) {
while (entry->buf_count < count) {
DRM_SPINUNLOCK(&dev->dma_lock);
drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
0xfffffffful);
DRM_SPINLOCK(&dev->dma_lock);
if (dmah == NULL) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@ -607,16 +594,16 @@ static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
}
entry->seglist[entry->seg_count++] = dmah;
for ( i = 0 ; i < (1 << page_order) ; i++ ) {
DRM_DEBUG( "page %d @ %p\n",
dma->page_count + page_count,
(char *)dmah->vaddr + PAGE_SIZE * i );
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ %p\n",
dma->page_count + page_count,
(char *)dmah->vaddr + PAGE_SIZE * i);
temp_pagelist[dma->page_count + page_count++] =
(long)dmah->vaddr + PAGE_SIZE * i;
}
for ( offset = 0 ;
offset + size <= total && entry->buf_count < count ;
offset += alignment, ++entry->buf_count ) {
for (offset = 0;
offset + size <= total && entry->buf_count < count;
offset += alignment, ++entry->buf_count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
@ -629,7 +616,7 @@ static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_priv_size = dev->driver->buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
M_NOWAIT | M_ZERO);
if (buf->dev_private == NULL) {
@ -641,8 +628,8 @@ static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
return ENOMEM;
}
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
}
byte_count += PAGE_SIZE << page_order;
}
@ -658,7 +645,7 @@ static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
}
dma->buflist = temp_buflist;
for ( i = 0 ; i < entry->buf_count ; i++ ) {
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
@ -680,7 +667,7 @@ static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
}
static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
@ -702,20 +689,20 @@ static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size;
? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request->agp_start;
DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order );
DRM_DEBUG( "size: %d\n", size );
DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
DRM_DEBUG( "alignment: %d\n", alignment );
DRM_DEBUG( "page_order: %d\n", page_order );
DRM_DEBUG( "total: %d\n", total );
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %ld\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
entry = &dma->bufs[order];
@ -729,7 +716,7 @@ static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
offset = 0;
while ( entry->buf_count < count ) {
while (entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
@ -743,7 +730,7 @@ static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
buf->pending = 0;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_priv_size = dev->driver->buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
M_NOWAIT | M_ZERO);
if (buf->dev_private == NULL) {
@ -753,15 +740,15 @@ static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
return ENOMEM;
}
DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address );
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
DRM_DEBUG( "byte_count: %d\n", byte_count );
DRM_DEBUG("byte_count: %d\n", byte_count);
temp_buflist = realloc(dma->buflist,
(dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
@ -773,15 +760,15 @@ static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
}
dma->buflist = temp_buflist;
for ( i = 0 ; i < entry->buf_count ; i++ ) {
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
request->count = entry->buf_count;
request->size = size;
@ -791,7 +778,7 @@ static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
return 0;
}
int drm_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
int order, ret;
@ -822,7 +809,7 @@ int drm_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
return ret;
}
int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
int order, ret;
@ -856,7 +843,7 @@ int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
return ret;
}
int drm_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
int order, ret;
@ -893,7 +880,7 @@ int drm_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
int drm_addbufs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_desc_t *request = data;
struct drm_buf_desc *request = data;
int err;
if (request->flags & _DRM_AGP_BUFFER)
@ -909,7 +896,7 @@ int drm_addbufs_ioctl(struct drm_device *dev, void *data,
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t *request = data;
struct drm_buf_info *request = data;
int i;
int count;
int retcode = 0;
@ -918,16 +905,17 @@ int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
++dev->buf_use; /* Can't allocate more after this call */
DRM_SPINUNLOCK(&dev->dma_lock);
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) ++count;
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count)
++count;
}
DRM_DEBUG( "count = %d\n", count );
DRM_DEBUG("count = %d\n", count);
if ( request->count >= count ) {
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) {
drm_buf_desc_t from;
if (request->count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count) {
struct drm_buf_desc from;
from.count = dma->bufs[i].buf_count;
from.size = dma->bufs[i].buf_size;
@ -935,17 +923,16 @@ int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
from.high_mark = dma->bufs[i].freelist.high_mark;
if (DRM_COPY_TO_USER(&request->list[count], &from,
sizeof(drm_buf_desc_t)) != 0) {
sizeof(struct drm_buf_desc)) != 0) {
retcode = EFAULT;
break;
}
DRM_DEBUG( "%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].freelist.low_mark,
dma->bufs[i].freelist.high_mark );
DRM_DEBUG("%d %d %d %d %d\n",
i, dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
dma->bufs[i].freelist.low_mark,
dma->bufs[i].freelist.high_mark);
++count;
}
}
@ -958,11 +945,11 @@ int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t *request = data;
struct drm_buf_desc *request = data;
int order;
DRM_DEBUG( "%d, %d, %d\n",
request->size, request->low_mark, request->high_mark );
DRM_DEBUG("%d, %d, %d\n",
request->size, request->low_mark, request->high_mark);
order = drm_order(request->size);
@ -988,30 +975,30 @@ int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_free_t *request = data;
struct drm_buf_free *request = data;
int i;
int idx;
drm_buf_t *buf;
int retcode = 0;
DRM_DEBUG( "%d\n", request->count );
DRM_DEBUG("%d\n", request->count);
DRM_SPINLOCK(&dev->dma_lock);
for ( i = 0 ; i < request->count ; i++ ) {
for (i = 0; i < request->count; i++) {
if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
retcode = EFAULT;
break;
}
if ( idx < 0 || idx >= dma->buf_count ) {
DRM_ERROR( "Index %d (of %d max)\n",
idx, dma->buf_count - 1 );
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
idx, dma->buf_count - 1);
retcode = EINVAL;
break;
}
buf = dma->buflist[idx];
if ( buf->file_priv != file_priv ) {
if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
DRM_CURRENTPID);
DRM_CURRENTPID);
retcode = EINVAL;
break;
}
@ -1029,30 +1016,13 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
const int zero = 0;
vm_offset_t address;
struct vmspace *vms;
#ifdef __FreeBSD__
vm_ooffset_t foff;
vm_size_t size;
vm_offset_t vaddr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
struct vnode *vn;
voff_t foff;
vsize_t size;
vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */
drm_buf_map_t *request = data;
struct drm_buf_map *request = data;
int i;
#if defined(__NetBSD__) || defined(__OpenBSD__)
if (!vfinddev(kdev, VCHR, &vn))
return 0; /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD */
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
vms = DRM_CURPROC->td_proc->p_vmspace;
#else
vms = DRM_CURPROC->p_vmspace;
#endif
DRM_SPINLOCK(&dev->dma_lock);
dev->buf_use++; /* Can't allocate more after this call */
@ -1061,8 +1031,9 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (request->count < dma->buf_count)
goto done;
if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
(dev->driver.use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
(drm_core_check_feature(dev, DRIVER_SG) &&
(dma->flags & _DRM_DMA_USE_SG))) {
drm_local_map_t *map = dev->agp_buffer_map;
if (map == NULL) {
@ -1076,7 +1047,6 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
foff = 0;
}
#ifdef __FreeBSD__
vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
@ -1086,18 +1056,12 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
foff);
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
&vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD */
if (retcode)
goto done;
request->virtual = (void *)vaddr;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
for (i = 0; i < dma->buf_count; i++) {
if (DRM_COPY_TO_USER(&request->list[i].idx,
&dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
retcode = EFAULT;
@ -1124,7 +1088,23 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
done:
request->count = dma->buf_count;
DRM_DEBUG( "%d buffers, retcode = %d\n", request->count, retcode );
DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
return retcode;
}
/*
* Compute order. Can be made faster.
*/
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
for (order = 0, tmp = size; tmp >>= 1; ++order);
if (size & ~(1 << order))
++order;
return order;
}
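drm_order(), relocated here from the top of the file, returns the smallest power-of-two exponent covering size, i.e. ceil(log2(size)); the addbufs paths above then derive page_order = order - PAGE_SHIFT and total = PAGE_SIZE << page_order. A few worked values, assuming 4 KB pages (PAGE_SHIFT == 12):

#include <assert.h>

static int
order_of(unsigned long size)	/* same computation as drm_order() */
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;
	if (size & ~(1UL << order))
		++order;
	return order;
}

int
main(void)
{
	assert(order_of(1) == 0);
	assert(order_of(4096) == 12);	/* one page exactly: page_order 0 */
	assert(order_of(4097) == 13);	/* rounds up: page_order 1, 8 KB buffers */
	assert(order_of(65536) == 16);	/* page_order 4, total = PAGE_SIZE << 4 */
	return 0;
}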


@ -62,7 +62,7 @@ int drm_ctxbitmap_next(struct drm_device *dev)
return -1;
DRM_LOCK();
bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit >= DRM_MAX_CTXBITMAP) {
DRM_UNLOCK();
return -1;
@ -108,7 +108,7 @@ int drm_ctxbitmap_init(struct drm_device *dev)
DRM_LOCK();
dev->ctx_bitmap = malloc(PAGE_SIZE, M_DRM, M_NOWAIT | M_ZERO);
if ( dev->ctx_bitmap == NULL ) {
if (dev->ctx_bitmap == NULL) {
DRM_UNLOCK();
return ENOMEM;
}
@ -116,9 +116,9 @@ int drm_ctxbitmap_init(struct drm_device *dev)
dev->max_context = -1;
DRM_UNLOCK();
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
return 0;
@ -140,7 +140,7 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev)
int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_ctx_priv_map_t *request = data;
struct drm_ctx_priv_map *request = data;
drm_local_map_t *map;
DRM_LOCK();
@ -161,7 +161,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_ctx_priv_map_t *request = data;
struct drm_ctx_priv_map *request = data;
drm_local_map_t *map = NULL;
DRM_LOCK();
@ -188,49 +188,49 @@ bad:
int drm_context_switch(struct drm_device *dev, int old, int new)
{
if ( test_and_set_bit( 0, &dev->context_flag ) ) {
DRM_ERROR( "Reentering -- FIXME\n" );
return EBUSY;
}
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return EBUSY;
}
DRM_DEBUG( "Context switch from %d to %d\n", old, new );
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if ( new == dev->last_context ) {
clear_bit( 0, &dev->context_flag );
return 0;
}
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
return 0;
return 0;
}
int drm_context_switch_complete(struct drm_device *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
DRM_ERROR( "Lock isn't held after context switch\n" );
}
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
clear_bit( 0, &dev->context_flag );
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
return 0;
return 0;
}
int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_ctx_res_t *res = data;
drm_ctx_t ctx;
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
int i;
if ( res->count >= DRM_RESERVED_CONTEXTS ) {
if (res->count >= DRM_RESERVED_CONTEXTS) {
bzero(&ctx, sizeof(ctx));
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if ( DRM_COPY_TO_USER( &res->contexts[i],
&ctx, sizeof(ctx) ) )
if (DRM_COPY_TO_USER(&res->contexts[i],
&ctx, sizeof(ctx)))
return EFAULT;
}
}
@ -241,23 +241,23 @@ int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_ctx_t *ctx = data;
struct drm_ctx *ctx = data;
ctx->handle = drm_ctxbitmap_next(dev);
if ( ctx->handle == DRM_KERNEL_CONTEXT ) {
/* Skip kernel's context and get a new one. */
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx->handle = drm_ctxbitmap_next(dev);
}
DRM_DEBUG( "%d\n", ctx->handle );
if ( ctx->handle == -1 ) {
DRM_DEBUG( "Not enough free contexts.\n" );
/* Should this return -EBUSY instead? */
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return ENOMEM;
}
if (dev->driver.context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
DRM_LOCK();
dev->driver.context_ctor(dev, ctx->handle);
dev->driver->context_ctor(dev, ctx->handle);
DRM_UNLOCK();
}
@ -272,7 +272,7 @@ int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_ctx_t *ctx = data;
struct drm_ctx *ctx = data;
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@ -283,17 +283,17 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_ctx_t *ctx = data;
struct drm_ctx *ctx = data;
DRM_DEBUG( "%d\n", ctx->handle );
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_ctx_t *ctx = data;
struct drm_ctx *ctx = data;
DRM_DEBUG( "%d\n", ctx->handle );
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, ctx->handle);
return 0;
@ -301,13 +301,13 @@ int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_ctx_t *ctx = data;
struct drm_ctx *ctx = data;
DRM_DEBUG( "%d\n", ctx->handle );
if ( ctx->handle != DRM_KERNEL_CONTEXT ) {
if (dev->driver.context_dtor) {
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor) {
DRM_LOCK();
dev->driver.context_dtor(dev, ctx->handle);
dev->driver->context_dtor(dev, ctx->handle);
DRM_UNLOCK();
}

View file

@ -58,14 +58,12 @@ void drm_dma_takedown(struct drm_device *dev)
if (dma == NULL)
return;
/* Clear dma buffers */
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
" seg_count = %d\n", i, dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
drm_pci_free(dev, dma->bufs[i].seglist[j]);
}
@ -91,7 +89,8 @@ void drm_dma_takedown(struct drm_device *dev)
void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
{
if (!buf) return;
if (!buf)
return;
buf->pending = 0;
buf->file_priv = NULL;
@ -103,7 +102,9 @@ void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma) return;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
@ -125,9 +126,9 @@ void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
if (dev->driver.dma_ioctl) {
if (dev->driver->dma_ioctl) {
/* shared code returns -errno */
return -dev->driver.dma_ioctl(dev, data, file_priv);
return -dev->driver->dma_ioctl(dev, data, file_priv);
} else {
DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
return EINVAL;

View file

@ -68,7 +68,7 @@ drm_get_drawable_info(struct drm_device *dev, int handle)
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_draw_t *draw = data;
struct drm_draw *draw = data;
struct bsd_drm_drawable_info *info;
info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info),
@ -89,7 +89,7 @@ int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_draw_t *draw = (drm_draw_t *)data;
struct drm_draw *draw = (struct drm_draw *)data;
struct drm_drawable_info *info;
DRM_SPINLOCK(&dev->drw_lock);
@ -151,3 +151,22 @@ int drm_update_draw(struct drm_device *dev, void *data,
return EINVAL;
}
}
void drm_drawable_free_all(struct drm_device *dev)
{
struct bsd_drm_drawable_info *info, *next;
DRM_SPINLOCK(&dev->drw_lock);
for (info = RB_MIN(drawable_tree, &dev->drw_head);
info != NULL; info = next) {
next = RB_NEXT(drawable_tree, &dev->drw_head, info);
RB_REMOVE(drawable_tree, &dev->drw_head,
(struct bsd_drm_drawable_info *)info);
DRM_SPINUNLOCK(&dev->drw_lock);
free_unr(dev->drw_unrhdr, info->handle);
drm_free(info, sizeof(struct bsd_drm_drawable_info),
DRM_MEM_DRAWABLE);
DRM_SPINLOCK(&dev->drw_lock);
}
DRM_SPINUNLOCK(&dev->drw_lock);
}

View file

@ -50,22 +50,13 @@ static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist);
#ifdef __FreeBSD__
#define DRIVER_SOFTC(unit) \
((struct drm_device *)devclass_get_softc(drm_devclass, unit))
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
#if __FreeBSD_version > 502127
MODULE_DEPEND(drm, mem, 1, 1, 1);
#endif
#endif /* __FreeBSD__ */
#if defined(__NetBSD__) || defined(__OpenBSD__)
#define DRIVER_SOFTC(unit) \
((struct drm_device *)device_lookup(&drm_cd, unit))
#endif /* __NetBSD__ || __OpenBSD__ */
static drm_ioctl_desc_t drm_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@ -129,27 +120,15 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
#ifdef __FreeBSD__
static struct cdevsw drm_cdevsw = {
#if __FreeBSD_version >= 502103
.d_version = D_VERSION,
#endif
.d_open = drm_open,
.d_close = drm_close,
.d_read = drm_read,
.d_ioctl = drm_ioctl,
.d_poll = drm_poll,
.d_mmap = drm_mmap,
.d_name = "drm",
#if __FreeBSD_version >= 502103
.d_flags = D_TRACKCLOSE | D_NEEDGIANT,
#else
.d_maj = 145,
.d_flags = D_TRACKCLOSE,
#endif
#if __FreeBSD_version < 500000
.d_bmaj = -1
#endif
.d_flags = D_TRACKCLOSE | D_NEEDGIANT
};
int drm_probe(device_t dev, drm_pci_id_list_t *idlist)
@ -202,13 +181,12 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
DRM_DEV_GID,
DRM_DEV_MODE,
"dri/card%d", unit);
#if __FreeBSD_version >= 500000
mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
mtx_init(&dev->tsk_lock, "drmtsk", NULL, MTX_DEF);
#endif
id_entry = drm_find_description(pci_get_vendor(dev->device),
pci_get_device(dev->device), idlist);
@ -229,147 +207,6 @@ int drm_detach(device_t dev)
devclass_t drm_devclass;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
static struct cdevsw drm_cdevsw = {
drm_open,
drm_close,
drm_read,
nowrite,
drm_ioctl,
nostop,
notty,
drm_poll,
drm_mmap,
nokqfilter,
D_TTY
};
int drm_refcnt = 0;
#if defined(__NetBSD__) && __NetBSD_Version__ >= 106080000
MOD_DEV("drm", DRIVER_NAME, NULL, -1, &drm_cdevsw, CDEV_MAJOR);
#else
MOD_DEV("drm", LM_DT_CHAR, CDEV_MAJOR, &drm_cdevsw);
#endif
int drm_lkmentry(struct lkm_table *lkmtp, int cmd, int ver);
static int drm_lkmhandle(struct lkm_table *lkmtp, int cmd);
int drm_modprobe(void);
int drm_probe(struct pci_attach_args *pa);
void drm_attach(struct pci_attach_args *pa, dev_t kdev);
int drm_lkmentry(struct lkm_table *lkmtp, int cmd, int ver) {
DISPATCH(lkmtp, cmd, ver, drm_lkmhandle, drm_lkmhandle, drm_lkmhandle);
}
static int drm_lkmhandle(struct lkm_table *lkmtp, int cmd)
{
int error = 0;
switch(cmd) {
case LKM_E_LOAD:
if (lkmexists(lkmtp))
return EEXIST;
if(drm_modprobe())
return 0;
return 1;
case LKM_E_UNLOAD:
if (drm_refcnt > 0)
return (EBUSY);
break;
case LKM_E_STAT:
break;
default:
error = EIO;
break;
}
return error;
}
int drm_modprobe(void)
{
struct pci_attach_args pa;
int error;
error = pci_find_device(&pa, drm_probe, idlist);
if (error != 0)
drm_attach(&pa, 0);
return error;
}
int drm_probe(struct pci_attach_args *pa, drm_pci_id_list_t idlist)
{
const char *desc;
drm_pci_id_list_t *id_entry;
id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
PCI_PRODUCT(pa->pa_id), idlist);
if (id_entry != NULL) {
return 1;
}
return 0;
}
void drm_attach(struct pci_attach_args *pa, dev_t kdev,
drm_pci_id_list_t *idlist)
{
int i;
struct drm_device *dev;
drm_pci_id_list_t *id_entry;
config_makeroom(kdev, &drm_cd);
drm_cd.cd_devs[(kdev)] = malloc(sizeof(struct drm_device),
M_DRM, M_WAITOK);
dev = DRIVER_SOFTC(kdev);
memset(dev, 0, sizeof(struct drm_device));
memcpy(&dev->pa, pa, sizeof(dev->pa));
dev->irq = pa->pa_intrline;
dev->pci_domain = 0;
dev->pci_bus = pa->pa_bus;
dev->pci_slot = pa->pa_device;
dev->pci_func = pa->pa_function;
dev->dma_tag = pa->pa_dmat;
id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
PCI_PRODUCT(pa->pa_id), idlist);
dev->driver.pci_id_entry = id_entry;
DRM_INFO("%s", id_entry->name);
drm_load(dev);
}
int drm_detach(struct device *self, int flags)
{
drm_unload((struct drm_device *)self);
return 0;
}
int drm_activate(struct device *self, enum devact act)
{
switch (act) {
case DVACT_ACTIVATE:
return (EOPNOTSUPP);
break;
case DVACT_DEACTIVATE:
/* FIXME */
break;
}
return (0);
}
#endif /* __NetBSD__ || __OpenBSD__ */
drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist)
{
@ -393,22 +230,22 @@ static int drm_firstopen(struct drm_device *dev)
/* prebuild the SAREA */
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_CONTAINS_LOCK, &map);
_DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
if (dev->driver.firstopen)
dev->driver.firstopen(dev);
if (dev->driver->firstopen)
dev->driver->firstopen(dev);
dev->buf_use = 0;
if (dev->driver.use_dma) {
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
i = drm_dma_setup(dev);
if (i != 0)
return i;
}
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
@ -419,13 +256,9 @@ static int drm_firstopen(struct drm_device *dev)
dev->last_context = 0;
dev->if_version = 0;
#ifdef __FreeBSD__
dev->buf_sigio = NULL;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
dev->buf_pgid = 0;
#endif
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
return 0;
}
@ -438,39 +271,43 @@ static int drm_lastclose(struct drm_device *dev)
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
if (dev->driver.lastclose != NULL)
dev->driver.lastclose(dev);
if (dev->driver->lastclose != NULL)
dev->driver->lastclose(dev);
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if ( dev->unique ) {
if (dev->unique) {
free(dev->unique, M_DRM);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
free(pt, M_DRM);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
/* Clear AGP information */
if ( dev->agp ) {
DRM_UNLOCK();
drm_drawable_free_all(dev);
DRM_LOCK();
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp intact until
* drm_unload is called.
*/
for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if ( entry->bound )
if (entry->bound)
drm_agp_unbind_memory(entry->handle);
drm_agp_free_memory(entry->handle);
free(entry, M_DRM);
@ -494,7 +331,7 @@ static int drm_lastclose(struct drm_device *dev)
}
drm_dma_takedown(dev);
if ( dev->lock.hw_lock ) {
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.file_priv = NULL;
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
@ -507,10 +344,10 @@ static int drm_load(struct drm_device *dev)
{
int i, retcode;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
dev->irq = pci_get_irq(dev->device);
#if defined(__FreeBSD__) && __FreeBSD_version >= 700053
#if __FreeBSD_version >= 700053
dev->pci_domain = pci_get_domain(dev->device);
#else
dev->pci_domain = 0;
@ -525,9 +362,7 @@ static int drm_load(struct drm_device *dev)
TAILQ_INIT(&dev->maplist);
drm_mem_init();
#ifdef __FreeBSD__
drm_sysctl_init(dev);
#endif
TAILQ_INIT(&dev->files);
dev->counters = 6;
@ -538,13 +373,13 @@ static int drm_load(struct drm_device *dev)
dev->types[4] = _DRM_STAT_LOCKS;
dev->types[5] = _DRM_STAT_UNLOCKS;
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
atomic_set( &dev->counts[i], 0 );
for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
if (dev->driver.load != NULL) {
if (dev->driver->load != NULL) {
DRM_LOCK();
/* Shared code returns -errno. */
retcode = -dev->driver.load(dev,
retcode = -dev->driver->load(dev,
dev->id_entry->driver_private);
if (pci_enable_busmaster(dev->device))
DRM_ERROR("Request to enable bus-master failed.\n");
@ -553,10 +388,11 @@ static int drm_load(struct drm_device *dev)
goto error;
}
if (dev->driver.use_agp) {
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init();
if (dev->driver.require_agp && dev->agp == NULL) {
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
dev->agp == NULL) {
DRM_ERROR("Card isn't AGP, or couldn't initialize "
"AGP.\n");
retcode = ENOMEM;
@ -582,30 +418,27 @@ static int drm_load(struct drm_device *dev)
}
DRM_INFO("Initialized %s %d.%d.%d %s\n",
dev->driver.name,
dev->driver.major,
dev->driver.minor,
dev->driver.patchlevel,
dev->driver.date);
dev->driver->name,
dev->driver->major,
dev->driver->minor,
dev->driver->patchlevel,
dev->driver->date);
return 0;
error:
#ifdef __FreeBSD__
drm_sysctl_cleanup(dev);
#endif
DRM_LOCK();
drm_lastclose(dev);
DRM_UNLOCK();
#ifdef __FreeBSD__
destroy_dev(dev->devnode);
#if __FreeBSD_version >= 500000
mtx_destroy(&dev->tsk_lock);
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->dev_lock);
#endif
#endif
return retcode;
}
@ -613,12 +446,10 @@ static void drm_unload(struct drm_device *dev)
{
int i;
DRM_DEBUG( "\n" );
DRM_DEBUG("\n");
#ifdef __FreeBSD__
drm_sysctl_cleanup(dev);
destroy_dev(dev->devnode);
#endif
drm_ctxbitmap_cleanup(dev);
@ -647,13 +478,16 @@ static void drm_unload(struct drm_device *dev)
dev->pcir[i] = NULL;
}
if ( dev->agp ) {
if (dev->agp) {
free(dev->agp, M_DRM);
dev->agp = NULL;
}
if (dev->driver.unload != NULL)
dev->driver.unload(dev);
if (dev->driver->unload != NULL) {
DRM_LOCK();
dev->driver->unload(dev);
DRM_UNLOCK();
}
delete_unrhdr(dev->drw_unrhdr);
@ -662,18 +496,17 @@ static void drm_unload(struct drm_device *dev)
if (pci_disable_busmaster(dev->device))
DRM_ERROR("Request to disable bus-master failed.\n");
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
mtx_destroy(&dev->tsk_lock);
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->dev_lock);
#endif
}
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_version_t *version = data;
struct drm_version *version = data;
int len;
#define DRM_COPY( name, value ) \
@ -685,13 +518,13 @@ int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
return EFAULT; \
}
version->version_major = dev->driver.major;
version->version_minor = dev->driver.minor;
version->version_patchlevel = dev->driver.patchlevel;
version->version_major = dev->driver->major;
version->version_minor = dev->driver->minor;
version->version_patchlevel = dev->driver->patchlevel;
DRM_COPY(version->name, dev->driver.name);
DRM_COPY(version->date, dev->driver.date);
DRM_COPY(version->desc, dev->driver.desc);
DRM_COPY(version->name, dev->driver->name);
DRM_COPY(version->date, dev->driver->date);
DRM_COPY(version->desc, dev->driver->desc);
return 0;
}
@ -703,17 +536,15 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
dev = DRIVER_SOFTC(minor(kdev));
DRM_DEBUG( "open_count = %d\n", dev->open_count );
DRM_DEBUG("open_count = %d\n", dev->open_count);
retcode = drm_open_helper(kdev, flags, fmt, p, dev);
if ( !retcode ) {
atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
if (!retcode) {
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
DRM_LOCK();
#ifdef __FreeBSD__
device_busy(dev->device);
#endif
if ( !dev->open_count++ )
if (!dev->open_count++)
retcode = drm_firstopen(dev);
DRM_UNLOCK();
}
@ -721,103 +552,76 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
return retcode;
}
int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
void drm_close(void *data)
{
struct drm_device *dev = drm_get_device_from_kdev(kdev);
drm_file_t *file_priv;
struct drm_file *file_priv = data;
struct drm_device *dev = file_priv->dev;
int retcode = 0;
DRM_DEBUG( "open_count = %d\n", dev->open_count );
DRM_DEBUG("open_count = %d\n", dev->open_count);
DRM_LOCK();
file_priv = drm_find_file_by_proc(dev, p);
if (!file_priv) {
DRM_UNLOCK();
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
if (--file_priv->refs != 0)
goto done;
if (dev->driver.preclose != NULL)
dev->driver.preclose(dev, file_priv);
if (dev->driver->preclose != NULL)
dev->driver->preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
*/
#ifdef __FreeBSD__
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
DRM_CURRENTPID, (long)dev->device, dev->open_count );
#elif defined(__NetBSD__) || defined(__OpenBSD__)
DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
DRM_CURRENTPID, (long)&dev->device, dev->open_count);
#endif
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
DRM_CURRENTPID, (long)dev->device, dev->open_count);
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.file_priv == file_priv) {
DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
DRM_CURRENTPID,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
if (dev->driver.reclaim_buffers_locked != NULL)
dev->driver.reclaim_buffers_locked(dev, file_priv);
if (dev->driver->reclaim_buffers_locked != NULL)
dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
} else if (dev->driver.reclaim_buffers_locked != NULL &&
} else if (dev->driver->reclaim_buffers_locked != NULL &&
dev->lock.hw_lock != NULL) {
/* The lock is required to reclaim buffers */
for (;;) {
if ( !dev->lock.hw_lock ) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
retcode = EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
/* Contention */
retcode = mtx_sleep((void *)&dev->lock.lock_queue,
&dev->dev_lock, PZERO | PCATCH, "drmlk2", 0);
#else
retcode = tsleep((void *)&dev->lock.lock_queue,
PZERO | PCATCH, "drmlk2", 0);
#endif
if (retcode)
break;
}
if (retcode == 0) {
dev->driver.reclaim_buffers_locked(dev, file_priv);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
}
if (dev->driver.use_dma && !dev->driver.reclaim_buffers_locked)
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked)
drm_reclaim_buffers(dev, file_priv);
#if defined (__FreeBSD__) && (__FreeBSD_version >= 500000)
funsetown(&dev->buf_sigio);
#elif defined(__FreeBSD__)
funsetown(dev->buf_sigio);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
dev->buf_pgid = 0;
#endif /* __NetBSD__ || __OpenBSD__ */
if (dev->driver.postclose != NULL)
dev->driver.postclose(dev, file_priv);
if (dev->driver->postclose != NULL)
dev->driver->postclose(dev, file_priv);
TAILQ_REMOVE(&dev->files, file_priv, link);
free(file_priv, M_DRM);
@ -825,18 +629,13 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
* End inline drm_release
*/
done:
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
#ifdef __FreeBSD__
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
device_unbusy(dev->device);
#endif
if (--dev->open_count == 0) {
retcode = drm_lastclose(dev);
}
DRM_UNLOCK();
return retcode;
}
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
@ -850,55 +649,34 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
int nr = DRM_IOCTL_NR(cmd);
int is_driver_ioctl = 0;
drm_file_t *file_priv;
struct drm_file *file_priv;
DRM_LOCK();
file_priv = drm_find_file_by_proc(dev, p);
retcode = devfs_get_cdevpriv((void **)&file_priv);
DRM_UNLOCK();
if (file_priv == NULL) {
if (retcode != 0) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
#ifdef __FreeBSD__
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
DRM_CURRENTPID, cmd, nr, (long)dev->device,
file_priv->authenticated );
#elif defined(__NetBSD__) || defined(__OpenBSD__)
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
DRM_CURRENTPID, cmd, nr, (long)&dev->device,
file_priv->authenticated );
#endif
file_priv->authenticated);
switch (cmd) {
case FIONBIO:
case FIOASYNC:
return 0;
#ifdef __FreeBSD__
case FIOSETOWN:
return fsetown(*(int *)data, &dev->buf_sigio);
case FIOGETOWN:
#if (__FreeBSD_version >= 500000)
*(int *) data = fgetown(&dev->buf_sigio);
#else
*(int *) data = fgetown(dev->buf_sigio);
#endif
return 0;
#endif /* __FreeBSD__ */
#if defined(__NetBSD__) || defined(__OpenBSD__)
case TIOCSPGRP:
dev->buf_pgid = *(int *)data;
return 0;
case TIOCGPGRP:
*(int *)data = dev->buf_pgid;
return 0;
#endif /* __NetBSD__ */
}
if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
@ -911,18 +689,18 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
nr -= DRM_COMMAND_BASE;
if (nr > dev->driver.max_ioctl) {
if (nr > dev->driver->max_ioctl) {
DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
nr, dev->driver.max_ioctl);
nr, dev->driver->max_ioctl);
return EINVAL;
}
ioctl = &dev->driver.ioctls[nr];
ioctl = &dev->driver->ioctls[nr];
is_driver_ioctl = 1;
}
func = ioctl->func;
if (func == NULL) {
DRM_DEBUG( "no function\n" );
DRM_DEBUG("no function\n");
return EINVAL;
}

View file

@ -36,31 +36,12 @@
#include "drmP.h"
drm_file_t *drm_find_file_by_proc(struct drm_device *dev, DRM_STRUCTPROC *p)
{
#if __FreeBSD_version >= 500021
uid_t uid = p->td_ucred->cr_svuid;
pid_t pid = p->td_proc->p_pid;
#else
uid_t uid = p->p_cred->p_svuid;
pid_t pid = p->p_pid;
#endif
drm_file_t *priv;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
TAILQ_FOREACH(priv, &dev->files, link)
if (priv->pid == pid && priv->uid == uid)
return priv;
return NULL;
}
/* drm_open_helper is called whenever a process opens /dev/drm. */
int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
struct drm_device *dev)
{
int m = minor(kdev);
drm_file_t *priv;
struct drm_file *priv;
int m = minor(kdev);
int retcode;
if (flags & O_EXCL)
@ -69,50 +50,44 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
DRM_DEBUG("pid = %d, minor = %d\n", DRM_CURRENTPID, m);
DRM_LOCK();
priv = drm_find_file_by_proc(dev, p);
if (priv) {
priv->refs++;
} else {
priv = malloc(sizeof(*priv), M_DRM, M_NOWAIT | M_ZERO);
if (priv == NULL) {
DRM_UNLOCK();
return ENOMEM;
}
#if __FreeBSD_version >= 500000
priv->uid = p->td_ucred->cr_svuid;
priv->pid = p->td_proc->p_pid;
#else
priv->uid = p->p_cred->p_svuid;
priv->pid = p->p_pid;
#endif
priv->refs = 1;
priv->minor = m;
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->authenticated = DRM_SUSER(p);
if (dev->driver.open) {
/* shared code returns -errno */
retcode = -dev->driver.open(dev, priv);
if (retcode != 0) {
free(priv, M_DRM);
DRM_UNLOCK();
return retcode;
}
}
/* first opener automatically becomes master */
priv->master = TAILQ_EMPTY(&dev->files);
TAILQ_INSERT_TAIL(&dev->files, priv, link);
priv = malloc(sizeof(*priv), M_DRM, M_NOWAIT | M_ZERO);
if (priv == NULL) {
return ENOMEM;
}
retcode = devfs_set_cdevpriv(priv, drm_close);
if (retcode != 0) {
free(priv, M_DRM);
return retcode;
}
DRM_LOCK();
priv->dev = dev;
priv->uid = p->td_ucred->cr_svuid;
priv->pid = p->td_proc->p_pid;
priv->minor = m;
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->authenticated = DRM_SUSER(p);
if (dev->driver->open) {
/* shared code returns -errno */
retcode = -dev->driver->open(dev, priv);
if (retcode != 0) {
devfs_clear_cdevpriv();
free(priv, M_DRM);
DRM_UNLOCK();
return retcode;
}
}
/* first opener automatically becomes master */
priv->master = TAILQ_EMPTY(&dev->files);
TAILQ_INSERT_TAIL(&dev->files, priv, link);
DRM_UNLOCK();
#ifdef __FreeBSD__
kdev->si_drv1 = dev;
#endif
return 0;
}
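
Editor's note: the drm_open_helper()/drm_close()/drm_ioctl() changes above move the BSD DRM from its own per-process file list to FreeBSD's per-open-file "cdevpriv" mechanism. The following is a minimal standalone sketch of that pattern, not part of the patch; the example_* names and the struct contents are hypothetical, only devfs_set_cdevpriv(), devfs_get_cdevpriv() and the destructor callback are real FreeBSD kernel interfaces.

/*
 * Minimal cdevpriv sketch (illustration only, hypothetical driver names).
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/malloc.h>

MALLOC_DEFINE(M_EXAMPLE, "example", "per-open private data");

struct example_priv {
	int	authenticated;		/* hypothetical per-open state */
};

static void
example_priv_dtor(void *data)
{
	/* Runs once, when the last reference to this open file goes away. */
	free(data, M_EXAMPLE);
}

static int
example_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
	struct example_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_EXAMPLE, M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return (ENOMEM);
	/* Associate priv with this open file; the dtor is called on close. */
	error = devfs_set_cdevpriv(priv, example_priv_dtor);
	if (error != 0)
		free(priv, M_EXAMPLE);
	return (error);
}

static int
example_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	struct example_priv *priv;
	int error;

	/* Recover the per-open data registered in example_open(). */
	error = devfs_get_cdevpriv((void **)&priv);
	if (error != 0)
		return (error);
	return (priv->authenticated ? 0 : EACCES);
}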

View file

@ -44,7 +44,7 @@
int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_unique_t *u = data;
struct drm_unique *u = data;
if (u->unique_len >= dev->unique_len) {
if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
@ -61,7 +61,7 @@ int drm_getunique(struct drm_device *dev, void *data,
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_unique_t *u = data;
struct drm_unique *u = data;
int domain, bus, slot, func, ret;
char *busid;
@ -141,7 +141,7 @@ drm_set_busid(struct drm_device *dev)
int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_map_t *map = data;
struct drm_map *map = data;
drm_local_map_t *mapinlist;
int idx;
int i = 0;
@ -155,7 +155,7 @@ int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
if (i==idx) {
if (i == idx) {
map->offset = mapinlist->offset;
map->size = mapinlist->size;
map->type = mapinlist->type;
@ -178,16 +178,15 @@ int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_client_t *client = data;
drm_file_t *pt;
int idx;
int i = 0;
struct drm_client *client = data;
struct drm_file *pt;
int idx;
int i = 0;
idx = client->idx;
DRM_LOCK();
TAILQ_FOREACH(pt, &dev->files, link) {
if (i==idx)
{
if (i == idx) {
client->auth = pt->authenticated;
client->pid = pt->pid;
client->uid = pt->uid;
@ -205,21 +204,20 @@ int drm_getclient(struct drm_device *dev, void *data,
int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_stats_t *stats = data;
struct drm_stats *stats = data;
int i;
memset(stats, 0, sizeof(drm_stats_t));
memset(stats, 0, sizeof(struct drm_stats));
DRM_LOCK();
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value
= (dev->lock.hw_lock
? dev->lock.hw_lock->lock : 0);
stats->data[i].value =
(dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
else
stats->data[i].value = atomic_read(&dev->counts[i]);
stats->data[i].type = dev->types[i];
stats->data[i].type = dev->types[i];
}
stats->count = dev->counters;
@ -235,8 +233,8 @@ int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_setversion(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_set_version_t *sv = data;
drm_set_version_t ver;
struct drm_set_version *sv = data;
struct drm_set_version ver;
int if_version;
/* Save the incoming data, and set the response before continuing
@ -245,8 +243,8 @@ int drm_setversion(struct drm_device *dev, void *data,
ver = *sv;
sv->drm_di_major = DRM_IF_MAJOR;
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver.major;
sv->drm_dd_minor = dev->driver.minor;
sv->drm_dd_major = dev->driver->major;
sv->drm_dd_minor = dev->driver->minor;
if (ver.drm_di_major != -1) {
if (ver.drm_di_major != DRM_IF_MAJOR ||
@ -265,9 +263,9 @@ int drm_setversion(struct drm_device *dev, void *data,
}
if (ver.drm_dd_major != -1) {
if (ver.drm_dd_major != dev->driver.major ||
if (ver.drm_dd_major != dev->driver->major ||
ver.drm_dd_minor < 0 ||
ver.drm_dd_minor > dev->driver.minor)
ver.drm_dd_minor > dev->driver->minor)
{
return EINVAL;
}

View file

@ -38,7 +38,7 @@ static void drm_locked_task(void *context, int pending __unused);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_irq_busid_t *irq = data;
struct drm_irq_busid *irq = data;
if ((irq->busnum >> 8) != dev->pci_domain ||
(irq->busnum & 0xff) != dev->pci_bus ||
@ -49,22 +49,20 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
irq->irq = dev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
irq->busnum, irq->devnum, irq->funcnum, irq->irq);
irq->busnum, irq->devnum, irq->funcnum, irq->irq);
return 0;
}
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
static irqreturn_t
drm_irq_handler_wrap(DRM_IRQ_ARGS)
{
struct drm_device *dev = arg;
DRM_SPINLOCK(&dev->irq_lock);
dev->driver.irq_handler(arg);
dev->driver->irq_handler(arg);
DRM_SPINUNLOCK(&dev->irq_lock);
}
#endif
static void vblank_disable_fn(void *arg)
{
@ -81,17 +79,18 @@ static void vblank_disable_fn(void *arg)
}
callout_deactivate(&dev->vblank_disable_timer);
DRM_DEBUG("vblank_disable_allowed=%d\n", dev->vblank_disable_allowed);
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
if (atomic_read(&dev->vblank[i].refcount) == 0 &&
dev->vblank[i].enabled) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
dev->last_vblank[i] =
dev->driver.get_vblank_counter(dev, i);
dev->driver.disable_vblank(dev, i);
dev->vblank_enabled[i] = 0;
dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
dev->driver->disable_vblank(dev, i);
dev->vblank[i].enabled = 0;
}
}
}
@ -102,7 +101,7 @@ static void drm_vblank_cleanup(struct drm_device *dev)
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
return;
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
callout_stop(&dev->vblank_disable_timer);
@ -112,20 +111,8 @@ static void drm_vblank_cleanup(struct drm_device *dev)
vblank_disable_fn((void *)dev);
drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
drm_free(dev->vblank, sizeof(struct drm_vblank_info) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
dev->num_crtcs, DRM_MEM_DRIVER);
dev->num_crtcs = 0;
}
@ -138,46 +125,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
dev->vblank = drm_calloc(num_crtcs, sizeof(struct drm_vblank_info),
DRM_MEM_DRIVER);
if (!dev->vbl_queue)
if (!dev->vblank)
goto err;
dev->vbl_sigs = drm_alloc(sizeof(struct drm_vbl_sig) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vbl_sigs)
goto err;
dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->_vblank_count)
goto err;
dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vblank_refcount)
goto err;
dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
DRM_MEM_DRIVER);
if (!dev->vblank_enabled)
goto err;
dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
if (!dev->last_vblank)
goto err;
dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
DRM_MEM_DRIVER);
if (!dev->vblank_inmodeset)
goto err;
DRM_DEBUG("\n");
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
DRM_INIT_WAITQUEUE(&dev->vbl_queue[i]);
TAILQ_INIT(&dev->vbl_sigs[i]);
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
DRM_INIT_WAITQUEUE(&dev->vblank[i].queue);
TAILQ_INIT(&dev->vblank[i].sigs);
atomic_set(&dev->vblank[i].count, 0);
atomic_set(&dev->vblank[i].refcount, 0);
}
dev->vblank_disable_allowed = 0;
@ -192,14 +152,11 @@ err:
int drm_irq_install(struct drm_device *dev)
{
int retcode;
#ifdef __NetBSD__
pci_intr_handle_t ih;
#endif
if (dev->irq == 0 || dev->dev_private == NULL)
return EINVAL;
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
DRM_DEBUG("irq=%d\n", dev->irq);
DRM_LOCK();
if (dev->irq_enabled) {
@ -210,12 +167,11 @@ int drm_irq_install(struct drm_device *dev)
dev->context_flag = 0;
/* Before installing handler */
dev->driver.irq_preinstall(dev);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
DRM_UNLOCK();
/* Install handler */
#ifdef __FreeBSD__
/* Install handler */
dev->irqrid = 0;
dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
&dev->irqrid, RF_SHAREABLE);
@ -234,22 +190,10 @@ int drm_irq_install(struct drm_device *dev)
#endif
if (retcode != 0)
goto err;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
if (pci_intr_map(&dev->pa, &ih) != 0) {
retcode = ENOENT;
goto err;
}
dev->irqh = pci_intr_establish(&dev->pa.pa_pc, ih, IPL_TTY,
(irqreturn_t (*)(void *))dev->irq_handler, dev);
if (!dev->irqh) {
retcode = ENOENT;
goto err;
}
#endif
/* After installing handler */
/* After installing handler */
DRM_LOCK();
dev->driver.irq_postinstall(dev);
dev->driver->irq_postinstall(dev);
DRM_UNLOCK();
TASK_INIT(&dev->locked_task, 0, drm_locked_task, dev);
@ -257,44 +201,35 @@ int drm_irq_install(struct drm_device *dev)
err:
DRM_LOCK();
dev->irq_enabled = 0;
#ifdef ___FreeBSD__
if (dev->irqrid != 0) {
bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
dev->irqr);
dev->irqrid = 0;
}
#endif
DRM_UNLOCK();
return retcode;
}
int drm_irq_uninstall(struct drm_device *dev)
{
#ifdef __FreeBSD__
int irqrid;
#endif
if (!dev->irq_enabled)
return EINVAL;
dev->irq_enabled = 0;
#ifdef __FreeBSD__
irqrid = dev->irqrid;
dev->irqrid = 0;
#endif
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
DRM_DEBUG("irq=%d\n", dev->irq);
dev->driver.irq_uninstall(dev);
dev->driver->irq_uninstall(dev);
#ifdef __FreeBSD__
DRM_UNLOCK();
bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
bus_release_resource(dev->device, SYS_RES_IRQ, irqrid, dev->irqr);
DRM_LOCK();
#elif defined(__NetBSD__) || defined(__OpenBSD__)
pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
#endif
drm_vblank_cleanup(dev);
return 0;
@ -302,22 +237,22 @@ int drm_irq_uninstall(struct drm_device *dev)
int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_control_t *ctl = data;
struct drm_control *ctl = data;
int err;
switch ( ctl->func ) {
switch (ctl->func) {
case DRM_INST_HANDLER:
/* Handle drivers whose DRM used to require IRQ setup but
* no longer does.
*/
if (!dev->driver.use_irq)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->irq)
return EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
if (!dev->driver.use_irq)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
DRM_LOCK();
err = drm_irq_uninstall(dev);
@ -330,7 +265,7 @@ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
return atomic_read(&dev->_vblank_count[crtc]);
return atomic_read(&dev->vblank[crtc].count);
}
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
@ -344,19 +279,19 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
* here if the register is small or we had vblank interrupts off for
* a long time.
*/
cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
diff = cur_vblank - dev->vblank[crtc].last;
if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->last_vblank[crtc], cur_vblank, diff);
DRM_DEBUG("vblank[%d].last=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
atomic_add(diff, &dev->_vblank_count[crtc]);
atomic_add(diff, &dev->vblank[crtc].count);
}
int drm_vblank_get(struct drm_device *dev, int crtc)
@ -366,14 +301,14 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
atomic_add_acq_int(&dev->vblank_refcount[crtc], 1);
if (dev->vblank_refcount[crtc] == 1 &&
!dev->vblank_enabled[crtc]) {
ret = dev->driver.enable_vblank(dev, crtc);
atomic_add_acq_int(&dev->vblank[crtc].refcount, 1);
if (dev->vblank[crtc].refcount == 1 &&
!dev->vblank[crtc].enabled) {
ret = dev->driver->enable_vblank(dev, crtc);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
atomic_dec(&dev->vblank[crtc].refcount);
else {
dev->vblank_enabled[crtc] = 1;
dev->vblank[crtc].enabled = 1;
drm_update_vblank_count(dev, crtc);
}
}
@ -388,8 +323,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
/* Last user schedules interrupt disable */
atomic_subtract_acq_int(&dev->vblank_refcount[crtc], 1);
if (dev->vblank_refcount[crtc] == 0)
atomic_subtract_acq_int(&dev->vblank[crtc].refcount, 1);
if (dev->vblank[crtc].refcount == 0)
callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
(timeout_t *)vblank_disable_fn, (void *)dev);
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
@ -402,11 +337,13 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
unsigned long irqflags;
int crtc, ret = 0;
DRM_DEBUG("num_crtcs=%d\n", dev->num_crtcs);
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
goto out;
goto out;
crtc = modeset->crtc;
DRM_DEBUG("crtc=%d\n", crtc);
if (crtc >= dev->num_crtcs) {
ret = EINVAL;
goto out;
@ -421,16 +358,18 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
*/
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
if (!dev->vblank_inmodeset[crtc]) {
dev->vblank_inmodeset[crtc] = 1;
DRM_DEBUG("pre-modeset\n");
if (!dev->vblank[crtc].inmodeset) {
dev->vblank[crtc].inmodeset = 1;
drm_vblank_get(dev, crtc);
}
break;
case _DRM_POST_MODESET:
if (dev->vblank_inmodeset[crtc]) {
DRM_DEBUG("post-modeset\n");
if (dev->vblank[crtc].inmodeset) {
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
dev->vblank_inmodeset[crtc] = 0;
dev->vblank[crtc].inmodeset = 0;
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
drm_vblank_put(dev, crtc);
}
@ -446,7 +385,7 @@ out:
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_wait_vblank_t *vblwait = data;
union drm_wait_vblank *vblwait = data;
int ret = 0;
int flags, seq, crtc;
@ -469,7 +408,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
ret = drm_vblank_get(dev, crtc);
if (ret)
return ret;
return ret;
seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
@ -511,7 +450,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
DRM_LOCK();
/* shared code returns -errno */
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
((drm_vblank_count(dev, crtc)
- vblwait->request.sequence) <= (1 << 23)));
DRM_UNLOCK();
@ -546,7 +485,7 @@ void drm_vbl_send_signals(struct drm_device *dev, int crtc )
while (vbl_sig != NULL) {
drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);
if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
p = pfind(vbl_sig->pid);
if (p != NULL)
psignal(p, vbl_sig->signo);
@ -561,8 +500,8 @@ void drm_vbl_send_signals(struct drm_device *dev, int crtc )
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
atomic_inc(&dev->vblank[crtc].count);
DRM_WAKEUP(&dev->vblank[crtc].queue);
drm_vbl_send_signals(dev, crtc);
}
@ -574,7 +513,7 @@ static void drm_locked_task(void *context, int pending __unused)
DRM_LOCK(); /* XXX drm_lock_take() should do its own locking */
if (dev->locked_task_call == NULL ||
drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT) == 0) {
drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT) == 0) {
DRM_UNLOCK();
DRM_SPINUNLOCK(&dev->tsk_lock);
return;
@ -588,7 +527,7 @@ static void drm_locked_task(void *context, int pending __unused)
dev->locked_task_call(dev);
drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
dev->locked_task_call = NULL;
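
Editor's note: the drm_update_vblank_count() hunk earlier in this file recovers vblank events missed while interrupts were off by diffing a hardware counter that can wrap. A standalone illustration of that unsigned wraparound arithmetic follows; it is not part of the patch, and the 24-bit counter width is a hypothetical example.

/* Illustration only: wrap-safe missed-vblank count, 24-bit counter assumed. */
#include <stdio.h>
#include <stdint.h>

#define MAX_VBLANK_COUNT	(1u << 24)	/* hypothetical register width */

static uint32_t
missed_vblanks(uint32_t cur, uint32_t last)
{
	uint32_t diff = cur - last;

	/* The register wrapped while vblank interrupts were disabled. */
	if (cur < last)
		diff += MAX_VBLANK_COUNT;
	return diff;
}

int
main(void)
{
	/* Counter wrapped from 0xfffff0 to 0x000010: 32 frames missed. */
	printf("%u\n", missed_vblanks(0x000010, 0xfffff0));
	return 0;
}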

View file

@ -49,89 +49,28 @@
#include "drmP.h"
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old, new;
do {
old = *lock;
if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT;
else new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(struct drm_device *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old, new;
dev->lock.file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
return 1;
}
int drm_lock_free(struct drm_device *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old, new;
dev->lock.file_priv = NULL;
do {
old = *lock;
new = 0;
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
return 0;
}
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_lock_t *lock = data;
int ret = 0;
struct drm_lock *lock = data;
int ret = 0;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock->context);
return EINVAL;
}
return EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
lock->flags);
if (dev->driver.use_dma_queue && lock->context < 0)
return EINVAL;
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
lock->context < 0)
return EINVAL;
DRM_LOCK();
for (;;) {
if (drm_lock_take(&dev->lock.hw_lock->lock, lock->context)) {
if (drm_lock_take(&dev->lock, lock->context)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@ -139,13 +78,8 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/* Contention */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
PZERO | PCATCH, "drmlk2", 0);
#else
ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
"drmlk2", 0);
#endif
if (ret != 0)
break;
}
@ -157,16 +91,16 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
/* XXX: Add signal blocking here */
if (dev->driver.dma_quiescent != NULL &&
if (dev->driver->dma_quiescent != NULL &&
(lock->flags & _DRM_LOCK_QUIESCENT))
dev->driver.dma_quiescent(dev);
dev->driver->dma_quiescent(dev);
return 0;
}
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_lock_t *lock = data;
struct drm_lock *lock = data;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -190,12 +124,77 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
DRM_LOCK();
drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
if (drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
DRM_UNLOCK();
return 0;
}
int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
{
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
{
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
lock_data->file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
return 1;
}
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
lock_data->file_priv = NULL;
do {
old = *lock;
new = 0;
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
return 0;
}
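
Editor's note: drm_lock_take() and drm_lock_free() above manipulate a single lock word that packs the owning context id into the low bits with "held" and "contended" flags in the top two bits. The sketch below illustrates that encoding in userland C11 atomics; it is not the DRM implementation (the kernel uses atomic_cmpset_int() and also handles recursive takes), and the flag values are shown as assumptions mirroring drm.h.

/* Illustration only: lock-word encoding with a compare-and-swap take. */
#include <stdatomic.h>
#include <stdio.h>

#define LOCK_HELD		0x80000000u	/* assumed, mirrors drm.h */
#define LOCK_CONT		0x40000000u	/* assumed, mirrors drm.h */
#define LOCKING_CONTEXT(l)	((l) & ~(LOCK_HELD | LOCK_CONT))

static int
lock_take(_Atomic unsigned int *lock, unsigned int context)
{
	unsigned int old, new;

	do {
		old = atomic_load(lock);
		if (old & LOCK_HELD)
			new = old | LOCK_CONT;		/* record contention */
		else
			new = context | LOCK_HELD;	/* claim the lock */
	} while (!atomic_compare_exchange_weak(lock, &old, new));

	return new == (context | LOCK_HELD);		/* 1 if acquired */
}

static void
lock_free(_Atomic unsigned int *lock)
{
	atomic_store(lock, 0);	/* clears owner and both flag bits */
}

int
main(void)
{
	_Atomic unsigned int lock = 0;

	printf("take: %d\n", lock_take(&lock, 3));	/* 1: acquired  */
	printf("take: %d\n", lock_take(&lock, 4));	/* 0: contended */
	printf("owner: %u\n", LOCKING_CONTEXT(atomic_load(&lock)));
	lock_free(&lock);
	return 0;
}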

View file

@ -42,9 +42,6 @@ MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");
void drm_mem_init(void)
{
#if defined(__NetBSD__) || defined(__OpenBSD__)
malloc_type_attach(M_DRM);
#endif
}
void drm_mem_uninit(void)
@ -69,7 +66,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
if (pt == NULL)
return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
memcpy(pt, oldpt, DRM_MIN(oldsize, size));
free(oldpt, M_DRM);
}
return pt;
@ -80,29 +77,21 @@ void drm_free(void *pt, size_t size, int area)
free(pt, M_DRM);
}
void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map)
{
return pmap_mapdev_attr(map->offset, map->size, PAT_WRITE_COMBINING);
}
void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map)
{
#ifdef __FreeBSD__
return pmap_mapdev(map->offset, map->size);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
map->bst = dev->pa.pa_memt;
if (bus_space_map(map->bst, map->offset, map->size,
BUS_SPACE_MAP_LINEAR, &map->bsh))
return NULL;
return bus_space_vaddr(map->bst, map->bsh);
#endif
}
void drm_ioremapfree(drm_local_map_t *map)
{
#ifdef __FreeBSD__
pmap_unmapdev((vm_offset_t) map->handle, map->size);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
bus_space_unmap(map->bst, map->bsh, map->size);
#endif
}
#ifdef __FreeBSD__
int
drm_mtrr_add(unsigned long offset, size_t size, int flags)
{
@ -130,30 +119,3 @@ drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
return mem_range_attr_set(&mrdesc, &act);
}
#elif defined(__NetBSD__) || defined(__OpenBSD__)
int
drm_mtrr_add(unsigned long offset, size_t size, int flags)
{
struct mtrr mtrrmap;
int one = 1;
mtrrmap.base = offset;
mtrrmap.len = size;
mtrrmap.type = flags;
mtrrmap.flags = MTRR_VALID;
return mtrr_set(&mtrrmap, &one, NULL, MTRR_GETSET_KERNEL);
}
int
drm_mtrr_del(unsigned long offset, size_t size, int flags)
{
struct mtrr mtrrmap;
int one = 1;
mtrrmap.base = offset;
mtrrmap.len = size;
mtrrmap.type = flags;
mtrrmap.flags = 0;
return mtrr_set(&mtrrmap, &one, NULL, MTRR_GETSET_KERNEL);
}
#endif

View file

@ -34,7 +34,6 @@
/** \name PCI memory */
/*@{*/
#if defined(__FreeBSD__)
static void
drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
@ -46,7 +45,6 @@ drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error
KASSERT(nsegs == 1, ("drm_pci_busdma_callback: bad dma segment count"));
dmah->busaddr = segs[0].ds_addr;
}
#endif
/**
* \brief Allocate a physically contiguous DMA-accessible consistent
@ -70,8 +68,14 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
if (dmah == NULL)
return NULL;
#ifdef __FreeBSD__
DRM_UNLOCK();
/* Make sure we aren't holding locks here */
mtx_assert(&dev->dev_lock, MA_NOTOWNED);
if (mtx_owned(&dev->dev_lock))
DRM_ERROR("called while holding dev_lock\n");
mtx_assert(&dev->dma_lock, MA_NOTOWNED);
if (mtx_owned(&dev->dma_lock))
DRM_ERROR("called while holding dma_lock\n");
ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
@ -80,7 +84,6 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
&dmah->tag);
if (ret != 0) {
free(dmah, M_DRM);
DRM_LOCK();
return NULL;
}
@ -89,10 +92,9 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
if (ret != 0) {
bus_dma_tag_destroy(dmah->tag);
free(dmah, M_DRM);
DRM_LOCK();
return NULL;
}
DRM_LOCK();
ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
drm_pci_busdma_callback, dmah, 0);
if (ret != 0) {
@ -101,24 +103,6 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
free(dmah, M_DRM);
return NULL;
}
#elif defined(__NetBSD__)
ret = bus_dmamem_alloc(dev->dma_tag, size, align, PAGE_SIZE,
&dmah->seg, 1, &nsegs, BUS_DMA_NOWAIT);
if ((ret != 0) || (nsegs != 1)) {
free(dmah, M_DRM);
return NULL;
}
ret = bus_dmamem_map(dev->dma_tag, &dmah->seg, 1, size, &dmah->addr,
BUS_DMA_NOWAIT);
if (ret != 0) {
bus_dmamem_free(dev->dma_tag, &dmah->seg, 1);
free(dmah, M_DRM);
return NULL;
}
dmah->dmaaddr = h->seg.ds_addr;
#endif
return dmah;
}
@ -132,12 +116,8 @@ drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
if (dmah == NULL)
return;
#if defined(__FreeBSD__)
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
#elif defined(__NetBSD__)
bus_dmamem_free(dev->dma_tag, &dmah->seg, 1);
#endif
free(dmah, M_DRM);
}

View file

@ -45,27 +45,27 @@ void drm_sg_cleanup(drm_sg_mem_t *entry)
free(entry, M_DRM);
}
int drm_sg_alloc(struct drm_device * dev, drm_scatter_gather_t * request)
int drm_sg_alloc(struct drm_device * dev, struct drm_scatter_gather * request)
{
drm_sg_mem_t *entry;
unsigned long pages;
int i;
if ( dev->sg )
if (dev->sg)
return EINVAL;
entry = malloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO);
if ( !entry )
if (!entry)
return ENOMEM;
pages = round_page(request->size) / PAGE_SIZE;
DRM_DEBUG( "sg size=%ld pages=%ld\n", request->size, pages );
DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->busaddr = malloc(pages * sizeof(*entry->busaddr), M_DRM,
M_WAITOK | M_ZERO);
if ( !entry->busaddr ) {
if (!entry->busaddr) {
drm_sg_cleanup(entry);
return ENOMEM;
}
@ -81,7 +81,7 @@ int drm_sg_alloc(struct drm_device * dev, drm_scatter_gather_t * request)
entry->busaddr[i] = vtophys(entry->handle + i * PAGE_SIZE);
}
DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
entry->virtual = (void *)entry->handle;
request->handle = entry->handle;
@ -101,10 +101,10 @@ int drm_sg_alloc(struct drm_device * dev, drm_scatter_gather_t * request)
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_scatter_gather_t *request = data;
struct drm_scatter_gather *request = data;
int ret;
DRM_DEBUG( "%s\n", __FUNCTION__ );
DRM_DEBUG("%s\n", __FUNCTION__);
ret = drm_sg_alloc(dev, request);
return ret;
@ -112,7 +112,7 @@ int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_scatter_gather_t *request = data;
struct drm_scatter_gather *request = data;
drm_sg_mem_t *entry;
DRM_LOCK();
@ -120,10 +120,10 @@ int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev->sg = NULL;
DRM_UNLOCK();
if ( !entry || entry->handle != request->handle )
if (!entry || entry->handle != request->handle)
return EINVAL;
DRM_DEBUG( "sg free virtual = 0x%lx\n", entry->handle );
DRM_DEBUG("sg free virtual = 0x%lx\n", entry->handle);
drm_sg_cleanup(entry);

View file

@ -132,7 +132,7 @@ static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
int retcode;
int hasunique = 0;
DRM_SYSCTL_PRINT("%s 0x%x", dev->driver.name, dev2udev(dev->devnode));
DRM_SYSCTL_PRINT("%s 0x%x", dev->driver->name, dev2udev(dev->devnode));
DRM_LOCK();
if (dev->unique) {
@ -268,7 +268,7 @@ done:
static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
drm_file_t *priv, *tempprivs;
struct drm_file *priv, *tempprivs;
char buf[128];
int retcode;
int privcount, i;
@ -279,7 +279,7 @@ static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
TAILQ_FOREACH(priv, &dev->files, link)
privcount++;
tempprivs = malloc(sizeof(drm_file_t) * privcount, M_DRM, M_NOWAIT);
tempprivs = malloc(sizeof(struct drm_file) * privcount, M_DRM, M_NOWAIT);
if (tempprivs == NULL) {
DRM_UNLOCK();
return ENOMEM;

View file

@ -28,34 +28,24 @@
#include "drmP.h"
#include "drm.h"
#if defined(__FreeBSD__) && __FreeBSD_version >= 500102
int drm_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr,
int prot)
#elif defined(__FreeBSD__)
int drm_mmap(dev_t kdev, vm_offset_t offset, int prot)
#elif defined(__NetBSD__) || defined(__OpenBSD__)
paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
#endif
{
struct drm_device *dev = drm_get_device_from_kdev(kdev);
struct drm_file *file_priv;
drm_local_map_t *map;
drm_file_t *priv;
drm_map_type_t type;
#ifdef __FreeBSD__
enum drm_map_type type;
vm_paddr_t phys;
#else
paddr_t phys;
#endif
DRM_LOCK();
priv = drm_find_file_by_proc(dev, DRM_CURPROC);
TAILQ_FOREACH(file_priv, &dev->files, link)
if (file_priv->pid == curthread->td_proc->p_pid &&
file_priv->uid == curthread->td_ucred->cr_svuid &&
file_priv->authenticated == 1)
break;
DRM_UNLOCK();
if (priv == NULL) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
if (!priv->authenticated)
if (!file_priv)
return EACCES;
if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
@ -68,12 +58,8 @@ paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
unsigned long phys = dma->pagelist[page];
DRM_SPINUNLOCK(&dev->dma_lock);
#if defined(__FreeBSD__) && __FreeBSD_version >= 500102
*paddr = phys;
return 0;
#else
return atop(phys);
#endif
} else {
DRM_SPINUNLOCK(&dev->dma_lock);
return -1;
@ -124,11 +110,7 @@ paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
return -1; /* This should never happen. */
}
#if defined(__FreeBSD__) && __FreeBSD_version >= 500102
*paddr = phys;
return 0;
#else
return atop(phys);
#endif
}

View file

@ -68,40 +68,36 @@ static int i915_resume(device_t nbdev)
static void i915_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_i915_private_t);
dev->driver.load = i915_driver_load;
dev->driver.unload = i915_driver_unload;
dev->driver.firstopen = i915_driver_firstopen;
dev->driver.preclose = i915_driver_preclose;
dev->driver.lastclose = i915_driver_lastclose;
dev->driver.device_is_agp = i915_driver_device_is_agp;
dev->driver.get_vblank_counter = i915_get_vblank_counter;
dev->driver.enable_vblank = i915_enable_vblank;
dev->driver.disable_vblank = i915_disable_vblank;
dev->driver.irq_preinstall = i915_driver_irq_preinstall;
dev->driver.irq_postinstall = i915_driver_irq_postinstall;
dev->driver.irq_uninstall = i915_driver_irq_uninstall;
dev->driver.irq_handler = i915_driver_irq_handler;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_IRQ;
dev->driver.ioctls = i915_ioctls;
dev->driver.max_ioctl = i915_max_ioctl;
dev->driver->buf_priv_size = sizeof(drm_i915_private_t);
dev->driver->load = i915_driver_load;
dev->driver->unload = i915_driver_unload;
dev->driver->firstopen = i915_driver_firstopen;
dev->driver->preclose = i915_driver_preclose;
dev->driver->lastclose = i915_driver_lastclose;
dev->driver->device_is_agp = i915_driver_device_is_agp;
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
dev->driver->irq_preinstall = i915_driver_irq_preinstall;
dev->driver->irq_postinstall = i915_driver_irq_postinstall;
dev->driver->irq_uninstall = i915_driver_irq_uninstall;
dev->driver->irq_handler = i915_driver_irq_handler;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = i915_ioctls;
dev->driver->max_ioctl = i915_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.require_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver.use_vbl_irq2 = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
i915_probe(device_t dev)
{
@ -114,17 +110,33 @@ i915_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
i915_configure(dev);
return drm_attach(nbdev, i915_pciidlist);
}
static int
i915_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t i915_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, i915_probe),
DEVMETHOD(device_attach, i915_attach),
DEVMETHOD(device_suspend, i915_suspend),
DEVMETHOD(device_resume, i915_resume),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, i915_detach),
{ 0, 0 }
};
@ -146,7 +158,3 @@ DRIVER_MODULE(i915, vgapci, i915_driver, drm_devclass, 0, 0);
DRIVER_MODULE(i915, agp, i915_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(i915, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
CFDRIVER_DECL(i915, DV_TTY, NULL);
#endif
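The same lifecycle change repeats for every BSD driver below (mach64, mga, r128, radeon, savage, sis, tdfx, via): the driver descriptor moves from an embedded struct to a heap allocation referenced through dev->driver, the per-feature use_* fields collapse into the driver_features bitmask, and each driver gains a detach routine that frees the descriptor after drm_detach(). A minimal sketch of the pattern, with "foo" as a placeholder driver and a malloc failure check that this patch does not add:

#include "drmP.h"
#include "drm.h"

static int
foo_attach(device_t nbdev)
{
	struct drm_device *dev = device_get_softc(nbdev);

	bzero(dev, sizeof(struct drm_device));
	/* Driver hooks now live behind a pointer, matching the
	 * dev->driver-> accessors used in foo_configure(). */
	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM,
	    M_NOWAIT | M_ZERO);
	if (dev->driver == NULL)
		return ENOMEM;		/* assumption: not part of this patch */
	foo_configure(dev);
	return drm_attach(nbdev, foo_pciidlist);
}

static int
foo_detach(device_t nbdev)
{
	struct drm_device *dev = device_get_softc(nbdev);
	int ret;

	/* Tear the device down first; only then is it safe to free the
	 * descriptor the DRM core may still be using. */
	ret = drm_detach(nbdev);
	free(dev->driver, M_DRM);
	return ret;
}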

View file

@ -46,36 +46,32 @@ static drm_pci_id_list_t mach64_pciidlist[] = {
static void mach64_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = 1; /* No dev_priv */
dev->driver.lastclose = mach64_driver_lastclose;
dev->driver.get_vblank_counter = mach64_get_vblank_counter;
dev->driver.enable_vblank = mach64_enable_vblank;
dev->driver.disable_vblank = mach64_disable_vblank;
dev->driver.irq_preinstall = mach64_driver_irq_preinstall;
dev->driver.irq_postinstall = mach64_driver_irq_postinstall;
dev->driver.irq_uninstall = mach64_driver_irq_uninstall;
dev->driver.irq_handler = mach64_driver_irq_handler;
dev->driver.dma_ioctl = mach64_dma_buffers;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver.ioctls = mach64_ioctls;
dev->driver.max_ioctl = mach64_max_ioctl;
dev->driver->buf_priv_size = 1; /* No dev_priv */
dev->driver->lastclose = mach64_driver_lastclose;
dev->driver->get_vblank_counter = mach64_get_vblank_counter;
dev->driver->enable_vblank = mach64_enable_vblank;
dev->driver->disable_vblank = mach64_disable_vblank;
dev->driver->irq_preinstall = mach64_driver_irq_preinstall;
dev->driver->irq_postinstall = mach64_driver_irq_postinstall;
dev->driver->irq_uninstall = mach64_driver_irq_uninstall;
dev->driver->irq_handler = mach64_driver_irq_handler;
dev->driver->dma_ioctl = mach64_dma_buffers;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = mach64_ioctls;
dev->driver->max_ioctl = mach64_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver.use_pci_dma = 1;
dev->driver.use_dma = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
mach64_probe(device_t dev)
{
@ -88,15 +84,31 @@ mach64_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
mach64_configure(dev);
return drm_attach(nbdev, mach64_pciidlist);
}
static int
mach64_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t mach64_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mach64_probe),
DEVMETHOD(device_attach, mach64_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, mach64_detach),
{ 0, 0 }
};
@ -114,7 +126,3 @@ DRIVER_MODULE(mach64, vgapci, mach64_driver, drm_devclass, 0, 0);
DRIVER_MODULE(mach64, pci, mach64_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(mach64, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
CFDRIVER_DECL(mach64, DV_TTY, NULL);
#endif

View file

@ -86,42 +86,36 @@ static int mga_driver_device_is_agp(struct drm_device * dev)
static void mga_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_mga_buf_priv_t);
dev->driver.load = mga_driver_load;
dev->driver.unload = mga_driver_unload;
dev->driver.lastclose = mga_driver_lastclose;
dev->driver.get_vblank_counter = mga_get_vblank_counter;
dev->driver.enable_vblank = mga_enable_vblank;
dev->driver.disable_vblank = mga_disable_vblank;
dev->driver.irq_preinstall = mga_driver_irq_preinstall;
dev->driver.irq_postinstall = mga_driver_irq_postinstall;
dev->driver.irq_uninstall = mga_driver_irq_uninstall;
dev->driver.irq_handler = mga_driver_irq_handler;
dev->driver.dma_ioctl = mga_dma_buffers;
dev->driver.dma_quiescent = mga_driver_dma_quiescent;
dev->driver.device_is_agp = mga_driver_device_is_agp;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver.ioctls = mga_ioctls;
dev->driver.max_ioctl = mga_max_ioctl;
dev->driver->buf_priv_size = sizeof(drm_mga_buf_priv_t);
dev->driver->load = mga_driver_load;
dev->driver->unload = mga_driver_unload;
dev->driver->lastclose = mga_driver_lastclose;
dev->driver->get_vblank_counter = mga_get_vblank_counter;
dev->driver->enable_vblank = mga_enable_vblank;
dev->driver->disable_vblank = mga_disable_vblank;
dev->driver->irq_preinstall = mga_driver_irq_preinstall;
dev->driver->irq_postinstall = mga_driver_irq_postinstall;
dev->driver->irq_uninstall = mga_driver_irq_uninstall;
dev->driver->irq_handler = mga_driver_irq_handler;
dev->driver->dma_ioctl = mga_dma_buffers;
dev->driver->dma_quiescent = mga_driver_dma_quiescent;
dev->driver->device_is_agp = mga_driver_device_is_agp;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = mga_ioctls;
dev->driver->max_ioctl = mga_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.require_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver.use_dma = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
mga_probe(device_t dev)
{
@ -134,15 +128,31 @@ mga_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
mga_configure(dev);
return drm_attach(nbdev, mga_pciidlist);
}
static int
mga_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t mga_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mga_probe),
DEVMETHOD(device_attach, mga_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, mga_detach),
{ 0, 0 }
};
@ -160,12 +170,3 @@ DRIVER_MODULE(mga, vgapci, mga_driver, drm_devclass, 0, 0);
DRIVER_MODULE(mga, pci, mga_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(mga, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#ifdef _LKM
CFDRIVER_DECL(mga, DV_TTY, NULL);
#else
CFATTACH_DECL(mga, sizeof(struct drm_device), drm_probe, drm_attach, drm_detach,
drm_activate);
#endif
#endif

View file

@ -44,38 +44,33 @@ static drm_pci_id_list_t r128_pciidlist[] = {
static void r128_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_r128_buf_priv_t);
dev->driver.preclose = r128_driver_preclose;
dev->driver.lastclose = r128_driver_lastclose;
dev->driver.get_vblank_counter = r128_get_vblank_counter;
dev->driver.enable_vblank = r128_enable_vblank;
dev->driver.disable_vblank = r128_disable_vblank;
dev->driver.irq_preinstall = r128_driver_irq_preinstall;
dev->driver.irq_postinstall = r128_driver_irq_postinstall;
dev->driver.irq_uninstall = r128_driver_irq_uninstall;
dev->driver.irq_handler = r128_driver_irq_handler;
dev->driver.dma_ioctl = r128_cce_buffers;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver.ioctls = r128_ioctls;
dev->driver.max_ioctl = r128_max_ioctl;
dev->driver->buf_priv_size = sizeof(drm_r128_buf_priv_t);
dev->driver->preclose = r128_driver_preclose;
dev->driver->lastclose = r128_driver_lastclose;
dev->driver->get_vblank_counter = r128_get_vblank_counter;
dev->driver->enable_vblank = r128_enable_vblank;
dev->driver->disable_vblank = r128_disable_vblank;
dev->driver->irq_preinstall = r128_driver_irq_preinstall;
dev->driver->irq_postinstall = r128_driver_irq_postinstall;
dev->driver->irq_uninstall = r128_driver_irq_uninstall;
dev->driver->irq_handler = r128_driver_irq_handler;
dev->driver->dma_ioctl = r128_cce_buffers;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = r128_ioctls;
dev->driver->max_ioctl = r128_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver.use_pci_dma = 1;
dev->driver.use_sg = 1;
dev->driver.use_dma = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
r128_probe(device_t dev)
{
@ -88,15 +83,31 @@ r128_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
r128_configure(dev);
return drm_attach(nbdev, r128_pciidlist);
}
static int
r128_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t r128_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, r128_probe),
DEVMETHOD(device_attach, r128_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, r128_detach),
{ 0, 0 }
};
@ -114,12 +125,3 @@ DRIVER_MODULE(r128, vgapci, r128_driver, drm_devclass, 0, 0);
DRIVER_MODULE(r128, pci, r128_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(r128, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#ifdef _LKM
CFDRIVER_DECL(r128, DV_TTY, NULL);
#else
CFATTACH_DECL(r128, sizeof(struct drm_device), drm_probe, drm_attach,
drm_detach, drm_activate);
#endif
#endif

View file

@ -44,44 +44,38 @@ static drm_pci_id_list_t radeon_pciidlist[] = {
static void radeon_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_radeon_buf_priv_t);
dev->driver.load = radeon_driver_load;
dev->driver.unload = radeon_driver_unload;
dev->driver.firstopen = radeon_driver_firstopen;
dev->driver.open = radeon_driver_open;
dev->driver.preclose = radeon_driver_preclose;
dev->driver.postclose = radeon_driver_postclose;
dev->driver.lastclose = radeon_driver_lastclose;
dev->driver.get_vblank_counter = radeon_get_vblank_counter;
dev->driver.enable_vblank = radeon_enable_vblank;
dev->driver.disable_vblank = radeon_disable_vblank;
dev->driver.irq_preinstall = radeon_driver_irq_preinstall;
dev->driver.irq_postinstall = radeon_driver_irq_postinstall;
dev->driver.irq_uninstall = radeon_driver_irq_uninstall;
dev->driver.irq_handler = radeon_driver_irq_handler;
dev->driver.dma_ioctl = radeon_cp_buffers;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver.ioctls = radeon_ioctls;
dev->driver.max_ioctl = radeon_max_ioctl;
dev->driver->buf_priv_size = sizeof(drm_radeon_buf_priv_t);
dev->driver->load = radeon_driver_load;
dev->driver->unload = radeon_driver_unload;
dev->driver->firstopen = radeon_driver_firstopen;
dev->driver->open = radeon_driver_open;
dev->driver->preclose = radeon_driver_preclose;
dev->driver->postclose = radeon_driver_postclose;
dev->driver->lastclose = radeon_driver_lastclose;
dev->driver->get_vblank_counter = radeon_get_vblank_counter;
dev->driver->enable_vblank = radeon_enable_vblank;
dev->driver->disable_vblank = radeon_disable_vblank;
dev->driver->irq_preinstall = radeon_driver_irq_preinstall;
dev->driver->irq_postinstall = radeon_driver_irq_postinstall;
dev->driver->irq_uninstall = radeon_driver_irq_uninstall;
dev->driver->irq_handler = radeon_driver_irq_handler;
dev->driver->dma_ioctl = radeon_cp_buffers;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = radeon_ioctls;
dev->driver->max_ioctl = radeon_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver.use_pci_dma = 1;
dev->driver.use_sg = 1;
dev->driver.use_dma = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver.use_vbl_irq2 = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
radeon_probe(device_t dev)
{
@ -94,15 +88,31 @@ radeon_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
radeon_configure(dev);
return drm_attach(nbdev, radeon_pciidlist);
}
static int
radeon_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t radeon_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, radeon_probe),
DEVMETHOD(device_attach, radeon_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, radeon_detach),
{ 0, 0 }
};
@ -120,12 +130,3 @@ DRIVER_MODULE(radeon, vgapci, radeon_driver, drm_devclass, 0, 0);
DRIVER_MODULE(radeon, pci, radeon_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(radeon, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#ifdef _LKM
CFDRIVER_DECL(radeon, DV_TTY, NULL);
#else
CFATTACH_DECL(radeon, sizeof(struct drm_device), drm_probe, drm_attach,
drm_detach, drm_activate);
#endif
#endif /* __FreeBSD__ */

View file

@ -39,31 +39,29 @@ static drm_pci_id_list_t savage_pciidlist[] = {
static void savage_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_savage_buf_priv_t);
dev->driver.load = savage_driver_load;
dev->driver.firstopen = savage_driver_firstopen;
dev->driver.lastclose = savage_driver_lastclose;
dev->driver.unload = savage_driver_unload;
dev->driver.reclaim_buffers_locked = savage_reclaim_buffers;
dev->driver.dma_ioctl = savage_bci_buffers;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA;
dev->driver.ioctls = savage_ioctls;
dev->driver.max_ioctl = savage_max_ioctl;
dev->driver->buf_priv_size = sizeof(drm_savage_buf_priv_t);
dev->driver->load = savage_driver_load;
dev->driver->firstopen = savage_driver_firstopen;
dev->driver->lastclose = savage_driver_lastclose;
dev->driver->unload = savage_driver_unload;
dev->driver->reclaim_buffers_locked = savage_reclaim_buffers;
dev->driver->dma_ioctl = savage_bci_buffers;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = savage_ioctls;
dev->driver->max_ioctl = savage_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver.use_pci_dma = 1;
dev->driver.use_dma = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
savage_probe(device_t dev)
{
@ -76,15 +74,31 @@ savage_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
savage_configure(dev);
return drm_attach(nbdev, savage_pciidlist);
}
static int
savage_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t savage_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, savage_probe),
DEVMETHOD(device_attach, savage_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, savage_detach),
{ 0, 0 }
};
@ -102,7 +116,3 @@ DRIVER_MODULE(savage, vgapci, savage_driver, drm_devclass, 0, 0);
DRIVER_MODULE(savage, pci, savage_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(savage, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
CFDRIVER_DECL(savage, DV_TTY, NULL);
#endif

View file

@ -38,25 +38,24 @@ static drm_pci_id_list_t sis_pciidlist[] = {
static void sis_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = 1; /* No dev_priv */
dev->driver.context_ctor = sis_init_context;
dev->driver.context_dtor = sis_final_context;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR;
dev->driver.ioctls = sis_ioctls;
dev->driver.max_ioctl = sis_max_ioctl;
dev->driver->buf_priv_size = 1; /* No dev_priv */
dev->driver->context_ctor = sis_init_context;
dev->driver->context_dtor = sis_final_context;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = sis_ioctls;
dev->driver->max_ioctl = sis_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
sis_probe(device_t dev)
{
@ -69,15 +68,31 @@ sis_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
sis_configure(dev);
return drm_attach(nbdev, sis_pciidlist);
}
static int
sis_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t sis_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, sis_probe),
DEVMETHOD(device_attach, sis_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, sis_detach),
{ 0, 0 }
};
@ -95,12 +110,3 @@ DRIVER_MODULE(sisdrm, vgapci, sis_driver, drm_devclass, 0, 0);
DRIVER_MODULE(sisdrm, pci, sis_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(sisdrm, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#ifdef _LKM
CFDRIVER_DECL(sis, DV_TTY, NULL);
#else
CFATTACH_DECL(sis, sizeof(struct drm_device), drm_probe, drm_attach, drm_detach,
drm_activate);
#endif
#endif

View file

@ -43,21 +43,21 @@ static drm_pci_id_list_t tdfx_pciidlist[] = {
static void tdfx_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = 1; /* No dev_priv */
dev->driver->driver_features =
DRIVER_USE_MTRR;
dev->driver.max_ioctl = 0;
dev->driver->buf_priv_size = 1; /* No dev_priv */
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->max_ioctl = 0;
dev->driver.use_mtrr = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
tdfx_probe(device_t dev)
{
@ -70,15 +70,31 @@ tdfx_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
tdfx_configure(dev);
return drm_attach(nbdev, tdfx_pciidlist);
}
static int
tdfx_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t tdfx_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, tdfx_probe),
DEVMETHOD(device_attach, tdfx_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, tdfx_detach),
{ 0, 0 }
};
@ -96,12 +112,3 @@ DRIVER_MODULE(tdfx, vgapci, tdfx_driver, drm_devclass, 0, 0);
DRIVER_MODULE(tdfx, pci, tdfx_driver, drm_devclass, 0, 0);
#endif
MODULE_DEPEND(tdfx, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#ifdef _LKM
CFDRIVER_DECL(tdfx, DV_TTY, NULL);
#else
CFATTACH_DECL(tdfx, sizeof(struct drm_device), drm_probe, drm_attach,
drm_detach, drm_activate);
#endif
#endif

View file

@ -41,35 +41,34 @@ static drm_pci_id_list_t via_pciidlist[] = {
static void via_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = 1;
dev->driver.load = via_driver_load;
dev->driver.unload = via_driver_unload;
dev->driver.context_ctor = via_init_context;
dev->driver.context_dtor = via_final_context;
dev->driver.vblank_wait = via_driver_vblank_wait;
dev->driver.irq_preinstall = via_driver_irq_preinstall;
dev->driver.irq_postinstall = via_driver_irq_postinstall;
dev->driver.irq_uninstall = via_driver_irq_uninstall;
dev->driver.irq_handler = via_driver_irq_handler;
dev->driver.dma_quiescent = via_driver_dma_quiescent;
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ;
dev->driver.ioctls = via_ioctls;
dev->driver.max_ioctl = via_max_ioctl;
dev->driver->buf_priv_size = 1;
dev->driver->load = via_driver_load;
dev->driver->unload = via_driver_unload;
dev->driver->context_ctor = via_init_context;
dev->driver->context_dtor = via_final_context;
dev->driver->get_vblank_counter = via_get_vblank_counter;
dev->driver->enable_vblank = via_enable_vblank;
dev->driver->disable_vblank = via_disable_vblank;
dev->driver->irq_preinstall = via_driver_irq_preinstall;
dev->driver->irq_postinstall = via_driver_irq_postinstall;
dev->driver->irq_uninstall = via_driver_irq_uninstall;
dev->driver->irq_handler = via_driver_irq_handler;
dev->driver->dma_quiescent = via_driver_dma_quiescent;
dev->driver.name = DRIVER_NAME;
dev->driver.desc = DRIVER_DESC;
dev->driver.date = DRIVER_DATE;
dev->driver.major = DRIVER_MAJOR;
dev->driver.minor = DRIVER_MINOR;
dev->driver.patchlevel = DRIVER_PATCHLEVEL;
dev->driver->ioctls = via_ioctls;
dev->driver->max_ioctl = via_max_ioctl;
dev->driver.use_agp = 1;
dev->driver.use_mtrr = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
#ifdef __FreeBSD__
static int
via_probe(device_t dev)
{
@ -82,15 +81,31 @@ via_attach(device_t nbdev)
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(struct drm_device));
dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
via_configure(dev);
return drm_attach(nbdev, via_pciidlist);
}
static int
via_detach(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
int ret;
ret = drm_detach(nbdev);
free(dev->driver, M_DRM);
return ret;
}
static device_method_t via_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, via_probe),
DEVMETHOD(device_attach, via_attach),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, via_detach),
{ 0, 0 }
};
@ -104,12 +119,3 @@ static driver_t via_driver = {
extern devclass_t drm_devclass;
DRIVER_MODULE(via, pci, via_driver, drm_devclass, 0, 0);
MODULE_DEPEND(via, drm, 1, 1, 1);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#ifdef _LKM
CFDRIVER_DECL(via, DV_TTY, NULL);
#else
CFATTACH_DECL(via, sizeof(struct drm_device), drm_probe, drm_attach, drm_detach,
drm_activate);
#endif
#endif

View file

@ -34,6 +34,9 @@ AC_SYS_LARGEFILE
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)
AC_ARG_ENABLE(udev, AS_HELP_STRING([--enable-udev],
[Enable support for using udev instead of mknod (default: disabled)]),
[UDEV=$enableval], [UDEV=no])
dnl ===========================================================================
@ -101,6 +104,10 @@ AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
AC_MSG_CHECKING([which warning flags were supported])])
WARN_CFLAGS="$libdrm_cv_warn_cflags"
if test "x$UDEV" = xyes; then
AC_DEFINE(UDEV, 1, [Have UDEV support])
fi
AC_SUBST(WARN_CFLAGS)
AC_OUTPUT([
Makefile
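With the hunks above, udev support becomes an explicit configure-time switch: passing --enable-udev (for example, ./configure --enable-udev) sets UDEV=yes and defines UDEV, which per the help string selects udev-managed device nodes instead of mknod. The code that actually tests the UDEV define lies outside this hunk, so that reading is inferred from the help string rather than shown here.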

View file

@ -26,10 +26,10 @@ libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
AM_CFLAGS = -I$(top_srcdir)/shared-core
libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
xf86drmMode.c dri_bufmgr.c libdrm_lists.h
xf86drmMode.c
libdrm_la_LIBADD = intel/libdrm_intel.la
libdrmincludedir = ${includedir}
libdrminclude_HEADERS = xf86drm.h xf86drmMode.h dri_bufmgr.h
libdrminclude_HEADERS = xf86drm.h xf86drmMode.h
EXTRA_DIST = ChangeLog TODO

View file

@ -1,178 +0,0 @@
/**************************************************************************
*
* Copyright © 2007 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Eric Anholt <eric@anholt.net>
*/
#ifndef _DRI_BUFMGR_H_
#define _DRI_BUFMGR_H_
#include <xf86drm.h>
typedef struct _dri_bufmgr dri_bufmgr;
typedef struct _dri_bo dri_bo;
struct _dri_bo {
/**
* Size in bytes of the buffer object.
*
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
unsigned long size;
/**
* Card virtual address (offset from the beginning of the aperture) for the
* object. Only valid while validated.
*/
unsigned long offset;
/**
* Virtual address for accessing the buffer data. Only valid while mapped.
*/
void *virtual;
/** Buffer manager context associated with this buffer object */
dri_bufmgr *bufmgr;
/**
* MM-specific handle for accessing object
*/
int handle;
};
/**
* Context for a buffer manager instance.
*
* Contains public methods followed by private storage for the buffer manager.
*/
struct _dri_bufmgr {
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped using
* bo_map() to be used by the CPU, and validated for use using bo_validate()
* to be used from the graphics device.
*/
dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
unsigned long size, unsigned int alignment);
/** Takes a reference on a buffer object */
void (*bo_reference)(dri_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
void (*bo_unreference)(dri_bo *bo);
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map)(dri_bo *buf, int write_enable);
/** Reduces the refcount on the userspace mapping of the buffer object. */
int (*bo_unmap)(dri_bo *buf);
/**
* Write data into an object.
*
* This is an optional function, if missing,
* dri_bo will map/memcpy/unmap.
*/
int (*bo_subdata) (dri_bo *buf, unsigned long offset,
unsigned long size, const void *data);
/**
* Read data from an object
*
* This is an optional function, if missing,
* dri_bo will map/memcpy/unmap.
*/
int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map, bo_subdata, etc.
* It is merely a way for the driver to implement glFinish.
*/
void (*bo_wait_rendering) (dri_bo *bo);
/**
* Tears down the buffer manager instance.
*/
void (*destroy)(dri_bufmgr *bufmgr);
/**
* Processes the relocations, either in userland or by converting the list
* for use in batchbuffer submission.
*
* Kernel-based implementations will return a pointer to the arguments
* to be handed with batchbuffer submission to the kernel. The userland
* implementation performs the buffer validation and emits relocations
* into them in the appropriate order.
*
* \param batch_buf buffer at the root of the tree of relocations
* \return argument to be completed and passed to the execbuffers ioctl
* (if any).
*/
void *(*process_relocs)(dri_bo *batch_buf);
void (*post_submit)(dri_bo *batch_buf);
int (*check_aperture_space)(dri_bo **bo_array, int count);
int debug; /**< Enables verbose debugging printouts */
};
dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
unsigned int alignment);
void dri_bo_reference(dri_bo *bo);
void dri_bo_unreference(dri_bo *bo);
int dri_bo_map(dri_bo *buf, int write_enable);
int dri_bo_unmap(dri_bo *buf);
int dri_bo_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, const void *data);
int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, void *data);
void dri_bo_wait_rendering(dri_bo *bo);
void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
void *dri_process_relocs(dri_bo *batch_buf);
void dri_post_process_relocs(dri_bo *batch_buf);
void dri_post_submit(dri_bo *batch_buf);
int dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count);
#endif

View file

@ -25,11 +25,16 @@
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir)/libdrm \
-I$(top_srcdir)/libdrm/intel \
-I$(top_srcdir)/shared-core
noinst_LTLIBRARIES = libdrm_intel.la
libdrm_intel_la_LTLIBRARIES = libdrm_intel.la
libdrm_intel_ladir = $(libdir)
libdrm_intel_la_LDFLAGS = -version-number 1:0:0 -no-undefined
libdrm_intel_la_SOURCES = \
intel_bufmgr.c \
intel_bufmgr_priv.h \
intel_bufmgr_fake.c \
intel_bufmgr_gem.c \
mm.c \

View file

@ -25,10 +25,19 @@
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include "dri_bufmgr.h"
#include <errno.h>
#include <drm.h>
#include <i915_drm.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
/** @file dri_bufmgr.c
*
@ -118,14 +127,12 @@ dri_bufmgr_destroy(dri_bufmgr *bufmgr)
bufmgr->destroy(bufmgr);
}
void *dri_process_relocs(dri_bo *batch_buf)
int
dri_bo_exec(dri_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
{
return batch_buf->bufmgr->process_relocs(batch_buf);
}
void dri_post_submit(dri_bo *batch_buf)
{
batch_buf->bufmgr->post_submit(batch_buf);
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
void
@ -139,3 +146,49 @@ dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
int
dri_bo_flink(dri_bo *bo, uint32_t *name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
return -ENODEV;
}
int
dri_bo_emit_reloc(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_buf)
{
return reloc_buf->bufmgr->bo_emit_reloc(reloc_buf,
read_domains, write_domain,
delta, offset, target_buf);
}
int
dri_bo_pin(dri_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);
return -ENODEV;
}
int
dri_bo_unpin(dri_bo *bo)
{
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);
return -ENODEV;
}
int dri_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
{
if (bo->bufmgr->bo_set_tiling)
return bo->bufmgr->bo_set_tiling(bo, tiling_mode);
*tiling_mode = I915_TILING_NONE;
return 0;
}

View file

@ -31,65 +31,64 @@
* Public definitions of Intel-specific bufmgr functions.
*/
#ifndef INTEL_BUFMGR_GEM_H
#define INTEL_BUFMGR_GEM_H
#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H
#include "dri_bufmgr.h"
#include <stdint.h>
/**
* Intel-specific bufmgr bits that follow immediately after the
* generic bufmgr structure.
*/
struct intel_bufmgr {
typedef struct _dri_bufmgr dri_bufmgr;
typedef struct _dri_bo dri_bo;
struct _dri_bo {
/**
* Add relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on command submission.
* Size in bytes of the buffer object.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param reloc_buf Buffer to write the relocation into.
* \param read_domains GEM read domains which the buffer will be read into
* by the command that this relocation is part of.
* \param write_domains GEM read domains which the buffer will be dirtied
* in by the command that this relocation is part of.
* \param delta Constant value to be added to the relocation target's
* offset.
* \param offset Byte offset within batch_buf of the relocated pointer.
* \param target Buffer whose offset should be written into the relocation
* entry.
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
int (*emit_reloc)(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target);
unsigned long size;
/**
* Pin a buffer to the aperture and fix the offset until unpinned
*
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
* Card virtual address (offset from the beginning of the aperture) for the
* object. Only valid while validated.
*/
int (*pin) (dri_bo *buf, uint32_t alignment);
unsigned long offset;
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
* Virtual address for accessing the buffer data. Only valid while mapped.
*/
int (*unpin) (dri_bo *buf);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
int (*set_tiling) (dri_bo *bo, uint32_t *tiling_mode);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*flink) (dri_bo *buf, uint32_t *name);
void *virtual;
/** Buffer manager context associated with this buffer object */
dri_bufmgr *bufmgr;
};
dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
unsigned int alignment);
void dri_bo_reference(dri_bo *bo);
void dri_bo_unreference(dri_bo *bo);
int dri_bo_map(dri_bo *buf, int write_enable);
int dri_bo_unmap(dri_bo *buf);
int dri_bo_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, const void *data);
int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
unsigned long size, void *data);
void dri_bo_wait_rendering(dri_bo *bo);
void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
int dri_bo_exec(dri_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
int dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count);
int dri_bo_emit_reloc(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_buf);
int dri_bo_pin(dri_bo *buf, uint32_t alignment);
int dri_bo_unpin(dri_bo *buf);
int dri_bo_set_tiling(dri_bo *buf, uint32_t *tiling_mode);
int dri_bo_flink(dri_bo *buf, uint32_t *name);
/* intel_bufmgr_gem.c */
dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
@ -97,34 +96,32 @@ dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
/* intel_bufmgr_fake.c */
dri_bufmgr *intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
dri_bufmgr *intel_bufmgr_fake_init(int fd,
unsigned long low_offset, void *low_virtual,
unsigned long size,
unsigned int (*fence_emit)(void *private),
int (*fence_wait)(void *private,
unsigned int cookie),
void *driver_priv);
volatile unsigned int *last_dispatch);
void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
volatile unsigned int *last_dispatch);
void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
int (*exec)(dri_bo *bo,
unsigned int used,
void *priv),
void *priv);
void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
unsigned int (*emit)(void *priv),
void (*wait)(unsigned int fence,
void *priv),
void *priv);
dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
unsigned long offset, unsigned long size,
void *virtual);
void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
void intel_bo_fake_disable_backing_store(dri_bo *bo,
void (*invalidate_cb)(dri_bo *bo,
void *ptr),
void *ptr);
void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
int intel_bo_emit_reloc(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_buf);
int intel_bo_pin(dri_bo *buf, uint32_t alignment);
int intel_bo_unpin(dri_bo *buf);
int intel_bo_set_tiling(dri_bo *buf, uint32_t *tiling_mode);
int intel_bo_flink(dri_bo *buf, uint32_t *name);
#endif /* INTEL_BUFMGR_GEM_H */
#endif /* INTEL_BUFMGR_H */
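For orientation, a hedged sketch of driving the consolidated API declared above from a client: the buffer sizes, domain flags, and batch contents are illustrative assumptions (a real batch must end with a proper MI_BATCH_BUFFER_END), not something this header prescribes.

#include <stdint.h>
#include <xf86drm.h>
#include "i915_drm.h"
#include "intel_bufmgr.h"

static int
example_submit(int fd)
{
	dri_bufmgr *bufmgr = intel_bufmgr_gem_init(fd, 4096);
	dri_bo *batch = dri_bo_alloc(bufmgr, "batch", 4096, 4096);
	dri_bo *target = dri_bo_alloc(bufmgr, "target", 4096, 4096);
	uint32_t cmds[2] = { 0, 0 };	/* placeholder; dword 0 is relocated */
	int ret;

	/* Upload the batch contents, then record that dword 0 must be
	 * patched with target's final GPU offset at execution time. */
	dri_bo_subdata(batch, 0, sizeof(cmds), cmds);
	dri_bo_emit_reloc(batch, I915_GEM_DOMAIN_RENDER,
			  I915_GEM_DOMAIN_RENDER,
			  0 /* delta */, 0 /* offset in batch */, target);

	/* Submission no longer goes through process_relocs/post_submit;
	 * dri_bo_exec() covers both the GEM and fake back ends. */
	ret = dri_bo_exec(batch, sizeof(cmds), NULL, 0, 0);

	dri_bo_unreference(target);
	dri_bo_unreference(batch);
	dri_bufmgr_destroy(bufmgr);
	return ret;
}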

View file

@ -34,11 +34,17 @@
* the bugs in the old texture manager.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "dri_bufmgr.h"
#include <errno.h>
#include <xf86drm.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "drm.h"
#include "i915_drm.h"
#include "mm.h"
@ -105,7 +111,6 @@ struct block {
typedef struct _bufmgr_fake {
dri_bufmgr bufmgr;
struct intel_bufmgr intel_bufmgr;
unsigned long low_offset;
unsigned long size;
@ -138,14 +143,32 @@ typedef struct _bufmgr_fake {
/**
* Driver callback to emit a fence, returning the cookie.
*
* This allows the driver to hook in a replacement for the DRM usage in
* bufmgr_fake.
*
* Currently, this also requires that a write flush be emitted before
* emitting the fence, but this should change.
*/
unsigned int (*fence_emit)(void *private);
/** Driver callback to wait for a fence cookie to have passed. */
int (*fence_wait)(void *private, unsigned int fence_cookie);
void (*fence_wait)(unsigned int fence, void *private);
void *fence_priv;
/**
* Driver callback to execute a buffer.
*
* This allows the driver to hook in a replacement for the DRM usage in
* bufmgr_fake.
*/
int (*exec)(dri_bo *bo, unsigned int used, void *priv);
void *exec_priv;
/** Driver-supplied argument to driver callbacks */
void *driver_priv;
/* Pointer to kernel-updated sarea data for the last completed user irq */
volatile int *last_dispatch;
int fd;
int debug;
@ -211,24 +234,161 @@ static int FENCE_LTE( unsigned a, unsigned b )
return 0;
}
void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
unsigned int (*emit)(void *priv),
void (*wait)(unsigned int fence,
void *priv),
void *priv)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
bufmgr_fake->fence_emit = emit;
bufmgr_fake->fence_wait = wait;
bufmgr_fake->fence_priv = priv;
}
static unsigned int
_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
{
bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
struct drm_i915_irq_emit ie;
int ret, seq = 1;
if (bufmgr_fake->fence_emit != NULL)
return bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
ie.irq_seq = &seq;
ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
&ie, sizeof(ie));
if (ret) {
drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret);
abort();
}
DBG("emit 0x%08x\n", seq);
bufmgr_fake->last_fence = seq;
return bufmgr_fake->last_fence;
}
static void
_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq)
{
struct drm_i915_irq_wait iw;
int hw_seq, busy_count = 0;
int ret;
int kernel_lied;
if (bufmgr_fake->fence_wait != NULL) {
bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
return;
}
iw.irq_seq = seq;
DBG("wait 0x%08x\n", iw.irq_seq);
/* The kernel IRQ_WAIT implementation is all sorts of broken.
* 1) It returns 1 to 0x7fffffff instead of using the full 32-bit unsigned
* range.
* 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
* signed range.
* 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
* signed range.
* 4) It returns -EBUSY in 3 seconds even if the hardware is still
* successfully chewing through buffers.
*
* Assume that in userland we treat sequence numbers as ints, which makes
* some of the comparisons convenient, since the sequence numbers are
* all positive signed integers.
*
* From this we get several cases we need to handle. Here's a timeline.
* 0x2 0x7 0x7ffffff8 0x7ffffffd
* | | | |
* -------------------------------------------------------------------
*
* A) Normal wait for hw to catch up
* hw_seq seq
* | |
* -------------------------------------------------------------------
* seq - hw_seq = 5. If we call IRQ_WAIT, it will wait for hw to catch up.
*
* B) Normal wait for a sequence number that's already passed.
* seq hw_seq
* | |
* -------------------------------------------------------------------
* seq - hw_seq = -5. If we call IRQ_WAIT, it returns 0 quickly.
*
* C) Hardware has already wrapped around ahead of us
* hw_seq seq
* | |
* -------------------------------------------------------------------
* seq - hw_seq = 0x80000000 - 5. If we called IRQ_WAIT, it would wait
* for hw_seq >= seq, which may never occur. Thus, we want to catch this
* in userland and return 0.
*
* D) We've wrapped around ahead of the hardware.
* seq hw_seq
* | |
* -------------------------------------------------------------------
* seq - hw_seq = -(0x80000000 - 5). If we called IRQ_WAIT, it would return
* 0 quickly because hw_seq >= seq, even though the hardware isn't caught up.
* Thus, we need to catch this early return in userland and bother the
* kernel until the hardware really does catch up.
*
* E) Hardware might wrap after we test in userland.
* hw_seq seq
* | |
* -------------------------------------------------------------------
* seq - hw_seq = 5. If we call IRQ_WAIT, it will likely see seq >= hw_seq
* and wait. However, suppose hw_seq wraps before we make it into the
* kernel. The kernel sees hw_seq >= seq and waits for 3 seconds then
* returns -EBUSY. This is case C). We should catch this and then return
* successfully.
*
* F) Hardware might take a long time on a buffer.
* hw_seq seq
* | |
* -------------------------------------------------------------------
* seq - hw_seq = 5. If we call IRQ_WAIT, if sequence 2 through 5 take too
* long, it will return -EBUSY. Batchbuffers in the gltestperf demo were
* seen to take up to 7 seconds. We should catch early -EBUSY return
* and keep trying.
*/
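/* Worked example with the timeline's numbers (illustrative): if seq is
 * 0x7ffffffd and hw_seq is 0x2, the hardware has wrapped ahead of us
 * (case C); seq - hw_seq is about 0x7ffffffb, which exceeds 0x40000000,
 * so the loop below returns before calling IRQ_WAIT at all.  With the
 * roles reversed (seq 0x2, hw_seq 0x7ffffffd, case D) the difference is
 * about -0x7ffffffb, so a quick "success" from the kernel is treated as
 * a lie and we keep polling until last_dispatch really catches up. */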
do {
/* Keep a copy of last_dispatch so that if the wait -EBUSYs because the
* hardware didn't catch up in 3 seconds, we can see if it at least made
* progress and retry.
*/
hw_seq = *bufmgr_fake->last_dispatch;
/* Catch case C */
if (seq - hw_seq > 0x40000000)
return;
ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
&iw, sizeof(iw));
/* Catch case D */
kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
-0x40000000);
/* Catch case E */
if (ret == -EBUSY && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
ret = 0;
/* Catch case F: Allow up to 15 seconds chewing on one buffer. */
if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
busy_count = 0;
else
busy_count++;
} while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
(ret == -EBUSY && busy_count < 5));
ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie);
if (ret != 0) {
drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__);
drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__, __LINE__,
strerror(-ret));
abort();
}
clear_fenced(bufmgr_fake, cookie);
clear_fenced(bufmgr_fake, seq);
}
static int
@ -540,7 +700,7 @@ dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
{
unsigned int cookie;
cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
cookie = _fence_emit_internal(bufmgr_fake);
_fence_wait_internal(bufmgr_fake, cookie);
}
@ -1052,38 +1212,6 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
return dri_fake_bo_validate(bo);
}
static void *
dri_fake_process_relocs(dri_bo *batch_buf)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
dri_bo_fake *batch_fake = (dri_bo_fake *)batch_buf;
int ret;
int retry_count = 0;
bufmgr_fake->performed_rendering = 0;
dri_fake_calculate_domains(batch_buf);
batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
/* we've run out of RAM so blow the whole lot away and retry */
restart:
ret = dri_fake_reloc_and_validate_buffer(batch_buf);
if (bufmgr_fake->fail == 1) {
if (retry_count == 0) {
retry_count++;
dri_fake_kick_all(bufmgr_fake);
bufmgr_fake->fail = 0;
goto restart;
} else /* dump out the memory here */
mmDumpMemInfo(bufmgr_fake->heap);
}
assert(ret == 0);
return NULL;
}
static void
dri_bo_fake_post_submit(dri_bo *bo)
{
@ -1110,12 +1238,74 @@ dri_bo_fake_post_submit(dri_bo *bo)
}
static void
dri_fake_post_submit(dri_bo *batch_buf)
void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
int (*exec)(dri_bo *bo,
unsigned int used,
void *priv),
void *priv)
{
dri_fake_fence_validated(batch_buf->bufmgr);
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
dri_bo_fake_post_submit(batch_buf);
bufmgr_fake->exec = exec;
bufmgr_fake->exec_priv = priv;
}
static int
dri_fake_bo_exec(dri_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *batch_fake = (dri_bo_fake *)bo;
struct drm_i915_batchbuffer batch;
int ret;
int retry_count = 0;
bufmgr_fake->performed_rendering = 0;
dri_fake_calculate_domains(bo);
batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
/* we've run out of RAM so blow the whole lot away and retry */
restart:
ret = dri_fake_reloc_and_validate_buffer(bo);
if (bufmgr_fake->fail == 1) {
if (retry_count == 0) {
retry_count++;
dri_fake_kick_all(bufmgr_fake);
bufmgr_fake->fail = 0;
goto restart;
} else /* dump out the memory here */
mmDumpMemInfo(bufmgr_fake->heap);
}
assert(ret == 0);
if (bufmgr_fake->exec != NULL) {
int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
if (ret != 0)
return ret;
} else {
batch.start = bo->offset;
batch.used = used;
batch.cliprects = cliprects;
batch.num_cliprects = num_cliprects;
batch.DR1 = 0;
batch.DR4 = DR4;
if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
sizeof(batch))) {
drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
return -errno;
}
}
dri_fake_fence_validated(bo->bufmgr);
dri_bo_fake_post_submit(bo);
return 0;
}
/**
@ -1187,13 +1377,19 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
free_block(bufmgr_fake, block);
}
}
void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
volatile unsigned int *last_dispatch)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
dri_bufmgr *
intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
intel_bufmgr_fake_init(int fd,
unsigned long low_offset, void *low_virtual,
unsigned long size,
unsigned int (*fence_emit)(void *private),
int (*fence_wait)(void *private, unsigned int cookie),
void *driver_priv)
volatile unsigned int *last_dispatch)
{
dri_bufmgr_fake *bufmgr_fake;
@ -1216,16 +1412,14 @@ intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
bufmgr_fake->bufmgr.bo_emit_reloc = dri_fake_emit_reloc;
bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
bufmgr_fake->bufmgr.bo_exec = dri_fake_bo_exec;
bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
bufmgr_fake->bufmgr.debug = 0;
bufmgr_fake->intel_bufmgr.emit_reloc = dri_fake_emit_reloc;
bufmgr_fake->fence_emit = fence_emit;
bufmgr_fake->fence_wait = fence_wait;
bufmgr_fake->driver_priv = driver_priv;
bufmgr_fake->fd = fd;
bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
return &bufmgr_fake->bufmgr;
}
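A hedged sketch of the new setup path, tying intel_bufmgr_fake_init() to the optional callback setters added in this patch; the sarea type/field and the my_exec hook are assumptions used only for illustration.

#include <xf86drm.h>
#include "i915_drm.h"
#include "intel_bufmgr.h"

static dri_bufmgr *
example_fake_setup(int fd, drm_i915_sarea_t *sarea,
		   unsigned long aper_offset, void *aper_virtual,
		   unsigned long aper_size,
		   int (*my_exec)(dri_bo *bo, unsigned int used, void *priv),
		   void *my_priv)
{
	dri_bufmgr *bufmgr;

	/* The fake bufmgr now carries the DRM fd and a pointer to the
	 * kernel-updated dispatch counter, so it can fall back to
	 * IRQ_EMIT/IRQ_WAIT and DRM_I915_BATCHBUFFER on its own. */
	bufmgr = intel_bufmgr_fake_init(fd, aper_offset, aper_virtual,
					aper_size,
					(volatile unsigned int *)
					&sarea->last_dispatch);

	/* Optional: a driver that needs its own submission path (locking,
	 * cliprect handling, ...) can still override the built-in one. */
	if (my_exec != NULL)
		intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, my_priv);

	return bufmgr;
}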

View file

@ -34,6 +34,10 @@
* Dave Airlie <airlied@linux.ie>
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <xf86drm.h>
#include <stdio.h>
#include <stdlib.h>
@ -44,8 +48,8 @@
#include <sys/mman.h>
#include "errno.h"
#include "dri_bufmgr.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "string.h"
#include "i915_drm.h"
@ -76,8 +80,6 @@ struct dri_gem_bo_bucket {
typedef struct _dri_bufmgr_gem {
dri_bufmgr bufmgr;
struct intel_bufmgr intel_bufmgr;
int fd;
int max_relocs;
@ -89,8 +91,6 @@ typedef struct _dri_bufmgr_gem {
/** Array of lists of cached gem objects of power-of-two sizes */
struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
struct drm_i915_gem_execbuffer exec_arg;
} dri_bufmgr_gem;
struct _dri_bo_gem {
@ -316,7 +316,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
bo_gem->gem_handle = create.handle;
bo_gem->bo.handle = bo_gem->gem_handle;
//bo_gem->bo.handle = bo_gem->gem_handle;
if (ret != 0) {
free(bo_gem);
return NULL;
@ -358,7 +358,7 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
if (ret != 0) {
fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(-ret));
name, handle, strerror(errno));
free(bo_gem);
return NULL;
}
@ -370,6 +370,7 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
bo_gem->refcount = 1;
bo_gem->validate_index = -1;
bo_gem->gem_handle = open_arg.handle;
bo_gem->global_name = handle;
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
@ -401,7 +402,7 @@ dri_gem_bo_free(dri_bo *bo)
if (ret != 0) {
fprintf(stderr,
"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(-ret));
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
free(bo);
}
@ -650,8 +651,8 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
* last known offset in target_bo.
*/
static int
dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_bo)
dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
@ -708,27 +709,6 @@ dri_gem_bo_process_reloc(dri_bo *bo)
}
}
static void *
dri_gem_process_reloc(dri_bo *batch_buf)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;
/* Update indices and set up the validate list. */
dri_gem_bo_process_reloc(batch_buf);
/* Add the batch buffer to the validation list. There are no relocations
* pointing to it.
*/
intel_add_validate_buffer(batch_buf);
bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
bufmgr_gem->exec_arg.batch_start_offset = 0;
bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */
return &bufmgr_gem->exec_arg;
}
static void
intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
{
@ -748,11 +728,35 @@ intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
}
}
static void
dri_gem_post_submit(dri_bo *batch_buf)
static int
dri_gem_bo_exec(dri_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
int i;
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
struct drm_i915_gem_execbuffer execbuf;
int ret, i;
/* Update indices and set up the validate list. */
dri_gem_bo_process_reloc(bo);
/* Add the batch buffer to the validation list. There are no relocations
* pointing to it.
*/
intel_add_validate_buffer(bo);
execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
execbuf.buffer_count = bufmgr_gem->exec_count;
execbuf.batch_start_offset = 0;
execbuf.batch_len = used;
execbuf.cliprects_ptr = (uintptr_t)cliprects;
execbuf.num_cliprects = num_cliprects;
execbuf.DR1 = 0;
execbuf.DR4 = DR4;
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
} while (ret == -EAGAIN);
intel_update_buffer_offsets (bufmgr_gem);
@ -772,10 +776,12 @@ dri_gem_post_submit(dri_bo *batch_buf)
bufmgr_gem->exec_bos[i] = NULL;
}
bufmgr_gem->exec_count = 0;
return 0;
}
static int
dri_gem_pin(dri_bo *bo, uint32_t alignment)
dri_gem_bo_pin(dri_bo *bo, uint32_t alignment)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
@ -794,7 +800,7 @@ dri_gem_pin(dri_bo *bo, uint32_t alignment)
}
static int
dri_gem_unpin(dri_bo *bo)
dri_gem_bo_unpin(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
@ -811,7 +817,7 @@ dri_gem_unpin(dri_bo *bo)
}
static int
dri_gem_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
dri_gem_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
@ -832,7 +838,7 @@ dri_gem_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
}
static int
dri_gem_flink(dri_bo *bo, uint32_t *name)
dri_gem_bo_flink(dri_bo *bo, uint32_t *name)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
@ -910,16 +916,15 @@ intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
bufmgr_gem->bufmgr.bo_emit_reloc = dri_gem_bo_emit_reloc;
bufmgr_gem->bufmgr.bo_pin = dri_gem_bo_pin;
bufmgr_gem->bufmgr.bo_unpin = dri_gem_bo_unpin;
bufmgr_gem->bufmgr.bo_set_tiling = dri_gem_bo_set_tiling;
bufmgr_gem->bufmgr.bo_flink = dri_gem_bo_flink;
bufmgr_gem->bufmgr.bo_exec = dri_gem_bo_exec;
bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
bufmgr_gem->bufmgr.debug = 0;
bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
bufmgr_gem->intel_bufmgr.pin = dri_gem_pin;
bufmgr_gem->intel_bufmgr.unpin = dri_gem_unpin;
bufmgr_gem->intel_bufmgr.set_tiling = dri_gem_set_tiling;
bufmgr_gem->intel_bufmgr.flink = dri_gem_flink;
/* Initialize the linked lists for BO reuse cache. */
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
@ -927,67 +932,3 @@ intel_bufmgr_gem_init(int fd, int batch_size)
return &bufmgr_gem->bufmgr;
}
int
intel_bo_emit_reloc(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target_buf)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1);
return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
delta, offset, target_buf);
}
int
intel_bo_pin(dri_bo *bo, uint32_t alignment)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->pin)
return intel_bufmgr->pin(bo, alignment);
return 0;
}
int
intel_bo_unpin(dri_bo *bo)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->unpin)
return intel_bufmgr->unpin(bo);
return 0;
}
int intel_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->set_tiling)
return intel_bufmgr->set_tiling (bo, tiling_mode);
*tiling_mode = I915_TILING_NONE;
return 0;
}
int intel_bo_flink(dri_bo *bo, uint32_t *name)
{
struct intel_bufmgr *intel_bufmgr;
intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1);
if (intel_bufmgr->flink)
return intel_bufmgr->flink (bo, name);
return -ENODEV;
}
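With the wrappers above removed, code that used intel_bo_pin()/intel_bo_set_tiling()/intel_bo_flink() goes through the generic entry points instead. A hedged before/after fragment, assuming dri_bo_set_tiling()/dri_bo_flink() wrappers mirror the new vtable members and that bo is an existing dri_bo:

	uint32_t name, tiling = I915_TILING_X;

	/* Before: intel_bo_set_tiling(bo, &tiling); intel_bo_flink(bo, &name); */
	dri_bo_set_tiling(bo, &tiling);   /* tiling is updated with the mode actually set */
	dri_bo_flink(bo, &name);          /* name can be handed to another process for DRM_IOCTL_GEM_OPEN */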

View file

@ -0,0 +1,165 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/**
* @file intel_bufmgr_priv.h
*
* Private definitions of Intel-specific bufmgr functions and structures.
*/
#ifndef INTEL_BUFMGR_PRIV_H
#define INTEL_BUFMGR_PRIV_H
/**
* Context for a buffer manager instance.
*
* Contains public methods followed by private storage for the buffer manager.
*/
struct _dri_bufmgr {
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped using
* bo_map() to be used by the CPU, and referenced from a relocation in a
* batch submitted with bo_exec() to be used from the graphics device.
*/
dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
unsigned long size, unsigned int alignment);
/** Takes a reference on a buffer object */
void (*bo_reference)(dri_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
void (*bo_unreference)(dri_bo *bo);
/**
* Maps the buffer into userspace.
*
* This function first blocks until any existing execution on the buffer
* has completed. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map)(dri_bo *buf, int write_enable);
/** Reduces the refcount on the userspace mapping of the buffer object. */
int (*bo_unmap)(dri_bo *buf);
/**
* Write data into an object.
*
* This is an optional function; if missing,
* dri_bo will fall back to map/memcpy/unmap.
*/
int (*bo_subdata) (dri_bo *buf, unsigned long offset,
unsigned long size, const void *data);
/**
* Read data from an object.
*
* This is an optional function; if missing,
* dri_bo will fall back to map/memcpy/unmap.
*/
int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map, bo_subdata, etc.
* It is merely a way for the driver to implement glFinish.
*/
void (*bo_wait_rendering) (dri_bo *bo);
/**
* Tears down the buffer manager instance.
*/
void (*destroy)(dri_bufmgr *bufmgr);
/**
* Add a relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param reloc_buf Buffer to write the relocation into.
* \param read_domains GEM read domains which the buffer will be read into
* by the command that this relocation is part of.
* \param write_domain GEM write domain which the buffer will be dirtied
* in by the command that this relocation is part of.
* \param delta Constant value to be added to the relocation target's
* offset.
* \param offset Byte offset within reloc_buf of the relocated pointer.
* \param target Buffer whose offset should be written into the relocation
* entry.
*/
int (*bo_emit_reloc)(dri_bo *reloc_buf,
uint32_t read_domains, uint32_t write_domain,
uint32_t delta, uint32_t offset, dri_bo *target);
/** Executes the command buffer pointed to by bo. */
int (*bo_exec)(dri_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
/**
* Pin a buffer to the aperture and fix the offset until unpinned
*
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
*/
int (*bo_pin) (dri_bo *buf, uint32_t alignment);
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
*/
int (*bo_unpin) (dri_bo *buf);
/**
* Ask that the buffer be placed in the given tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode Desired tiling mode on entry; actual tiling mode on return
*/
int (*bo_set_tiling) (dri_bo *bo, uint32_t *tiling_mode);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*bo_flink) (dri_bo *buf, uint32_t *name);
int (*check_aperture_space)(dri_bo **bo_array, int count);
int debug; /**< Enables verbose debugging printouts */
};
#endif /* INTEL_BUFMGR_PRIV_H */
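To show how this vtable is meant to be consumed, a hedged skeleton of a backend wiring up its methods; all fake_* names are hypothetical, error handling is omitted, and "dri_bufmgr.h" plus this header are assumed to be included.

#include <stdlib.h>

typedef struct {
	dri_bufmgr bufmgr;	/* public vtable must come first */
	int fd;			/* private backend state follows */
} fake_bufmgr;

static dri_bo *
fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
	      unsigned long size, unsigned int alignment)
{
	/* ... allocate backing storage and return a dri_bo ... */
	return NULL;
}

static void
fake_destroy(dri_bufmgr *bufmgr)
{
	free(bufmgr);
}

dri_bufmgr *
fake_bufmgr_init(int fd)
{
	fake_bufmgr *fake = calloc(1, sizeof(*fake));

	fake->fd = fd;
	fake->bufmgr.bo_alloc = fake_bo_alloc;
	fake->bufmgr.destroy = fake_destroy;
	fake->bufmgr.debug = 0;
	/* bo_map, bo_unmap, bo_emit_reloc, bo_exec, ... are filled in the same way */
	return &fake->bufmgr;
}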

View file

@ -178,7 +178,7 @@ static char *drmStrdup(const char *s)
* Call ioctl, restarting if it is interrupted
*/
static int
drmIoctl(int fd, int request, void *arg)
drmIoctl(int fd, unsigned long request, void *arg)
{
int ret;
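The hunk cuts off before the body; for context, a minimal sketch of the EINTR-restart loop the comment describes. This mirrors, rather than quotes, the actual implementation, and assumes errno.h and sys/ioctl.h are already included as in xf86drm.c.

static int
drmIoctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	return ret;
}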
@ -300,6 +300,7 @@ static int drmOpenDevice(long dev, int minor, int type)
group = (serv_group >= 0) ? serv_group : DRM_DEV_GID;
}
#if !defined(UDEV)
if (stat(DRM_DIR_NAME, &st)) {
if (!isroot)
return DRM_ERR_NOT_ROOT;
@ -320,6 +321,30 @@ static int drmOpenDevice(long dev, int minor, int type)
chown(buf, user, group);
chmod(buf, devmode);
}
#else
/* if we modprobed then wait for udev */
{
int udev_count = 0;
wait_for_udev:
if (stat(DRM_DIR_NAME, &st)) {
usleep(20);
udev_count++;
if (udev_count == 50)
return -1;
goto wait_for_udev;
}
if (stat(buf, &st)) {
usleep(20);
udev_count++;
if (udev_count == 50)
return -1;
goto wait_for_udev;
}
}
#endif
fd = open(buf, O_RDWR, 0);
drmMsg("drmOpenDevice: open result is %d, (%s)\n",

View file

@ -346,7 +346,7 @@ CONFIG_DRM_I915 := m
endif
endif
GIT_REVISION := $(shell cd "$(DRMSRCDIR)" && git-describe --abbrev=17)
GIT_REVISION := $(shell cd "$(DRMSRCDIR)" && git describe --abbrev=17)
ifneq ($(GIT_REVISION),)
EXTRA_CFLAGS+=-D"GIT_REVISION=\"$(GIT_REVISION)\""
endif

View file

@ -841,27 +841,31 @@ static void drm_locked_tasklet_func(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
unsigned long irqflags;
void (*tasklet_func)(struct drm_device *);
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
tasklet_func = dev->locked_tasklet_func;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
if (!dev->locked_tasklet_func ||
if (!tasklet_func ||
!drm_lock_take(&dev->primary->master->lock,
DRM_KERNEL_CONTEXT)) {
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
return;
}
dev->primary->master->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
dev->locked_tasklet_func(dev);
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
tasklet_func = dev->locked_tasklet_func;
dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
if (tasklet_func != NULL)
tasklet_func(dev);
drm_lock_free(&dev->primary->master->lock,
DRM_KERNEL_CONTEXT);
dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
}
/**

View file

@ -158,6 +158,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
unsigned long irqflags;
void (*tasklet_func)(struct drm_device *);
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
@ -166,14 +167,11 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
if (dev->locked_tasklet_func) {
dev->locked_tasklet_func(dev);
dev->locked_tasklet_func = NULL;
}
tasklet_func = dev->locked_tasklet_func;
dev->locked_tasklet_func = NULL;
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
if (tasklet_func != NULL)
tasklet_func(dev);
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

View file

@ -183,7 +183,7 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
if (!(pt = kmalloc(size, GFP_KERNEL)))
return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
memcpy(pt, oldpt, DRM_MIN(oldsize,size));
kfree(oldpt);
}
return pt;
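The DRM_MIN() change only matters when a caller shrinks an allocation; a hedged illustration of the two cases, assuming buf is an existing DRM_MEM_DRIVER allocation and the sizes are arbitrary:

	/* Growing: all 4096 old bytes still fit, so behaviour is unchanged. */
	buf = drm_realloc(buf, 4096, 8192, DRM_MEM_DRIVER);

	/* Shrinking: the new block is only 2048 bytes, so copying the full old
	 * size would have overrun the fresh kmalloc() allocation. */
	buf = drm_realloc(buf, 8192, 2048, DRM_MEM_DRIVER);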

View file

@ -511,7 +511,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
device_remove_file(&minor->kdev, &dri_attrs[i]);
device_remove_file(&minor->kdev, &dri_attrs[j]);
device_unregister(&minor->kdev);
err_out:

View file

@ -72,7 +72,11 @@ void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
return;
}
#endif
if (drm_on_each_cpu(drm_ttm_ipi_handler, NULL, 1) != 0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1))
#else
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
#endif
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
EXPORT_SYMBOL(drm_ttm_cache_flush);

View file

@ -48,7 +48,11 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
page, o,
NV_CTXDMA_PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) {
#else
if (pci_dma_mapping_error(nvbe->pagelist[d])) {
#endif
be->func->clear(be);
DRM_ERROR("pci_map_page failed\n");
return -EINVAL;
@ -223,7 +227,11 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_page =
alloc_page(GFP_KERNEL|__GFP_DMA32);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
set_page_locked(dev_priv->gart_info.sg_dummy_page);
#else
SetPageLocked(dev_priv->gart_info.sg_dummy_page);
#endif
dev_priv->gart_info.sg_dummy_bus =
pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

View file

@ -25,6 +25,7 @@
klibdrmincludedir = ${includedir}/drm
klibdrminclude_HEADERS = \
drm.h \
drm_mode.h \
drm_sarea.h \
i915_drm.h \
mach64_drm.h \

View file

@ -814,7 +814,7 @@ struct drm_gem_open {
#define DRM_COMMAND_END 0xA0
/* typedef area */
#if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;

View file

@ -63,7 +63,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
last_head = ring->head;
last_acthd = acthd;
msleep_interruptible (10);
DRM_UDELAY(10 * 1000);
}
return -EBUSY;
@ -81,14 +81,16 @@ void i915_ring_validate(struct drm_device *dev, const char *func, int line)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
u32 head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;
u32 tail = I915_READ(PRB0_TAIL) & HEAD_ADDR;
u32 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
if (tail != ring->tail) {
DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
func, line,
ring->head, head, ring->tail, tail);
#ifdef __linux__
BUG_ON(1);
#endif
}
}
#endif
@ -1000,7 +1002,6 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
return 0;
}
@ -1023,6 +1024,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
#ifdef I915_HAVE_GEM
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
@ -1039,6 +1041,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
#endif
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

View file

@ -43,6 +43,7 @@
#if defined(__linux__)
#define I915_HAVE_BUFFER
#define I915_HAVE_GEM
#endif
/* Interface history:
@ -301,8 +302,9 @@ struct drm_i915_private {
u8 saveCR[37];
struct {
#ifdef __linux__
struct drm_mm gtt_space;
#endif
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@ -335,7 +337,7 @@ struct drm_i915_private {
* outstanding.
*/
struct list_head request_list;
#ifdef __linux__
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
@ -344,7 +346,7 @@ struct drm_i915_private {
* fires, go retire requests.
*/
struct delayed_work retire_work;
#endif
uint32_t next_gem_seqno;
/**
@ -501,6 +503,7 @@ extern int i915_dma_cleanup(struct drm_device *dev);
extern int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);
extern void i915_free_hardware_status(struct drm_device *dev);
int i915_emit_box(struct drm_device * dev,
struct drm_clip_rect __user * boxes,
@ -610,8 +613,6 @@ extern unsigned int i915_fbpercrtc;
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_INACTIVE
void i915_verify_inactive(struct drm_device *dev, char *file, int line);
#else

View file

@ -101,21 +101,28 @@ int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
}
static int
i915_init_hws_phys(struct drm_device *dev)
i915_init_hardware_status(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_dma_handle_t *dmah;
int ret = 0;
dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
#ifdef __FreeBSD__
DRM_UNLOCK();
#endif
dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
0xffffffff);
if (!dev_priv->status_page_dmah) {
#ifdef __FreeBSD__
DRM_LOCK();
#endif
if (!dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
ret = -ENOMEM;
goto out;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
dev_priv->status_page_dmah = dmah;
dev_priv->hw_status_page = dmah->vaddr;
dev_priv->dma_status_page = dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
@ -126,6 +133,23 @@ out:
return ret;
}
void i915_free_hardware_status(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x02080, 0x1ffff000);
}
}
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -139,9 +163,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Let GEM Manage from end of prealloc space to end of aperture */
i915_gem_do_init(dev, prealloc_size, agp_size);
if (!I915_NEED_GFX_HWS(dev))
i915_init_hws_phys(dev);
ret = i915_gem_init_ringbuffer(dev);
if (ret)
goto out;
@ -310,6 +331,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_hardware_status(dev);
if (ret)
return ret;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
@ -333,6 +360,8 @@ int i915_driver_unload(struct drm_device *dev)
u32 temp;
i915_free_hardware_status(dev);
dev_priv->vblank_pipe = 0;
dev_priv->irq_enabled = 0;

View file

@ -408,7 +408,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
return 0;
}
@ -492,16 +492,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
u32 pipea_stats = 0, pipeb_stats = 0, tvdac;
int hotplug = 0;
int vblank = 0;
#ifdef __linux__
if (dev->pdev->msi_enabled)
I915_WRITE(IMR, ~0);
#endif
iir = I915_READ(IIR);
atomic_inc(&dev_priv->irq_received);
if (iir == 0) {
#ifdef __linux__
if (dev->pdev->msi_enabled) {
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
#endif
return IRQ_NONE;
}
@ -520,8 +523,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
I915_WRITE(IIR, iir);
#ifdef __linux__
if (dev->pdev->msi_enabled)
I915_WRITE(IMR, dev_priv->irq_mask_reg);
#endif
(void) I915_READ(IIR); /* Flush posted writes */
/* This is a global event, and not a pipe A event */
@ -552,7 +558,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
#endif
if (iir & I915_USER_INTERRUPT) {
#ifdef I915_HAVE_GEM
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
#endif
DRM_WAKEUP(&dev_priv->irq_queue);
}
@ -634,7 +642,9 @@ void i915_user_irq_off(struct drm_device *dev)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
DRM_SPINLOCK(&dev_priv->user_irq_lock);
#ifdef __linux__
BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
#endif
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
@ -1110,11 +1120,9 @@ int i915_driver_irq_postinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, num_pipes = 2;
DRM_SPININIT(&dev_priv->swaps_lock, "swap");
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
dev_priv->irq_mask_reg = ~0;

View file

@ -834,8 +834,14 @@ static int mach64_bm_dma_test(struct drm_device * dev)
/* FIXME: get a dma buffer from the freelist here */
DRM_DEBUG("Allocating data memory ...\n");
#ifdef __FreeBSD__
DRM_UNLOCK();
#endif
cpu_addr_dmah =
drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
#ifdef __FreeBSD__
DRM_LOCK();
#endif
if (!cpu_addr_dmah) {
DRM_INFO("data-memory allocation failed!\n");
return -ENOMEM;

View file

@ -178,5 +178,7 @@ struct drm_nouveau_sarea {
#define DRM_NOUVEAU_MEM_ALLOC 0x08
#define DRM_NOUVEAU_MEM_FREE 0x09
#define DRM_NOUVEAU_MEM_TILE 0x0a
#define DRM_NOUVEAU_SUSPEND 0x0b
#define DRM_NOUVEAU_RESUME 0x0c
#endif /* __NOUVEAU_DRM_H__ */

View file

@ -335,6 +335,13 @@ struct drm_nouveau_private {
unsigned char i2c_read[MAX_NUM_DCB_ENTRIES];
unsigned char i2c_write[MAX_NUM_DCB_ENTRIES];
} dcb_table;
struct nouveau_suspend_resume {
uint32_t fifo_mode;
uint32_t graph_ctx_control;
uint32_t graph_state;
uint32_t *ramin_copy;
uint64_t ramin_size;
} susres;
};
#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
@ -369,6 +376,10 @@ extern void nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_resume(struct drm_device *, void *data,
struct drm_file *);
/* nouveau_mem.c */
extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@ -417,6 +428,7 @@ extern int nouveau_fifo_alloc(struct drm_device *dev,
struct mem_block *pushbuf,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
extern void nouveau_fifo_free(struct nouveau_channel *);
extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
extern int nouveau_gpuobj_early_init(struct drm_device *);

View file

@ -390,7 +390,7 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return 0;
}
static int
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
@ -594,6 +594,8 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);

View file

@ -739,7 +739,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
PAGE_SIZE,
DMA_BIDIRECTIONAL);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
/* Not 100% sure this is the right kdev in all cases. */
if (dma_mapping_error(&dev->primary->kdev, dev->sg->busaddr[idx])) {
#else
if (dma_mapping_error(dev->sg->busaddr[idx])) {
#endif
return -ENOMEM;
}
}
@ -937,7 +942,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
/* RAMFC */
size += 0x1000;
/* PGRAPH context */
size += 0x60000;
size += 0x70000;
}
DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",

View file

@ -1,5 +1,6 @@
/*
* Copyright 2005 Stephane Marchesin
* Copyright 2008 Stuart Bennett
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -246,6 +247,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
case 0x90:
case 0xA0:
engine->instmem.init = nv50_instmem_init;
engine->instmem.takedown= nv50_instmem_takedown;
engine->instmem.populate = nv50_instmem_populate;
@ -749,3 +751,172 @@ void nouveau_wait_for_idle(struct drm_device *dev)
}
}
}
static int nouveau_suspend(struct drm_device *dev)
{
struct mem_block *p;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_suspend_resume *susres = &dev_priv->susres;
struct nouveau_engine *engine = &dev_priv->Engine;
int i;
drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
susres->ramin_size = 0;
list_for_each(p, dev_priv->ramin_heap)
if (p->file_priv && (p->start + p->size) > susres->ramin_size)
susres->ramin_size = p->start + p->size;
if (!(susres->ramin_copy = drm_alloc(susres->ramin_size, DRM_MEM_DRIVER))) {
DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n");
return -ENOMEM;
}
for (i = 0; i < engine->fifo.channels; i++) {
uint64_t t_start = engine->timer.read(dev);
if (dev_priv->fifos[i] == NULL)
continue;
/* Give the channel a chance to idle, wait 2s (hopefully) */
while (!nouveau_channel_idle(dev_priv->fifos[i]))
if (engine->timer.read(dev) - t_start > 2000000000ULL) {
DRM_ERROR("Failed to idle channel %d before "
"suspend.", dev_priv->fifos[i]->id);
return -EBUSY;
}
}
nouveau_wait_for_idle(dev);
NV_WRITE(NV04_PGRAPH_FIFO, 0);
/* disable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
susres->fifo_mode = NV_READ(NV04_PFIFO_MODE);
if (dev_priv->card_type >= NV_10) {
susres->graph_state = NV_READ(NV10_PGRAPH_STATE);
susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL);
} else {
susres->graph_state = NV_READ(NV04_PGRAPH_STATE);
susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL);
}
engine->fifo.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
engine->graph.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
nouveau_wait_for_idle(dev);
for (i = 0; i < susres->ramin_size / 4; i++)
susres->ramin_copy[i] = NV_RI32(i << 2);
/* reenable the fifo caches */
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
NV_WRITE(NV04_PGRAPH_FIFO, 1);
return 0;
}
static int nouveau_resume(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_suspend_resume *susres = &dev_priv->susres;
struct nouveau_engine *engine = &dev_priv->Engine;
int i;
if (!susres->ramin_copy)
return -EINVAL;
DRM_DEBUG("Doing resume\n");
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
struct drm_agp_info info;
struct drm_agp_mode mode;
/* agp bridge drivers don't re-enable agp on resume. lame. */
if ((i = drm_agp_info(dev, &info))) {
DRM_ERROR("Unable to get AGP info: %d\n", i);
return i;
}
mode.mode = info.mode;
if ((i = drm_agp_enable(dev, mode))) {
DRM_ERROR("Unable to enable AGP: %d\n", i);
return i;
}
}
for (i = 0; i < susres->ramin_size / 4; i++)
NV_WI32(i << 2, susres->ramin_copy[i]);
engine->mc.init(dev);
engine->timer.init(dev);
engine->fb.init(dev);
engine->graph.init(dev);
engine->fifo.init(dev);
NV_WRITE(NV04_PGRAPH_FIFO, 0);
/* disable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
/* PMC power cycling PFIFO in init clobbers some of the stuff stored in
* PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful
*/
for (i = 0; i < susres->ramin_size / 4; i++)
NV_WI32(i << 2, susres->ramin_copy[i]);
engine->fifo.load_context(dev_priv->fifos[0]);
NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode);
engine->graph.load_context(dev_priv->fifos[0]);
nouveau_wait_for_idle(dev);
if (dev_priv->card_type >= NV_10) {
NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
} else {
NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
}
/* reenable the fifo caches */
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
if (dev->irq_enabled)
nouveau_irq_postinstall(dev);
drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
susres->ramin_copy = NULL;
susres->ramin_size = 0;
return 0;
}
int nouveau_ioctl_suspend(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
return nouveau_suspend(dev);
}
int nouveau_ioctl_resume(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
return nouveau_resume(dev);
}
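A hedged sketch of how userspace could exercise the two new ioctls through libdrm; drmCommandNone() fits because neither ioctl carries a payload. The helper function name is hypothetical.

#include <xf86drm.h>
#include "nouveau_drm.h"

/* Suspend and then resume the card state from an authenticated DRM fd. */
static int
nouveau_suspend_resume_cycle(int fd)
{
	int ret;

	ret = drmCommandNone(fd, DRM_NOUVEAU_SUSPEND);
	if (ret)
		return ret;
	return drmCommandNone(fd, DRM_NOUVEAU_RESUME);
}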

View file

@ -694,13 +694,15 @@ int nv20_graph_init(struct drm_device *dev) {
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
dev_priv->ctx_table_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->ctx_table)))
return ret;
if (!dev_priv->ctx_table) {
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
dev_priv->ctx_table_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->ctx_table)))
return ret;
}
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
dev_priv->ctx_table->instance >> 4);
@ -812,13 +814,15 @@ int nv30_graph_init(struct drm_device *dev)
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
dev_priv->ctx_table_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->ctx_table)))
return ret;
if (!dev_priv->ctx_table) {
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
dev_priv->ctx_table_size, 16,
NVOBJ_FLAG_ZERO_ALLOC,
&dev_priv->ctx_table)))
return ret;
}
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
dev_priv->ctx_table->instance >> 4);

File diff suppressed because it is too large

View file

@ -772,7 +772,6 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
dev_priv->ring.size_l2qw);
#endif
/* Initialize the scratch register pointer. This will cause
* the scratch register values to be written out to memory
* whenever they are updated.

View file

@ -24,6 +24,7 @@ TESTS = auth \
setversion \
updatedraw \
gem_basic \
gem_flink \
gem_readwrite \
gem_mmap \
radeon_gem_mmap \

128
tests/gem_flink.c Normal file
View file

@ -0,0 +1,128 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
#include "i915_drm.h"
static void
test_flink(int fd)
{
struct drm_i915_gem_create create;
struct drm_gem_flink flink;
struct drm_gem_open open;
int ret;
printf("Testing flink and open.\n");
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
flink.handle = create.handle;
ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
assert(ret == 0);
open.name = flink.name;
ret = ioctl(fd, DRM_IOCTL_GEM_OPEN, &open);
assert(ret == 0);
assert(open.handle != 0);
}
static void
test_double_flink(int fd)
{
struct drm_i915_gem_create create;
struct drm_gem_flink flink;
struct drm_gem_flink flink2;
int ret;
printf("Testing repeated flink.\n");
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
flink.handle = create.handle;
ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
assert(ret == 0);
flink2.handle = create.handle;
ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink2);
assert(ret == 0);
assert(flink2.name == flink.name);
}
static void
test_bad_flink(int fd)
{
struct drm_gem_flink flink;
int ret;
printf("Testing error return on bad flink ioctl.\n");
flink.handle = 0x10101010;
ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
assert(ret == -1 && errno == EBADF);
}
static void
test_bad_open(int fd)
{
struct drm_gem_open open;
int ret;
printf("Testing error return on bad open ioctl.\n");
open.name = 0x10101010;
ret = ioctl(fd, DRM_IOCTL_GEM_OPEN, &open);
assert(ret == -1 && errno == ENOENT);
}
int main(int argc, char **argv)
{
int fd;
fd = drm_open_any();
test_flink(fd);
test_double_flink(fd);
test_bad_flink(fd);
test_bad_open(fd);
return 0;
}