Merge branch 'master' into r6xx-r7xx-support

This commit is contained in:
Alex Deucher 2009-03-30 01:54:54 -04:00
commit c3c2ae466c
177 changed files with 24964 additions and 10420 deletions

2
.gitignore vendored
View file

@ -36,6 +36,8 @@ i915.kld
install-sh
libdrm/config.h.in
libdrm.pc
libdrm_intel.pc
libdrm_nouveau.pc
libtool
ltmain.sh
mach64.kld

View file

@ -25,6 +25,6 @@ AUTOMAKE_OPTIONS = foreign
SUBDIRS = libdrm shared-core tests
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm.pc
pkgconfig_DATA = libdrm.pc libdrm_intel.pc
EXTRA_DIST = libdrm.pc.in
EXTRA_DIST = libdrm.pc.in libdrm_intel.pc.in

9
README
View file

@ -3,8 +3,11 @@ DRM README file
There are two main parts to this package: the DRM client library/interface
(libdrm.so) and kernel/hardware-specific device modules (such as i915.ko).
(libdrm.so) and kernel/hardware-specific device modules (such as radeon.ko).
The kernel device modules are not shipped with libdrm releases and should only
be built from the git tree by developers and bleeding-edge testers of
non-Intel hardware. The Intel kernel modules are developed in the Linux
kernel tree.
Compiling
@ -19,7 +22,7 @@ Then,
make install
To build the device-specific kernel modules:
To build the device-specific kernel modules from the git tree:
cd linux-core/
make

View file

@ -1,6 +1,6 @@
SHARED= ../shared-core
SUBDIR = drm mach64 mga r128 radeon savage sis tdfx i915 # via
SUBDIR = drm mach64 mga r128 radeon savage sis tdfx i915 #nouveau
.include <bsd.obj.mk>

View file

@ -60,7 +60,6 @@ struct drm_file;
#include <sys/signalvar.h>
#include <sys/poll.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
@ -84,6 +83,7 @@ struct drm_file;
#include <sys/agpio.h>
#include <sys/mutex.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/selinfo.h>
#include <sys/bus.h>
@ -208,15 +208,6 @@ enum {
#define DRM_MTRR_WC MDF_WRITECOMBINE
#define jiffies ticks
/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
#ifndef PCIY_AGP
#define PCIY_AGP 0x02
#endif
#ifndef PCIY_EXPRESS
#define PCIY_EXPRESS 0x10
#endif
typedef unsigned long dma_addr_t;
typedef u_int64_t u64;
typedef u_int32_t u32;
@ -246,17 +237,23 @@ typedef u_int8_t u8;
#endif
#define DRM_READ8(map, offset) \
*(volatile u_int8_t *) (((unsigned long)(map)->handle) + (offset))
*(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset))
#define DRM_READ16(map, offset) \
*(volatile u_int16_t *) (((unsigned long)(map)->handle) + (offset))
*(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset))
#define DRM_READ32(map, offset) \
*(volatile u_int32_t *)(((unsigned long)(map)->handle) + (offset))
*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset))
#define DRM_WRITE8(map, offset, val) \
*(volatile u_int8_t *) (((unsigned long)(map)->handle) + (offset)) = val
*(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)) = val
#define DRM_WRITE16(map, offset, val) \
*(volatile u_int16_t *) (((unsigned long)(map)->handle) + (offset)) = val
*(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)) = val
#define DRM_WRITE32(map, offset, val) \
*(volatile u_int32_t *)(((unsigned long)(map)->handle) + (offset)) = val
*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)) = val
#define DRM_VERIFYAREA_READ( uaddr, size ) \
(!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ))
@ -299,22 +296,22 @@ for ( ret = 0 ; !ret && !(condition) ; ) { \
DRM_UNLOCK(); \
mtx_lock(&dev->irq_lock); \
if (!(condition)) \
ret = -mtx_sleep(&(queue), &dev->irq_lock, \
PZERO | PCATCH, "drmwtq", (timeout)); \
ret = -mtx_sleep(&(queue), &dev->irq_lock, \
PCATCH, "drmwtq", (timeout)); \
mtx_unlock(&dev->irq_lock); \
DRM_LOCK(); \
}
#define DRM_ERROR(fmt, arg...) \
#define DRM_ERROR(fmt, ...) \
printf("error: [" DRM_NAME ":pid%d:%s] *ERROR* " fmt, \
DRM_CURRENTPID, __func__ , ## arg)
DRM_CURRENTPID, __func__ , ##__VA_ARGS__)
#define DRM_INFO(fmt, arg...) printf("info: [" DRM_NAME "] " fmt , ## arg)
#define DRM_INFO(fmt, ...) printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__)
#define DRM_DEBUG(fmt, arg...) do { \
#define DRM_DEBUG(fmt, ...) do { \
if (drm_debug_flag) \
printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID, \
__func__ , ## arg); \
__func__ , ##__VA_ARGS__); \
} while (0)
typedef struct drm_pci_id_list
@ -325,6 +322,12 @@ typedef struct drm_pci_id_list
char *name;
} drm_pci_id_list_t;
/*
 * One entry in an MSI blacklist: a PCI vendor/device ID pair for a
 * device on which MSI interrupts should not be enabled.  Tables of
 * these entries are terminated by a zeroed ({0, 0}) sentinel.
 */
struct drm_msi_blacklist_entry
{
int vendor; /* PCI vendor ID */
int device; /* PCI device ID */
};
#define DRM_AUTH 0x1
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
@ -470,11 +473,13 @@ typedef struct drm_agp_head {
} drm_agp_head_t;
typedef struct drm_sg_mem {
unsigned long handle;
void *virtual;
int pages;
dma_addr_t *busaddr;
drm_dma_handle_t *dmah; /* Handle to PCI memory for ATI PCIGART table */
unsigned long handle;
void *virtual;
int pages;
dma_addr_t *busaddr;
struct drm_dma_handle *sg_dmah; /* Handle for sg_pages */
struct drm_dma_handle *dmah; /* Handle to PCI memory */
/* for ATI PCIGART table */
} drm_sg_mem_t;
typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
@ -605,7 +610,7 @@ struct drm_driver_info {
};
/* Length for the array of resource pointers for drm_get_resource_*. */
#define DRM_MAX_PCI_RESOURCE 3
#define DRM_MAX_PCI_RESOURCE 6
/**
* DRM device functions structure
@ -631,7 +636,6 @@ struct drm_device {
struct mtx irq_lock; /* protects irq condition checks */
struct mtx dev_lock; /* protects everything else */
DRM_SPINTYPE drw_lock;
DRM_SPINTYPE tsk_lock;
/* Usage Counters */
int open_count; /* Outstanding files open */
@ -660,6 +664,7 @@ struct drm_device {
/* Context support */
int irq; /* Interrupt used by board */
int irq_enabled; /* True if the irq handler is enabled */
int msi_enabled; /* MSI enabled */
int irqrid; /* Interrupt used by board */
struct resource *irqr; /* Resource for interrupt used by board */
void *irqh; /* Handle from bus_setup_intr */
@ -698,9 +703,6 @@ struct drm_device {
struct unrhdr *drw_unrhdr;
/* RB tree of drawable infos */
RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
struct task locked_task;
void (*locked_task_call)(struct drm_device *dev);
};
static __inline__ int drm_core_check_feature(struct drm_device *dev,
@ -721,10 +723,10 @@ static inline int drm_core_has_AGP(struct drm_device *dev)
extern int drm_debug_flag;
/* Device setup support (drm_drv.c) */
int drm_probe(device_t nbdev, drm_pci_id_list_t *idlist);
int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist);
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist);
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist);
void drm_close(void *data);
int drm_detach(device_t nbdev);
int drm_detach(device_t kdev);
d_ioctl_t drm_ioctl;
d_open_t drm_open;
d_read_t drm_read;
@ -795,6 +797,7 @@ void drm_handle_vblank(struct drm_device *dev, int crtc);
u32 drm_vblank_count(struct drm_device *dev, int crtc);
int drm_vblank_get(struct drm_device *dev, int crtc);
void drm_vblank_put(struct drm_device *dev, int crtc);
void drm_vblank_cleanup(struct drm_device *dev);
int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
int drm_vblank_init(struct drm_device *dev, int num_crtcs);
void drm_vbl_send_signals(struct drm_device *dev, int crtc);
@ -902,8 +905,8 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_addbufs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_addbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_infobufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_markbufs(struct drm_device *dev, void *data,
@ -921,8 +924,6 @@ int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_locked_tasklet(struct drm_device *dev,
void (*tasklet)(struct drm_device *dev));
/* AGP/GART support (drm_agpsupport.c) */
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,

View file

@ -213,7 +213,7 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
DRM_LOCK();
return EINVAL;
}
map->offset = map->offset + dev->sg->handle;
map->offset += dev->sg->handle;
break;
case _DRM_CONSISTENT:
/* Unfortunately, we don't get any alignment specification from
@ -877,8 +877,7 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
return ret;
}
int drm_addbufs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_buf_desc *request = data;
int err;
@ -1050,10 +1049,10 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff);
VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE, dev->devnode, foff);
#else
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, SLIST_FIRST(&dev->devnode->si_hlist),
foff);
#endif
if (retcode)
@ -1103,7 +1102,7 @@ int drm_order(unsigned long size)
if (size == 0)
return 0;
order = ffsl(size) - 1;
order = flsl(size) - 1;
if (size & ~(1ul << order))
++order;

View file

@ -79,7 +79,7 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
@ -92,10 +92,11 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
@ -114,7 +115,6 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@ -128,52 +128,83 @@ static struct cdevsw drm_cdevsw = {
.d_poll = drm_poll,
.d_mmap = drm_mmap,
.d_name = "drm",
.d_flags = D_TRACKCLOSE | D_NEEDGIANT
.d_flags = D_TRACKCLOSE
};
int drm_probe(device_t dev, drm_pci_id_list_t *idlist)
int drm_msi = 1; /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
/*
 * Devices with known-broken MSI support.  drm_attach() consults this
 * table (via drm_msi_is_blacklisted()) and falls back to classic
 * line-based interrupts for any device listed here.
 */
static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
{0x8086, 0x2772}, /* Intel i945G */ \
{0x8086, 0x27A2}, /* Intel i945GM */ \
{0x8086, 0x27AE}, /* Intel i945GME */ \
{0, 0} /* end-of-list sentinel */
};
static int drm_msi_is_blacklisted(int vendor, int device)
{
int i = 0;
for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
if ((drm_msi_blacklist[i].vendor == vendor) &&
(drm_msi_blacklist[i].device == device)) {
return 1;
}
}
return 0;
}
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
drm_pci_id_list_t *id_entry;
int vendor, device;
#if __FreeBSD_version < 700010
device_t realdev;
if (!strcmp(device_get_name(dev), "drmsub"))
realdev = device_get_parent(dev);
if (!strcmp(device_get_name(kdev), "drmsub"))
realdev = device_get_parent(kdev);
else
realdev = dev;
realdev = kdev;
vendor = pci_get_vendor(realdev);
device = pci_get_device(realdev);
#else
vendor = pci_get_vendor(dev);
device = pci_get_device(dev);
vendor = pci_get_vendor(kdev);
device = pci_get_device(kdev);
#endif
if (pci_get_class(kdev) != PCIC_DISPLAY
|| pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
return ENXIO;
id_entry = drm_find_description(vendor, device, idlist);
if (id_entry != NULL) {
device_set_desc(dev, id_entry->name);
if (!device_get_desc(kdev)) {
DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
device_set_desc(kdev, id_entry->name);
}
return 0;
}
return ENXIO;
}
int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
struct drm_device *dev;
drm_pci_id_list_t *id_entry;
int unit;
int unit, msicount;
unit = device_get_unit(nbdev);
dev = device_get_softc(nbdev);
unit = device_get_unit(kdev);
dev = device_get_softc(kdev);
#if __FreeBSD_version < 700010
if (!strcmp(device_get_name(nbdev), "drmsub"))
dev->device = device_get_parent(nbdev);
if (!strcmp(device_get_name(kdev), "drmsub"))
dev->device = device_get_parent(kdev);
else
dev->device = nbdev;
dev->device = kdev;
#else
dev->device = nbdev;
dev->device = kdev;
#endif
dev->devnode = make_dev(&drm_cdevsw,
unit,
@ -182,22 +213,67 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
DRM_DEV_MODE,
"dri/card%d", unit);
#if __FreeBSD_version >= 700053
dev->pci_domain = pci_get_domain(dev->device);
#else
dev->pci_domain = 0;
#endif
dev->pci_bus = pci_get_bus(dev->device);
dev->pci_slot = pci_get_slot(dev->device);
dev->pci_func = pci_get_function(dev->device);
dev->pci_vendor = pci_get_vendor(dev->device);
dev->pci_device = pci_get_device(dev->device);
if (drm_msi &&
!drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
msicount = pci_msi_count(dev->device);
DRM_DEBUG("MSI count = %d\n", msicount);
if (msicount > 1)
msicount = 1;
if (pci_alloc_msi(dev->device, &msicount) == 0) {
DRM_INFO("MSI enabled %d message(s)\n", msicount);
dev->msi_enabled = 1;
dev->irqrid = 1;
}
}
dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
&dev->irqrid, RF_SHAREABLE);
if (!dev->irqr) {
return ENOENT;
}
dev->irq = (int) rman_get_start(dev->irqr);
mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
mtx_init(&dev->tsk_lock, "drmtsk", NULL, MTX_DEF);
id_entry = drm_find_description(pci_get_vendor(dev->device),
pci_get_device(dev->device), idlist);
id_entry = drm_find_description(dev->pci_vendor,
dev->pci_device, idlist);
dev->id_entry = id_entry;
return drm_load(dev);
}
int drm_detach(device_t dev)
int drm_detach(device_t kdev)
{
drm_unload(device_get_softc(dev));
struct drm_device *dev;
dev = device_get_softc(kdev);
drm_unload(dev);
bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid, dev->irqr);
if (dev->msi_enabled) {
pci_release_msi(dev->device);
DRM_INFO("MSI released\n");
}
return 0;
}
@ -214,7 +290,8 @@ drm_pci_id_list_t *drm_find_description(int vendor, int device,
for (i = 0; idlist[i].vendor != 0; i++) {
if ((idlist[i].vendor == vendor) &&
(idlist[i].device == device)) {
((idlist[i].device == device) ||
(idlist[i].device == 0))) {
return &idlist[i];
}
}
@ -346,19 +423,6 @@ static int drm_load(struct drm_device *dev)
DRM_DEBUG("\n");
dev->irq = pci_get_irq(dev->device);
#if __FreeBSD_version >= 700053
dev->pci_domain = pci_get_domain(dev->device);
#else
dev->pci_domain = 0;
#endif
dev->pci_bus = pci_get_bus(dev->device);
dev->pci_slot = pci_get_slot(dev->device);
dev->pci_func = pci_get_function(dev->device);
dev->pci_vendor = pci_get_vendor(dev->device);
dev->pci_device = pci_get_device(dev->device);
TAILQ_INIT(&dev->maplist);
drm_mem_init();
@ -433,7 +497,6 @@ error:
DRM_UNLOCK();
destroy_dev(dev->devnode);
mtx_destroy(&dev->tsk_lock);
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
@ -461,6 +524,8 @@ static void drm_unload(struct drm_device *dev)
DRM_DEBUG("mtrr_del = %d", retcode);
}
drm_vblank_cleanup(dev);
DRM_LOCK();
drm_lastclose(dev);
DRM_UNLOCK();
@ -496,14 +561,12 @@ static void drm_unload(struct drm_device *dev)
if (pci_disable_busmaster(dev->device))
DRM_ERROR("Request to disable bus-master failed.\n");
mtx_destroy(&dev->tsk_lock);
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->dev_lock);
}
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_version *version = data;
@ -604,7 +667,7 @@ void drm_close(void *data)
}
/* Contention */
retcode = mtx_sleep((void *)&dev->lock.lock_queue,
&dev->dev_lock, PZERO | PCATCH, "drmlk2", 0);
&dev->dev_lock, PCATCH, "drmlk2", 0);
if (retcode)
break;
}
@ -651,9 +714,7 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
int is_driver_ioctl = 0;
struct drm_file *file_priv;
DRM_LOCK();
retcode = devfs_get_cdevpriv((void **)&file_priv);
DRM_UNLOCK();
if (retcode != 0) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;

View file

@ -33,8 +33,6 @@
#include "drmP.h"
#include "drm.h"
static void drm_locked_task(void *context, int pending __unused);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@ -95,7 +93,7 @@ static void vblank_disable_fn(void *arg)
}
}
static void drm_vblank_cleanup(struct drm_device *dev)
void drm_vblank_cleanup(struct drm_device *dev)
{
unsigned long irqflags;
@ -171,13 +169,6 @@ int drm_irq_install(struct drm_device *dev)
DRM_UNLOCK();
/* Install handler */
dev->irqrid = 0;
dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
&dev->irqrid, RF_SHAREABLE);
if (!dev->irqr) {
retcode = ENOENT;
goto err;
}
#if __FreeBSD_version >= 700031
retcode = bus_setup_intr(dev->device, dev->irqr,
INTR_TYPE_TTY | INTR_MPSAFE,
@ -195,30 +186,21 @@ int drm_irq_install(struct drm_device *dev)
dev->driver->irq_postinstall(dev);
DRM_UNLOCK();
TASK_INIT(&dev->locked_task, 0, drm_locked_task, dev);
return 0;
err:
DRM_LOCK();
dev->irq_enabled = 0;
if (dev->irqrid != 0) {
bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
dev->irqr);
dev->irqrid = 0;
}
DRM_UNLOCK();
return retcode;
}
int drm_irq_uninstall(struct drm_device *dev)
{
int irqrid;
if (!dev->irq_enabled)
return EINVAL;
dev->irq_enabled = 0;
irqrid = dev->irqrid;
dev->irqrid = 0;
DRM_DEBUG("irq=%d\n", dev->irq);
@ -226,11 +208,8 @@ int drm_irq_uninstall(struct drm_device *dev)
DRM_UNLOCK();
bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
bus_release_resource(dev->device, SYS_RES_IRQ, irqrid, dev->irqr);
DRM_LOCK();
drm_vblank_cleanup(dev);
return 0;
}
@ -301,6 +280,7 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
atomic_add_acq_int(&dev->vblank[crtc].refcount, 1);
DRM_DEBUG("vblank refcount = %d\n", dev->vblank[crtc].refcount);
if (dev->vblank[crtc].refcount == 1 &&
!dev->vblank[crtc].enabled) {
ret = dev->driver->enable_vblank(dev, crtc);
@ -323,6 +303,7 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
/* Last user schedules interrupt disable */
atomic_subtract_acq_int(&dev->vblank[crtc].refcount, 1);
DRM_DEBUG("vblank refcount = %d\n", dev->vblank[crtc].refcount);
if (dev->vblank[crtc].refcount == 0)
callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
(timeout_t *)vblank_disable_fn, (void *)dev);
@ -385,8 +366,8 @@ out:
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
unsigned int flags, seq, crtc;
int ret = 0;
int flags, seq, crtc;
if (!dev->irq_enabled)
return EINVAL;
@ -406,8 +387,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
return EINVAL;
ret = drm_vblank_get(dev, crtc);
if (ret)
if (ret) {
DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
return ret;
}
seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
@ -446,14 +429,20 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
#endif
ret = EINVAL;
} else {
DRM_LOCK();
/* shared code returns -errno */
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
((drm_vblank_count(dev, crtc)
- vblwait->request.sequence) <= (1 << 23)));
DRM_UNLOCK();
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
for ( ret = 0 ; !ret && !((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ; ) {
mtx_lock(&dev->irq_lock);
if (!((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)))
ret = mtx_sleep(&dev->vblank[crtc].queue,
&dev->irq_lock, PCATCH, "vblwtq",
3 * DRM_HZ);
mtx_unlock(&dev->irq_lock);
}
DRM_DEBUG("return = %d\n", ret);
if (ret != EINTR) {
struct timeval now;
@ -461,6 +450,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
vblwait->reply.sequence = drm_vblank_count(dev, crtc);
DRM_DEBUG("returning %d to client\n",
vblwait->reply.sequence);
} else {
DRM_DEBUG("vblank wait interrupted by signal\n");
}
}
@ -504,46 +497,3 @@ void drm_handle_vblank(struct drm_device *dev, int crtc)
drm_vbl_send_signals(dev, crtc);
}
static void drm_locked_task(void *context, int pending __unused)
{
struct drm_device *dev = context;
DRM_SPINLOCK(&dev->tsk_lock);
DRM_LOCK(); /* XXX drm_lock_take() should do it's own locking */
if (dev->locked_task_call == NULL ||
drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT) == 0) {
DRM_UNLOCK();
DRM_SPINUNLOCK(&dev->tsk_lock);
return;
}
dev->lock.file_priv = NULL; /* kernel owned */
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
DRM_UNLOCK();
dev->locked_task_call(dev);
drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
dev->locked_task_call = NULL;
DRM_SPINUNLOCK(&dev->tsk_lock);
}
void
drm_locked_tasklet(struct drm_device *dev,
void (*tasklet)(struct drm_device *dev))
{
DRM_SPINLOCK(&dev->tsk_lock);
if (dev->locked_task_call != NULL) {
DRM_SPINUNLOCK(&dev->tsk_lock);
return;
}
dev->locked_task_call = tasklet;
DRM_SPINUNLOCK(&dev->tsk_lock);
taskqueue_enqueue(taskqueue_swi, &dev->locked_task);
}

View file

@ -64,6 +64,10 @@ list_del(struct list_head *entry) {
#define list_for_each(entry, head) \
for (entry = (head)->next; entry != head; entry = (entry)->next)
/*
 * Iterate 'entry' backwards over the list headed by 'head', starting
 * at (head)->prev and stopping when the head is reached.  The head
 * node itself is never visited.
 */
#define list_for_each_prev(entry, head) \
for (entry = (head)->prev; entry != (head); \
entry = entry->prev)
#define list_for_each_safe(entry, temp, head) \
for (entry = (head)->next, temp = (entry)->next; \
entry != head; \

View file

@ -79,7 +79,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
/* Contention */
ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
PZERO | PCATCH, "drmlk2", 0);
PCATCH, "drmlk2", 0);
if (ret != 0)
break;
}
@ -112,13 +112,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return EINVAL;
}
DRM_SPINLOCK(&dev->tsk_lock);
if (dev->locked_task_call != NULL) {
dev->locked_task_call(dev);
dev->locked_task_call = NULL;
}
DRM_SPINUNLOCK(&dev->tsk_lock);
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
DRM_LOCK();

View file

@ -80,15 +80,15 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
size, 1, size, /* maxsize, nsegs, maxsegsize */
BUS_DMA_ALLOCNOW, NULL, NULL, /* flags, lockfunc, lockfuncargs */
0, NULL, NULL, /* flags, lockfunc, lockfuncargs */
&dmah->tag);
if (ret != 0) {
free(dmah, DRM_MEM_DMA);
return NULL;
}
ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr, BUS_DMA_NOWAIT,
&dmah->map);
ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmah->map);
if (ret != 0) {
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
@ -96,7 +96,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
}
ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
drm_pci_busdma_callback, dmah, 0);
drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT | BUS_DMA_NOCACHE);
if (ret != 0) {
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);

View file

@ -36,20 +36,16 @@
#include "drmP.h"
#define DEBUG_SCATTER 0
static void drm_sg_alloc_cb(void *arg, bus_dma_segment_t *segs,
int nsegs, int error);
void drm_sg_cleanup(drm_sg_mem_t *entry)
int
drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
{
free((void *)entry->handle, DRM_MEM_PAGES);
free(entry->busaddr, DRM_MEM_PAGES);
free(entry, DRM_MEM_SGLISTS);
}
int drm_sg_alloc(struct drm_device * dev, struct drm_scatter_gather * request)
{
drm_sg_mem_t *entry;
struct drm_sg_mem *entry;
struct drm_dma_handle *dmah;
unsigned long pages;
int i;
int ret;
if (dev->sg)
return EINVAL;
@ -66,21 +62,57 @@ int drm_sg_alloc(struct drm_device * dev, struct drm_scatter_gather * request)
entry->busaddr = malloc(pages * sizeof(*entry->busaddr), DRM_MEM_PAGES,
M_WAITOK | M_ZERO);
if (!entry->busaddr) {
drm_sg_cleanup(entry);
free(entry, DRM_MEM_SGLISTS);
return ENOMEM;
}
entry->handle = (long)malloc(pages << PAGE_SHIFT, DRM_MEM_PAGES,
M_WAITOK | M_ZERO);
if (entry->handle == 0) {
drm_sg_cleanup(entry);
dmah = malloc(sizeof(struct drm_dma_handle), DRM_MEM_DMA,
M_ZERO | M_NOWAIT);
if (dmah == NULL) {
free(entry->busaddr, DRM_MEM_PAGES);
free(entry, DRM_MEM_SGLISTS);
return ENOMEM;
}
for (i = 0; i < pages; i++) {
entry->busaddr[i] = vtophys(entry->handle + i * PAGE_SIZE);
ret = bus_dma_tag_create(NULL, PAGE_SIZE, 0, /* tag, align, boundary */
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
request->size, pages, /* maxsize, nsegs */
PAGE_SIZE, 0, /* maxsegsize, flags */
NULL, NULL, /* lockfunc, lockfuncargs */
&dmah->tag);
if (ret != 0) {
free(dmah, DRM_MEM_DMA);
free(entry->busaddr, DRM_MEM_PAGES);
free(entry, DRM_MEM_SGLISTS);
return ENOMEM;
}
ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmah->map);
if (ret != 0) {
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
free(entry->busaddr, DRM_MEM_PAGES);
free(entry, DRM_MEM_SGLISTS);
return ENOMEM;
}
ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
request->size, drm_sg_alloc_cb, entry,
BUS_DMA_NOWAIT | BUS_DMA_NOCACHE);
if (ret != 0) {
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
free(entry->busaddr, DRM_MEM_PAGES);
free(entry, DRM_MEM_SGLISTS);
return ENOMEM;
}
entry->sg_dmah = dmah;
entry->handle = (unsigned long)dmah->vaddr;
DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
entry->virtual = (void *)entry->handle;
@ -98,22 +130,49 @@ int drm_sg_alloc(struct drm_device * dev, struct drm_scatter_gather * request)
return 0;
}
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
static void
drm_sg_alloc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
struct drm_scatter_gather *request = data;
int ret;
struct drm_sg_mem *entry = arg;
int i;
DRM_DEBUG("%s\n", __FUNCTION__);
if (error != 0)
return;
ret = drm_sg_alloc(dev, request);
return ret;
for(i = 0 ; i < nsegs ; i++) {
entry->busaddr[i] = segs[i].ds_addr;
}
}
int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
int
drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
drm_sg_mem_t *entry;
DRM_DEBUG("\n");
return drm_sg_alloc(dev, request);
}
/*
 * Release a scatter/gather memory block.  Tears down the busdma
 * resources held by entry->sg_dmah in the reverse order of their
 * creation (unload mapping, free DMA memory, destroy tag), then
 * frees the DMA handle, the bus-address array, and the entry itself.
 */
void
drm_sg_cleanup(struct drm_sg_mem *entry)
{
struct drm_dma_handle *dmah = entry->sg_dmah;
bus_dmamap_unload(dmah->tag, dmah->map);
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
free(entry->busaddr, DRM_MEM_PAGES);
free(entry, DRM_MEM_SGLISTS);
}
int
drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
DRM_LOCK();
entry = dev->sg;

View file

@ -182,8 +182,8 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
DRM_UNLOCK();
DRM_SYSCTL_PRINT("\nslot offset size type flags "
"address mtrr\n");
DRM_SYSCTL_PRINT("\nslot offset size "
"type flags address mtrr\n");
for (i = 0; i < mapcount; i++) {
map = &tempmaps[i];
@ -199,7 +199,7 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
yesno = "yes";
DRM_SYSCTL_PRINT(
"%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx %s\n", i,
"%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %s\n", i,
map->offset, map->size, type, map->flags,
(unsigned long)map->handle, yesno);
}

View file

@ -83,8 +83,13 @@ int drm_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr,
}
if (map == NULL) {
DRM_DEBUG("Can't find map, requested offset = %016lx\n",
offset);
TAILQ_FOREACH(map, &dev->maplist, link) {
DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
map->offset, (unsigned long)map->handle);
}
DRM_UNLOCK();
DRM_DEBUG("can't find map\n");
return -1;
}
if (((map->flags&_DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {

View file

@ -40,9 +40,9 @@ static drm_pci_id_list_t i915_pciidlist[] = {
i915_PCI_IDS
};
static int i915_suspend(device_t nbdev)
static int i915_suspend(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev || !dev_priv) {
@ -54,16 +54,16 @@ static int i915_suspend(device_t nbdev)
i915_save_state(dev);
return (bus_generic_suspend(nbdev));
return (bus_generic_suspend(kdev));
}
static int i915_resume(device_t nbdev)
static int i915_resume(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
i915_restore_state(dev);
return (bus_generic_resume(nbdev));
return (bus_generic_resume(kdev));
}
static void i915_configure(struct drm_device *dev)
@ -75,11 +75,9 @@ static void i915_configure(struct drm_device *dev)
dev->driver->buf_priv_size = sizeof(drm_i915_private_t);
dev->driver->load = i915_driver_load;
dev->driver->unload = i915_driver_unload;
dev->driver->firstopen = i915_driver_firstopen;
dev->driver->preclose = i915_driver_preclose;
dev->driver->lastclose = i915_driver_lastclose;
dev->driver->device_is_agp = i915_driver_device_is_agp;
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
dev->driver->irq_preinstall = i915_driver_irq_preinstall;
@ -99,31 +97,31 @@ static void i915_configure(struct drm_device *dev)
}
static int
i915_probe(device_t dev)
i915_probe(device_t kdev)
{
return drm_probe(dev, i915_pciidlist);
return drm_probe(kdev, i915_pciidlist);
}
static int
i915_attach(device_t nbdev)
i915_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
i915_configure(dev);
return drm_attach(nbdev, i915_pciidlist);
return drm_attach(kdev, i915_pciidlist);
}
static int
i915_detach(device_t nbdev)
i915_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

1
bsd-core/i915_reg.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/i915_reg.h

View file

@ -51,6 +51,7 @@ static void mach64_configure(struct drm_device *dev)
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver->buf_priv_size = 1; /* No dev_priv */
dev->driver->load = mach64_driver_load;
dev->driver->lastclose = mach64_driver_lastclose;
dev->driver->get_vblank_counter = mach64_get_vblank_counter;
dev->driver->enable_vblank = mach64_enable_vblank;
@ -73,31 +74,37 @@ static void mach64_configure(struct drm_device *dev)
}
static int
mach64_probe(device_t dev)
mach64_probe(device_t kdev)
{
return drm_probe(dev, mach64_pciidlist);
return drm_probe(kdev, mach64_pciidlist);
}
static int
mach64_attach(device_t nbdev)
mach64_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
mach64_configure(dev);
return drm_attach(nbdev, mach64_pciidlist);
return drm_attach(kdev, mach64_pciidlist);
}
int
mach64_driver_load(struct drm_device * dev, unsigned long flags)
{
return drm_vblank_init(dev, 1);
}
static int
mach64_detach(device_t nbdev)
mach64_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

View file

@ -117,31 +117,31 @@ static void mga_configure(struct drm_device *dev)
}
static int
mga_probe(device_t dev)
mga_probe(device_t kdev)
{
return drm_probe(dev, mga_pciidlist);
return drm_probe(kdev, mga_pciidlist);
}
static int
mga_attach(device_t nbdev)
mga_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
mga_configure(dev);
return drm_attach(nbdev, mga_pciidlist);
return drm_attach(kdev, mga_pciidlist);
}
static int
mga_detach(device_t nbdev)
mga_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

1
bsd-core/nouveau/@ Symbolic link
View file

@ -0,0 +1 @@
/usr/src/sys

33
bsd-core/nouveau/Makefile Normal file
View file

@ -0,0 +1,33 @@
# $FreeBSD$
.PATH: ${.CURDIR}/..
KMOD = nouveau
NO_MAN = YES
SRCS = nouveau_drv.c nouveau_state.c nouveau_mem.c nouveau_object.c \
nouveau_sgdma.c nouveau_fifo.c nouveau_notifier.c nouveau_dma.c \
nouveau_irq.c nouveau_swmthd.c \
nv04_timer.c \
nv04_mc.c nv40_mc.c nv50_mc.c \
nv04_fb.c nv10_fb.c nv40_fb.c \
nv04_fifo.c nv10_fifo.c nv40_fifo.c nv50_fifo.c \
nv04_graph.c nv10_graph.c nv20_graph.c \
nv40_graph.c nv50_graph.c \
nv04_instmem.c nv50_instmem.c
# nouveau_bo.c nouveau_fence.c \
SRCS += device_if.h bus_if.h pci_if.h opt_drm.h
CFLAGS += ${DEBUG_FLAGS} -I. -I..
.if defined(DRM_DEBUG)
DRM_DEBUG_OPT= "\#define DRM_DEBUG 1"
.endif
.if !defined(DRM_NOLINUX)
DRM_LINUX_OPT= "\#define DRM_LINUX 1"
.endif
opt_drm.h:
touch opt_drm.h
echo $(DRM_DEBUG_OPT) >> opt_drm.h
echo $(DRM_LINUX_OPT) >> opt_drm.h
.include <bsd.kmod.mk>

1
bsd-core/nouveau/machine Symbolic link
View file

@ -0,0 +1 @@
/usr/src/sys/amd64/include

1
bsd-core/nouveau_dma.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_dma.c

1
bsd-core/nouveau_dma.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_dma.h

1
bsd-core/nouveau_drm.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_drm.h

148
bsd-core/nouveau_drv.c Normal file
View file

@ -0,0 +1,148 @@
/* nouveau_drv.c -- nouveau driver -*- linux-c -*-
* Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
*/
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "drm_pciids.h"
extern struct drm_ioctl_desc nouveau_ioctls[];
extern int nouveau_max_ioctl;
/* drv_PCI_IDs for nouveau is just to match the vendor id */
static struct drm_pci_id_list nouveau_pciidlist[] = {
{0x10DE, 0, 0, "NVidia Display Adapter"}, \
{0, 0, 0, NULL}
};
/*
 * nouveau_configure: populate dev->driver with the nouveau entry points.
 *
 * dev->driver is allocated zeroed by nouveau_attach() just before this is
 * called, so every hook not assigned here keeps its NULL default.
 */
static void nouveau_configure(struct drm_device *dev)
{
	/* Feature flags: AGP, PCI DMA, scatter/gather and an IRQ handler. */
	dev->driver->driver_features =
	   DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ;

	dev->driver->buf_priv_size = sizeof(struct drm_nouveau_private);

	/* Device/file lifecycle hooks (implemented in the shared nouveau core). */
	dev->driver->load = nouveau_load;
	dev->driver->unload = nouveau_unload;
	dev->driver->firstopen = nouveau_firstopen;
	dev->driver->preclose = nouveau_preclose;
	dev->driver->lastclose = nouveau_lastclose;

	/* Interrupt management hooks. */
	dev->driver->irq_preinstall = nouveau_irq_preinstall;
	dev->driver->irq_postinstall = nouveau_irq_postinstall;
	dev->driver->irq_uninstall = nouveau_irq_uninstall;
	dev->driver->irq_handler = nouveau_irq_handler;

	/* ioctl table lives in the shared core (see the externs above). */
	dev->driver->ioctls = nouveau_ioctls;
	dev->driver->max_ioctl = nouveau_max_ioctl;

	/* Identity/version strings come from nouveau_drv.h. */
	dev->driver->name = DRIVER_NAME;
	dev->driver->desc = DRIVER_DESC;
	dev->driver->date = DRIVER_DATE;
	dev->driver->major = DRIVER_MAJOR;
	dev->driver->minor = DRIVER_MINOR;
	dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
/*
 * nouveau_probe: newbus probe entry point.
 *
 * The PCI id list above matches on vendor only, so the display-class
 * check here keeps us off non-display NVIDIA functions.  When the board
 * exposes a VPD identifier, it is used as the device description string.
 *
 * Returns the drm_probe() result for NVIDIA display devices, ENXIO
 * otherwise.
 */
static int
nouveau_probe(device_t kdev)
{
	int vendor;

	if (pci_get_class(kdev) == PCIC_DISPLAY) {
		vendor = pci_get_vendor(kdev);
		if (vendor == 0x10de) {
			const char *ident;
			char model[64];

			/* Prefer the board's VPD identifier as the
			 * human-readable description, when present. */
			if (pci_get_vpd_ident(kdev, &ident) == 0) {
				/* sizeof(model), not a magic 64: keeps the
				 * bound correct if the buffer is resized. */
				snprintf(model, sizeof(model), "%s", ident);
				device_set_desc_copy(kdev, model);
				DRM_DEBUG("VPD : %s\n", model);
			}

			return drm_probe(kdev, nouveau_pciidlist);
		}
	}

	return ENXIO;
}
/*
 * nouveau_attach: newbus attach entry point.
 *
 * Allocates the driver description, fills in the nouveau hooks, then
 * hands the device to the common DRM attach path.
 */
static int
nouveau_attach(device_t kdev)
{
	struct drm_device *ddev;

	ddev = device_get_softc(kdev);

	/* M_WAITOK: allocation cannot fail; M_ZERO: unset hooks stay NULL. */
	ddev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
	    M_WAITOK | M_ZERO);
	nouveau_configure(ddev);

	return drm_attach(kdev, nouveau_pciidlist);
}
/*
 * nouveau_detach: newbus detach entry point.
 *
 * Runs the common DRM detach first, then releases the driver
 * description allocated in nouveau_attach().  The drm_detach() status
 * is propagated to the caller.
 */
static int
nouveau_detach(device_t kdev)
{
	struct drm_device *ddev = device_get_softc(kdev);
	int error;

	error = drm_detach(kdev);

	/* Paired with the malloc() in nouveau_attach(). */
	free(ddev->driver, DRM_MEM_DRIVER);

	return error;
}
/* Newbus device method table wiring the probe/attach/detach hooks. */
static device_method_t nouveau_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nouveau_probe),
	DEVMETHOD(device_attach, nouveau_attach),
	DEVMETHOD(device_detach, nouveau_detach),

	{ 0, 0 }	/* terminator */
};

/*
 * Driver declaration.  From FreeBSD 7.0 on DRM drivers register under
 * the "drm" name; older releases used the "drmsub" sub-driver name.
 */
static driver_t nouveau_driver = {
#if __FreeBSD_version >= 700010
	"drm",
#else
	"drmsub",
#endif
	nouveau_methods,
	sizeof(struct drm_device)
};

extern devclass_t drm_devclass;
/* Attach under vgapci (FreeBSD >= 7) or agp (older releases). */
#if __FreeBSD_version >= 700010
DRIVER_MODULE(nouveau, vgapci, nouveau_driver, drm_devclass, 0, 0);
#else
DRIVER_MODULE(nouveau, agp, nouveau_driver, drm_devclass, 0, 0);
#endif
/* The nouveau module requires the core drm module. */
MODULE_DEPEND(nouveau, drm, 1, 1, 1);

1
bsd-core/nouveau_drv.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_drv.h

1
bsd-core/nouveau_fifo.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_fifo.c

1
bsd-core/nouveau_irq.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_irq.c

1
bsd-core/nouveau_mem.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_mem.c

1
bsd-core/nouveau_notifier.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_notifier.c

1
bsd-core/nouveau_object.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_object.c

1
bsd-core/nouveau_reg.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_reg.h

357
bsd-core/nouveau_sgdma.c Normal file
View file

@ -0,0 +1,357 @@
#include "drmP.h"
#include "nouveau_drv.h"
#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
#if 0
struct nouveau_sgdma_be {
struct drm_ttm_backend backend;
struct drm_device *dev;
int pages;
int pages_populated;
dma_addr_t *pagelist;
int is_bound;
unsigned int pte_start;
};
static int
nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
{
return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}
static int
nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
struct page **pages, struct page *dummy_read_page)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
int p, d, o;
DRM_DEBUG("num_pages = %ld\n", num_pages);
if (nvbe->pagelist)
return -EINVAL;
nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
DRM_MEM_PAGES);
nvbe->pages_populated = d = 0;
for (p = 0; p < num_pages; p++) {
for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
struct page *page = pages[p];
if (!page)
page = dummy_read_page;
#ifdef __linux__
nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
page, o,
NV_CTXDMA_PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) {
#else
if (pci_dma_mapping_error(nvbe->pagelist[d])) {
#endif
be->func->clear(be);
DRM_ERROR("pci_map_page failed\n");
return -EINVAL;
}
#endif
nvbe->pages_populated = ++d;
}
}
return 0;
}
static void
nouveau_sgdma_clear(struct drm_ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
#ifdef __linux__
int d;
#endif
DRM_DEBUG("\n");
if (nvbe && nvbe->pagelist) {
if (nvbe->is_bound)
be->func->unbind(be);
#ifdef __linux__
for (d = 0; d < nvbe->pages_populated; d++) {
pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
NV_CTXDMA_PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
}
#endif
drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
DRM_MEM_PAGES);
}
}
/*
 * nouveau_sgdma_bind: map a populated TTM backend into the GART.
 *
 * mem->mm_node->start gives the target offset (in pages) inside the
 * aperture; the resulting byte offset must be NV_CTXDMA_PAGE aligned.
 * Each bus address from nvbe->pagelist is written into the sg ctxdma:
 * pre-NV50 as a 4-byte PTE with flag bits 3, NV50 as an 8-byte entry
 * whose low word carries flags 0x21.
 *
 * Returns 0 on success, -EINVAL on a misaligned offset or bad PTE.
 */
static int
nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
	uint32_t i;

	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
		  (unsigned long long)offset,
		  (mem->flags & DRM_BO_FLAG_CACHED) == 1);

	/* Bind target must sit on a ctxdma page boundary. */
	if (offset & NV_CTXDMA_PAGE_MASK)
		return -EINVAL;
	nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50)
		nvbe->pte_start += 2; /* skip ctxdma header */

	for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
		uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];

		/* Bus addresses must themselves be page aligned; the
		 * low bits are reserved for the PTE flag bits. */
		if (pteval & NV_CTXDMA_PAGE_MASK) {
			DRM_ERROR("Bad pteval 0x%llx\n",
				  (unsigned long long)pteval);
			return -EINVAL;
		}

		if (dev_priv->card_type < NV_50) {
			INSTANCE_WR(gpuobj, i, pteval | 3);
		} else {
			INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
			INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
		}
	}

	nvbe->is_bound = 1;

	return 0;
}
/*
 * nouveau_sgdma_unbind: detach a bound TTM backend from the GART.
 *
 * Rewrites each of the backend's PTEs to point back at the dummy
 * scratch page, using the same pre/post-NV50 entry formats as the bind
 * path, then clears is_bound.  A no-op when the backend is not bound.
 */
static int
nouveau_sgdma_unbind(struct drm_ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	DRM_DEBUG("\n");

	if (nvbe->is_bound) {
		struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
		unsigned int pte;

		pte = nvbe->pte_start;
		while (pte < (nvbe->pte_start + nvbe->pages)) {
			/* All unbound entries point at the dummy page. */
			uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;

			if (dev_priv->card_type < NV_50) {
				INSTANCE_WR(gpuobj, pte, pteval | 3);
			} else {
				INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
				INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
			}

			pte++;
		}

		nvbe->is_bound = 0;
	}

	return 0;
}
static void
nouveau_sgdma_destroy(struct drm_ttm_backend *be)
{
DRM_DEBUG("\n");
if (be) {
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
if (nvbe) {
if (nvbe->pagelist)
be->func->clear(be);
drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
}
}
}
static struct drm_ttm_backend_func nouveau_sgdma_backend = {
.needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust,
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
.bind = nouveau_sgdma_bind,
.unbind = nouveau_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
struct drm_ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_sgdma_be *nvbe;
if (!dev_priv->gart_info.sg_ctxdma)
return NULL;
nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
if (!nvbe)
return NULL;
nvbe->dev = dev;
nvbe->backend.func = &nouveau_sgdma_backend;
return &nvbe->backend;
}
#endif
/*
 * nouveau_sgdma_init: allocate and initialize the GART context object.
 *
 * Pre-NV50 cards get a 64MB PCI-target ctxdma with 4-byte PTEs preceded
 * by an 8-byte ctxdma header; NV50 gets one 512MB VM page table with
 * 8-byte entries.  All entries are initially pointed at the dummy
 * scratch page.
 *
 * Returns 0 on success or the nouveau_gpuobj_new() error.
 */
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				      NVOBJ_FLAG_ALLOW_NO_REFS |
				      NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &gpuobj)))  {
		DRM_ERROR("Error creating sgdma object: %d\n", ret);
		return ret;
	}

#ifdef __linux__
	/* Dummy scratch page that all unbound GART entries point at. */
	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	set_page_locked(dev_priv->gart_info.sg_dummy_page);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
#endif
	/* NOTE(review): on FreeBSD the #ifdef __linux__ block above is
	 * compiled out, so sg_dummy_bus is written into the PTEs below
	 * without ever being initialized here -- confirm intended. */
	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
		 * on those cards? */
		INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				       (1 << 12) /* PT present */ |
				       (0 << 13) /* PT *not* linear */ |
				       (NV_DMA_ACCESS_RW << 14) |
				       (NV_DMA_TARGET_PCI << 16));
		INSTANCE_WR(gpuobj, 1, aper_size - 1);
		/* Point every PTE at the dummy page (flag bits 3). */
		for (i=2; i<2+(aper_size>>12); i++) {
			INSTANCE_WR(gpuobj, i,
				    dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		/* 8-byte NV50 entries: low word holds address | 0x21. */
		for (i=0; i<obj_size; i+=8) {
			INSTANCE_WR(gpuobj, (i+0)/4,
				    dev_priv->gart_info.sg_dummy_bus | 0x21);
			INSTANCE_WR(gpuobj, (i+4)/4, 0);
		}
	}

	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;

	return 0;
}
/*
 * nouveau_sgdma_takedown: tear down the GART created by nouveau_sgdma_init().
 *
 * Unmaps and frees the dummy scratch page (Linux only; on other OSes
 * sg_dummy_page is never allocated, so only the fields are reset) and
 * destroys the sg ctxdma gpuobj.
 */
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
#ifdef __linux__
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
#endif
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}
#if 0
int
nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_ttm_backend *be;
struct drm_scatter_gather sgreq;
struct drm_mm_node mm_node;
struct drm_bo_mem_reg mem;
int ret;
dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
if (!dev_priv->gart_info.sg_be)
return -ENOMEM;
be = dev_priv->gart_info.sg_be;
/* Hack the aperture size down to the amount of system memory
* we're going to bind into it.
*/
if (dev_priv->gart_info.aper_size > 32*1024*1024)
dev_priv->gart_info.aper_size = 32*1024*1024;
sgreq.size = dev_priv->gart_info.aper_size;
if ((ret = drm_sg_alloc(dev, &sgreq))) {
DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
return ret;
}
dev_priv->gart_info.sg_handle = sgreq.handle;
if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {
DRM_ERROR("failed populate: %d\n", ret);
return ret;
}
mm_node.start = 0;
mem.mm_node = &mm_node;
if ((ret = be->func->bind(be, &mem))) {
DRM_ERROR("failed bind: %d\n", ret);
return ret;
}
return 0;
}
void
nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
{
}
#endif
/*
 * nouveau_sgdma_get_page: look up the bus address backing a GART offset.
 *
 * Reads the PTE covering `offset` out of the sg ctxdma object and
 * stores the page-aligned address in *page.  Only implemented for
 * pre-NV50 cards; NV50 lookups fail with -EINVAL.
 */
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte_idx = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type >= NV_50) {
		DRM_ERROR("Unimplemented on NV50\n");
		return -EINVAL;
	}

	/* +2 skips the two-word ctxdma header; the mask strips the
	 * low PTE flag bits, leaving the page-aligned address. */
	*page = INSTANCE_RD(gpuobj, (pte_idx + 2)) & ~NV_CTXDMA_PAGE_MASK;
	return 0;
}

1
bsd-core/nouveau_state.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_state.c

1
bsd-core/nouveau_swmthd.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_swmthd.c

1
bsd-core/nouveau_swmthd.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nouveau_swmthd.h

1
bsd-core/nv04_fb.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv04_fb.c

1
bsd-core/nv04_fifo.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv04_fifo.c

1
bsd-core/nv04_graph.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv04_graph.c

1
bsd-core/nv04_instmem.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv04_instmem.c

1
bsd-core/nv04_mc.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv04_mc.c

1
bsd-core/nv04_timer.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv04_timer.c

1
bsd-core/nv10_fb.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv10_fb.c

1
bsd-core/nv10_fifo.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv10_fifo.c

1
bsd-core/nv10_graph.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv10_graph.c

1
bsd-core/nv20_graph.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv20_graph.c

1
bsd-core/nv40_fb.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv40_fb.c

1
bsd-core/nv40_fifo.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv40_fifo.c

1
bsd-core/nv40_graph.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv40_graph.c

1
bsd-core/nv40_mc.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv40_mc.c

1
bsd-core/nv50_fifo.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv50_fifo.c

1
bsd-core/nv50_graph.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv50_graph.c

1
bsd-core/nv50_grctx.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv50_grctx.h

1
bsd-core/nv50_instmem.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv50_instmem.c

1
bsd-core/nv50_mc.c Symbolic link
View file

@ -0,0 +1 @@
../shared-core/nv50_mc.c

View file

@ -49,6 +49,7 @@ static void r128_configure(struct drm_device *dev)
DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver->buf_priv_size = sizeof(drm_r128_buf_priv_t);
dev->driver->load = r128_driver_load;
dev->driver->preclose = r128_driver_preclose;
dev->driver->lastclose = r128_driver_lastclose;
dev->driver->get_vblank_counter = r128_get_vblank_counter;
@ -72,31 +73,36 @@ static void r128_configure(struct drm_device *dev)
}
static int
r128_probe(device_t dev)
r128_probe(device_t kdev)
{
return drm_probe(dev, r128_pciidlist);
return drm_probe(kdev, r128_pciidlist);
}
static int
r128_attach(device_t nbdev)
r128_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
r128_configure(dev);
return drm_attach(nbdev, r128_pciidlist);
return drm_attach(kdev, r128_pciidlist);
}
int r128_driver_load(struct drm_device * dev, unsigned long flags)
{
return drm_vblank_init(dev, 1);
}
static int
r128_detach(device_t nbdev)
r128_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

View file

@ -77,31 +77,31 @@ static void radeon_configure(struct drm_device *dev)
}
static int
radeon_probe(device_t dev)
radeon_probe(device_t kdev)
{
return drm_probe(dev, radeon_pciidlist);
return drm_probe(kdev, radeon_pciidlist);
}
static int
radeon_attach(device_t nbdev)
radeon_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
radeon_configure(dev);
return drm_attach(nbdev, radeon_pciidlist);
return drm_attach(kdev, radeon_pciidlist);
}
static int
radeon_detach(device_t nbdev)
radeon_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

View file

@ -63,31 +63,31 @@ static void savage_configure(struct drm_device *dev)
}
static int
savage_probe(device_t dev)
savage_probe(device_t kdev)
{
return drm_probe(dev, savage_pciidlist);
return drm_probe(kdev, savage_pciidlist);
}
static int
savage_attach(device_t nbdev)
savage_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
savage_configure(dev);
return drm_attach(nbdev, savage_pciidlist);
return drm_attach(kdev, savage_pciidlist);
}
static int
savage_detach(device_t nbdev)
savage_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

View file

@ -57,31 +57,31 @@ static void sis_configure(struct drm_device *dev)
}
static int
sis_probe(device_t dev)
sis_probe(device_t kdev)
{
return drm_probe(dev, sis_pciidlist);
return drm_probe(kdev, sis_pciidlist);
}
static int
sis_attach(device_t nbdev)
sis_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
sis_configure(dev);
return drm_attach(nbdev, sis_pciidlist);
return drm_attach(kdev, sis_pciidlist);
}
static int
sis_detach(device_t nbdev)
sis_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

View file

@ -59,31 +59,31 @@ static void tdfx_configure(struct drm_device *dev)
}
static int
tdfx_probe(device_t dev)
tdfx_probe(device_t kdev)
{
return drm_probe(dev, tdfx_pciidlist);
return drm_probe(kdev, tdfx_pciidlist);
}
static int
tdfx_attach(device_t nbdev)
tdfx_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
tdfx_configure(dev);
return drm_attach(nbdev, tdfx_pciidlist);
return drm_attach(kdev, tdfx_pciidlist);
}
static int
tdfx_detach(device_t nbdev)
tdfx_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

View file

@ -70,31 +70,31 @@ static void via_configure(struct drm_device *dev)
}
static int
via_probe(device_t dev)
via_probe(device_t kdev)
{
return drm_probe(dev, via_pciidlist);
return drm_probe(kdev, via_pciidlist);
}
static int
via_attach(device_t nbdev)
via_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
via_configure(dev);
return drm_attach(nbdev, via_pciidlist);
return drm_attach(kdev, via_pciidlist);
}
static int
via_detach(device_t nbdev)
via_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(nbdev);
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);

View file

@ -19,7 +19,7 @@
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libdrm], 2.4.3, [dri-devel@lists.sourceforge.net], libdrm)
AC_INIT([libdrm], 2.4.5, [dri-devel@lists.sourceforge.net], libdrm)
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
@ -42,6 +42,10 @@ AC_ARG_ENABLE(udev, AS_HELP_STRING([--enable-udev],
[Enable support for using udev instead of mknod (default: disabled)]),
[UDEV=$enableval], [UDEV=no])
AC_ARG_ENABLE(nouveau-experimental-api,
AS_HELP_STRING([--enable-nouveau-experimental-api],
[Enable support for nouveau's experimental API (default: disabled)]),
[NOUVEAU=$enableval], [NOUVEAU=no])
dnl ===========================================================================
dnl check compiler flags
@ -62,6 +66,13 @@ AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
AC_MSG_RESULT([$libdrm_cc_flag])
])
dnl We use clock_gettime to check for timeouts in drmWaitVBlank
AC_CHECK_FUNCS([clock_gettime], [CLOCK_LIB=],
[AC_CHECK_LIB([rt], [clock_gettime], [CLOCK_LIB=-lrt],
[AC_MSG_ERROR([Couldn't find clock_gettime])])])
AC_SUBST([CLOCK_LIB])
dnl Use lots of warning flags with with gcc and compatible compilers
dnl Note: if you change the following variable, the cache is automatically
@ -112,13 +123,25 @@ if test "x$UDEV" = xyes; then
AC_DEFINE(UDEV, 1, [Have UDEV support])
fi
AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" = xyes])
PKG_CHECK_MODULES(CAIRO, cairo, [HAVE_CAIRO=yes], [HAVE_CAIRO=no])
if test "x$HAVE_CAIRO" = xyes; then
AC_DEFINE(HAVE_CAIRO, 1, [Have cairo support])
fi
AM_CONDITIONAL(HAVE_CAIRO, [test "x$HAVE_CAIRO" = xyes])
AC_SUBST(WARN_CFLAGS)
AC_OUTPUT([
Makefile
libdrm/Makefile
libdrm/intel/Makefile
libdrm/nouveau/Makefile
libdrm/nouveau/libdrm_nouveau.pc
shared-core/Makefile
tests/Makefile
tests/modeprint/Makefile
tests/modetest/Makefile
libdrm.pc])
libdrm.pc
libdrm_intel.pc])

View file

@ -18,11 +18,16 @@
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
SUBDIRS = . intel
if HAVE_NOUVEAU
NOUVEAU_SUBDIR = nouveau
endif
SUBDIRS = . intel $(NOUVEAU_SUBDIR)
libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
libdrm_la_LDFLAGS = -version-number 2:4:0 -no-undefined
libdrm_la_LIBADD = @CLOCK_LIB@
AM_CFLAGS = -I$(top_srcdir)/shared-core
libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \

View file

@ -39,6 +39,7 @@ libdrm_intel_la_SOURCES = \
intel_bufmgr_priv.h \
intel_bufmgr_fake.c \
intel_bufmgr_gem.c \
intel_chipset.h \
mm.c \
mm.h

View file

@ -51,6 +51,13 @@ drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
drm_intel_bo *
drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
void
drm_intel_bo_reference(drm_intel_bo *bo)
{

View file

@ -75,6 +75,10 @@ struct _drm_intel_bo {
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
void drm_intel_bo_reference(drm_intel_bo *bo);
void drm_intel_bo_unreference(drm_intel_bo *bo);
int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
@ -111,6 +115,7 @@ drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
/* drm_intel_bufmgr_fake.c */

View file

@ -444,7 +444,8 @@ alloc_block(drm_intel_bo *bo)
/* Release the card storage associated with buf:
*/
static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block)
static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
int skip_dirty_copy)
{
drm_intel_bo_fake *bo_fake;
DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
@ -453,7 +454,11 @@ static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block)
return;
bo_fake = (drm_intel_bo_fake *)block->bo;
if (!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)) && (bo_fake->card_dirty == 1)) {
if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
skip_dirty_copy = 1;
if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
bo_fake->card_dirty = 0;
bo_fake->dirty = 1;
@ -534,7 +539,7 @@ evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
set_dirty(&bo_fake->bo);
bo_fake->block = NULL;
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
return 1;
}
@ -557,7 +562,7 @@ evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
set_dirty(&bo_fake->bo);
bo_fake->block = NULL;
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
return 1;
}
@ -833,7 +838,7 @@ drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
bo_fake->refcount = 1;
bo_fake->id = ++bufmgr_fake->buf_nr;
bo_fake->name = name;
bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
bo_fake->flags = BM_PINNED;
bo_fake->is_static = 1;
DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
@ -872,7 +877,7 @@ drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
assert(bo_fake->map_count == 0);
/* No remaining references, so free it */
if (bo_fake->block)
free_block(bufmgr_fake, bo_fake->block);
free_block(bufmgr_fake, bo_fake->block, 1);
free_backing_store(bo);
for (i = 0; i < bo_fake->nr_relocs; i++)
@ -1064,7 +1069,7 @@ drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
block->on_hardware = 0;
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
bo_fake->block = NULL;
bo_fake->validated = 0;
if (!(bo_fake->flags & BM_NO_BACKING_STORE))
@ -1463,7 +1468,7 @@ drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
/* Releases the memory, and memcpys dirty contents out if necessary. */
free_block(bufmgr_fake, block);
free_block(bufmgr_fake, block, 0);
}
pthread_mutex_unlock(&bufmgr_fake->lock);
@ -1503,6 +1508,7 @@ drm_intel_bufmgr_fake_init(int fd,
/* Hook in methods */
bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;

View file

@ -52,8 +52,10 @@
#include <sys/types.h>
#include "errno.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"
#include "i915_drm.h"
@ -66,7 +68,8 @@
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
struct drm_intel_gem_bo_bucket {
drm_intel_bo_gem *head, **tail;
drmMMListHead head;
/**
* Limit on the number of entries in this bucket.
*
@ -99,6 +102,8 @@ typedef struct _drm_intel_bufmgr_gem {
struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
uint64_t gtt_size;
int available_fences;
int pci_device;
} drm_intel_bufmgr_gem;
struct _drm_intel_bo_gem {
@ -140,10 +145,12 @@ struct _drm_intel_bo_gem {
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer, saved across map/unmap cycles */
void *virtual;
void *mem_virtual;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
void *gtt_virtual;
/** free list */
drm_intel_bo_gem *next;
/** BO cache list */
drmMMListHead head;
/**
* Boolean of whether this BO and its children have been included in
@ -165,6 +172,11 @@ struct _drm_intel_bo_gem {
* the common case.
*/
int reloc_tree_size;
/**
* Number of potential fence registers required by this buffer and its
* relocations.
*/
int reloc_tree_fences;
};
static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);
@ -315,8 +327,9 @@ drm_intel_setup_reloc_list(drm_intel_bo *bo)
}
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment,
int for_render)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
drm_intel_bo_gem *bo_gem;
@ -345,18 +358,35 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
/* Get a buffer out of the cache if available */
if (bucket != NULL && bucket->num_entries > 0) {
struct drm_i915_gem_busy busy;
bo_gem = bucket->head;
busy.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
alloc_from_cache = (ret == 0 && busy.busy == 0);
if (alloc_from_cache) {
bucket->head = bo_gem->next;
if (bo_gem->next == NULL)
bucket->tail = &bucket->head;
if (for_render) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU cache
* and in the aperture for us.
*/
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.prev, head);
DRMLISTDEL(&bo_gem->head);
bucket->num_entries--;
alloc_from_cache = 1;
} else {
/* For non-render-target BOs (where we're probably going to map it
* first thing in order to fill it with data), check if the
* last BO in the cache is unbusy, and only reuse in that case.
* Otherwise, allocating a new buffer is probably faster than
* waiting for the GPU to finish.
*/
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
memset(&busy, 0, sizeof(busy));
busy.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
alloc_from_cache = (ret == 0 && busy.busy == 0);
if (alloc_from_cache) {
DRMLISTDEL(&bo_gem->head);
bucket->num_entries--;
}
}
}
pthread_mutex_unlock(&bufmgr_gem->lock);
@ -386,6 +416,7 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
bo_gem->refcount = 1;
bo_gem->validate_index = -1;
bo_gem->reloc_tree_size = bo_gem->bo.size;
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = 0;
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
@ -396,6 +427,20 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
return &bo_gem->bo;
}
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 1);
}
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 0);
}
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
@ -435,6 +480,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
bo_gem->gem_handle = open_arg.handle;
bo_gem->global_name = handle;
memset(&get_tiling, 0, sizeof(get_tiling));
get_tiling.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
if (ret != 0) {
@ -443,6 +489,10 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
}
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
if (bo_gem->tiling_mode == I915_TILING_NONE)
bo_gem->reloc_tree_fences = 0;
else
bo_gem->reloc_tree_fences = 1;
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
@ -476,10 +526,13 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
struct drm_gem_close close;
int ret;
if (bo_gem->virtual)
munmap (bo_gem->virtual, bo_gem->bo.size);
if (bo_gem->mem_virtual)
munmap (bo_gem->mem_virtual, bo_gem->bo.size);
if (bo_gem->gtt_virtual)
munmap (bo_gem->gtt_virtual, bo_gem->bo.size);
/* Close this object */
memset(&close, 0, sizeof(close));
close.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
@ -529,9 +582,7 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
bo_gem->reloc_target_bo = NULL;
bo_gem->reloc_count = 0;
bo_gem->next = NULL;
*bucket->tail = bo_gem;
bucket->tail = &bo_gem->next;
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
bucket->num_entries++;
} else {
drm_intel_gem_bo_free(bo);
@ -562,7 +613,7 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
*/
if (!bo_gem->virtual) {
if (!bo_gem->mem_virtual) {
struct drm_i915_gem_mmap mmap_arg;
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
@ -579,12 +630,12 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
bo_gem->mem_virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
bo_gem->swrast = 0;
}
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->virtual);
bo->virtual = bo_gem->virtual;
bo_gem->mem_virtual);
bo->virtual = bo_gem->mem_virtual;
if (bo_gem->global_name != 0 || !bo_gem->swrast) {
set_domain.handle = bo_gem->gem_handle;
@ -622,7 +673,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a mapping of the buffer if we haven't before. */
if (bo_gem->virtual == NULL) {
if (bo_gem->gtt_virtual == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
@ -643,10 +694,10 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
}
/* and mmap it */
bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED, bufmgr_gem->fd,
mmap_arg.offset);
if (bo_gem->virtual == MAP_FAILED) {
bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED, bufmgr_gem->fd,
mmap_arg.offset);
if (bo_gem->gtt_virtual == MAP_FAILED) {
fprintf(stderr,
"%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
@ -657,10 +708,10 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
}
}
bo->virtual = bo_gem->virtual;
bo->virtual = bo_gem->gtt_virtual;
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->virtual);
bo_gem->gtt_virtual);
/* Now move it to the GTT domain so that the CPU caches are flushed */
set_domain.handle = bo_gem->gem_handle;
@ -672,7 +723,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
fprintf (stderr, "%s:%d: Error setting domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
}
@ -681,6 +732,26 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
return 0;
}
int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_sw_finish sw_finish;
int ret = 0;
if (bo == NULL)
return 0;
assert(bo_gem->gtt_virtual != NULL);
pthread_mutex_lock(&bufmgr_gem->lock);
bo->virtual = NULL;
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
static int
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
@ -692,7 +763,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
if (bo == NULL)
return 0;
assert(bo_gem->virtual != NULL);
assert(bo_gem->mem_virtual != NULL);
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->swrast) {
@ -703,6 +774,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
} while (ret == -1 && errno == EINTR);
bo_gem->swrast = 0;
}
bo->virtual = NULL;
pthread_mutex_unlock(&bufmgr_gem->lock);
return 0;
}
@ -811,10 +883,9 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
drm_intel_bo_gem *bo_gem;
while ((bo_gem = bucket->head) != NULL) {
bucket->head = bo_gem->next;
if (bo_gem->next == NULL)
bucket->tail = &bucket->head;
while (!DRMLISTEMPTY(&bucket->head)) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
DRMLISTDEL(&bo_gem->head);
bucket->num_entries--;
drm_intel_gem_bo_free(&bo_gem->bo);
@ -860,6 +931,7 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
*/
assert(!bo_gem->used_as_reloc_target);
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
/* Flag the target to disallow further relocations in it. */
target_bo_gem->used_as_reloc_target = 1;
@ -996,10 +1068,14 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
struct drm_i915_gem_pin pin;
int ret;
memset(&pin, 0, sizeof(pin));
pin.handle = bo_gem->gem_handle;
pin.alignment = alignment;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
} while (ret == -1 && errno == EINTR);
if (ret != 0)
return -errno;
@ -1015,6 +1091,7 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
struct drm_i915_gem_unpin unpin;
int ret;
memset(&unpin, 0, sizeof(unpin));
unpin.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
@ -1036,6 +1113,11 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
return 0;
/* If we're going from non-tiling to tiling, bump fence count */
if (bo_gem->tiling_mode == I915_TILING_NONE)
bo_gem->reloc_tree_fences++;
memset(&set_tiling, 0, sizeof(set_tiling));
set_tiling.handle = bo_gem->gem_handle;
set_tiling.tiling_mode = *tiling_mode;
set_tiling.stride = stride;
@ -1048,6 +1130,10 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
bo_gem->tiling_mode = set_tiling.tiling_mode;
bo_gem->swizzle_mode = set_tiling.swizzle_mode;
/* If we're going from tiling to non-tiling, drop fence count */
if (bo_gem->tiling_mode == I915_TILING_NONE)
bo_gem->reloc_tree_fences--;
*tiling_mode = bo_gem->tiling_mode;
return 0;
}
@ -1072,6 +1158,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
int ret;
if (!bo_gem->global_name) {
memset(&flink, 0, sizeof(flink));
flink.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
@ -1125,6 +1212,31 @@ drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
return total;
}
/**
* Count the number of buffers in this list that need a fence reg
*
* If the count is greater than the number of available regs, we'll have
* to ask the caller to resubmit a batch with fewer tiled buffers.
*
* This function over-counts if the same buffer is used multiple times.
*/
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
{
int i;
unsigned int total = 0;
for (i = 0; i < count; i++) {
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
if (bo_gem == NULL)
continue;
total += bo_gem->reloc_tree_fences;
}
return total;
}
/**
* Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
* for the next drm_intel_bufmgr_check_aperture_space() call.
@ -1173,8 +1285,21 @@ drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
int i;
unsigned int total = 0;
for (i = 0; i < count; i++)
for (i = 0; i < count; i++) {
total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
/* For the first buffer object in the array, we get an accurate count
* back for its reloc_tree size (since nothing had been flagged as
* being counted yet). We can save that value out as a more
* conservative reloc_tree_size that avoids double-counting target
* buffers. Since the first buffer happens to usually be the batch
* buffer in our callers, this can pull us back from doing the tree
* walk on every new batch emit.
*/
if (i == 0) {
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
bo_gem->reloc_tree_size = total;
}
}
for (i = 0; i < count; i++)
drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
@ -1203,9 +1328,17 @@ drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
unsigned int total = 0;
unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
int total_fences;
/* Check for fence reg constraints if necessary */
if (bufmgr_gem->available_fences) {
total_fences = drm_intel_gem_total_fences(bo_array, count);
if (total_fences > bufmgr_gem->available_fences)
return -1;
}
total = drm_intel_gem_estimate_batch_space(bo_array, count);
if (total > threshold)
total = drm_intel_gem_compute_batch_space(bo_array, count);
@ -1231,6 +1364,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;
struct drm_i915_gem_get_aperture aperture;
drm_i915_getparam_t gp;
int ret, i;
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
@ -1254,6 +1388,25 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
(int)bufmgr_gem->gtt_size / 1024);
}
gp.param = I915_PARAM_CHIPSET_ID;
gp.value = &bufmgr_gem->pci_device;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret) {
fprintf(stderr, "get chip id failed: %d\n", ret);
fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
}
if (!IS_I965G(bufmgr_gem)) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret) {
fprintf(stderr, "get fences failed: %d\n", ret);
fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
bufmgr_gem->available_fences = 0;
}
}
/* Let's go with one relocation per every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
@ -1263,6 +1416,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
bufmgr_gem->bufmgr.bo_alloc_for_render = drm_intel_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
@ -1282,7 +1436,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
/* Initialize the linked lists for BO reuse cache. */
for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
return &bufmgr_gem->bufmgr;
}

View file

@ -51,6 +51,16 @@ struct _drm_intel_bufmgr {
drm_intel_bo *(*bo_alloc)(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a render target.
*
* This is otherwise the same as bo_alloc.
*/
drm_intel_bo *(*bo_alloc_for_render)(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
/** Takes a reference on a buffer object */
void (*bo_reference)(drm_intel_bo *bo);

View file

@ -0,0 +1,71 @@
/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _INTEL_CHIPSET_H
#define _INTEL_CHIPSET_H
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
(dev)->pci_device == 0x27AE)
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
(dev)->pci_device == 0x2982 || \
(dev)->pci_device == 0x2992 || \
(dev)->pci_device == 0x29A2 || \
(dev)->pci_device == 0x2A02 || \
(dev)->pci_device == 0x2A12 || \
(dev)->pci_device == 0x2A42 || \
(dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22)
#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
(dev)->pci_device == 0x2E12 || \
(dev)->pci_device == 0x2E22)
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2)
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
#endif /* _INTEL_CHIPSET_H */

View file

@ -29,6 +29,8 @@
* list handling. No list looping yet.
*/
#include <stddef.h>
typedef struct _drmMMListHead
{
struct _drmMMListHead *prev;

View file

@ -0,0 +1,42 @@
AM_CFLAGS = \
$(WARN_CFLAGS) \
-I$(top_srcdir)/libdrm \
-I$(top_srcdir)/libdrm/nouveau \
$(PTHREADSTUBS_CFLAGS) \
-I$(top_srcdir)/shared-core
libdrm_nouveau_la_LTLIBRARIES = libdrm_nouveau.la
libdrm_nouveau_ladir = $(libdir)
libdrm_nouveau_la_LDFLAGS = -version-number 1:0:0 -no-undefined
libdrm_nouveau_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
libdrm_nouveau_la_SOURCES = \
nouveau_device.c \
nouveau_channel.c \
nouveau_pushbuf.c \
nouveau_grobj.c \
nouveau_notifier.c \
nouveau_bo.c \
nouveau_resource.c \
nouveau_dma.c \
nouveau_fence.c \
nouveau_dma.h \
nouveau_private.h
libdrm_nouveaucommonincludedir = ${includedir}/nouveau
libdrm_nouveaucommoninclude_HEADERS = \
nouveau_device.h \
nouveau_channel.h \
nouveau_grobj.h \
nouveau_notifier.h \
nouveau_pushbuf.h \
nouveau_bo.h \
nouveau_resource.h \
nouveau_class.h
libdrm_nouveauincludedir = ${includedir}/drm
libdrm_nouveauinclude_HEADERS = \
nouveau_drmif.h
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm_nouveau.pc

View file

@ -0,0 +1,10 @@
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libdrm_nouveau
Description: Userspace interface to nouveau kernel DRM services
Version: 0.5
Libs: -L${libdir} -ldrm_nouveau
Cflags: -I${includedir} -I${includedir}/drm -I${includedir}/nouveau

848
libdrm/nouveau/nouveau_bo.c Normal file
View file

@ -0,0 +1,848 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "nouveau_private.h"
int
nouveau_bo_init(struct nouveau_device *dev)
{
return 0;
}
void
nouveau_bo_takedown(struct nouveau_device *dev)
{
}
static int
nouveau_bo_allocated(struct nouveau_bo_priv *nvbo)
{
if (nvbo->sysmem || nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
return 1;
return 0;
}
static int
nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo)
{
if (nvbo->user || nvbo->sysmem) {
assert(nvbo->sysmem);
return 0;
}
nvbo->sysmem = malloc(nvbo->size);
if (!nvbo->sysmem)
return -ENOMEM;
return 0;
}
static void
nouveau_bo_ufree(struct nouveau_bo_priv *nvbo)
{
if (nvbo->sysmem) {
if (!nvbo->user)
free(nvbo->sysmem);
nvbo->sysmem = NULL;
}
}
static void
nouveau_bo_kfree_nomm(struct nouveau_bo_priv *nvbo)
{
struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
struct drm_nouveau_mem_free req;
if (nvbo->map) {
drmUnmap(nvbo->map, nvbo->size);
nvbo->map = NULL;
}
req.offset = nvbo->offset;
if (nvbo->domain & NOUVEAU_BO_GART)
req.flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI;
else
if (nvbo->domain & NOUVEAU_BO_VRAM)
req.flags = NOUVEAU_MEM_FB;
drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE, &req, sizeof(req));
nvbo->handle = 0;
}
static void
nouveau_bo_kfree(struct nouveau_bo_priv *nvbo)
{
struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
struct drm_gem_close req;
if (!nvbo->handle)
return;
if (!nvdev->mm_enabled) {
nouveau_bo_kfree_nomm(nvbo);
return;
}
if (nvbo->map) {
munmap(nvbo->map, nvbo->size);
nvbo->map = NULL;
}
req.handle = nvbo->handle;
nvbo->handle = 0;
ioctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
static int
nouveau_bo_kalloc_nomm(struct nouveau_bo_priv *nvbo)
{
struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
struct drm_nouveau_mem_alloc req;
int ret;
if (nvbo->handle)
return 0;
if (!(nvbo->flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART)))
nvbo->flags |= (NOUVEAU_BO_GART | NOUVEAU_BO_VRAM);
req.size = nvbo->size;
req.alignment = nvbo->align;
req.flags = 0;
if (nvbo->flags & NOUVEAU_BO_VRAM)
req.flags |= NOUVEAU_MEM_FB;
if (nvbo->flags & NOUVEAU_BO_GART)
req.flags |= (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI);
if (nvbo->flags & NOUVEAU_BO_TILED) {
req.flags |= NOUVEAU_MEM_TILE;
if (nvbo->flags & NOUVEAU_BO_ZTILE)
req.flags |= NOUVEAU_MEM_TILE_ZETA;
}
req.flags |= NOUVEAU_MEM_MAPPED;
ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC,
&req, sizeof(req));
if (ret)
return ret;
nvbo->handle = req.map_handle;
nvbo->size = req.size;
nvbo->offset = req.offset;
if (req.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
nvbo->domain = NOUVEAU_BO_GART;
else
if (req.flags & NOUVEAU_MEM_FB)
nvbo->domain = NOUVEAU_BO_VRAM;
return 0;
}
static int
nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo, struct nouveau_channel *chan)
{
struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
struct drm_nouveau_gem_new req;
int ret;
if (nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
return 0;
if (!nvdev->mm_enabled)
return nouveau_bo_kalloc_nomm(nvbo);
req.channel_hint = chan ? chan->id : 0;
req.size = nvbo->size;
req.align = nvbo->align;
req.domain = 0;
if (nvbo->flags & NOUVEAU_BO_VRAM)
req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
if (nvbo->flags & NOUVEAU_BO_GART)
req.domain |= NOUVEAU_GEM_DOMAIN_GART;
if (nvbo->flags & NOUVEAU_BO_TILED) {
req.domain |= NOUVEAU_GEM_DOMAIN_TILE;
if (nvbo->flags & NOUVEAU_BO_ZTILE)
req.domain |= NOUVEAU_GEM_DOMAIN_TILE_ZETA;
}
if (!req.domain) {
req.domain |= (NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART);
}
ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW,
&req, sizeof(req));
if (ret)
return ret;
nvbo->handle = nvbo->base.handle = req.handle;
nvbo->size = req.size;
nvbo->domain = req.domain;
nvbo->offset = req.offset;
return 0;
}
static int
nouveau_bo_kmap_nomm(struct nouveau_bo_priv *nvbo)
{
struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
int ret;
ret = drmMap(nvdev->fd, nvbo->handle, nvbo->size, &nvbo->map);
if (ret) {
nvbo->map = NULL;
return ret;
}
return 0;
}
static int
nouveau_bo_kmap(struct nouveau_bo_priv *nvbo)
{
struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
struct drm_nouveau_gem_mmap req;
int ret;
if (nvbo->map)
return 0;
if (!nvbo->handle)
return -EINVAL;
if (!nvdev->mm_enabled)
return nouveau_bo_kmap_nomm(nvbo);
req.handle = nvbo->handle;
ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_MMAP,
&req, sizeof(req));
if (ret)
return ret;
nvbo->map = (void *)(unsigned long)req.vaddr;
return 0;
}
int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
int size, struct nouveau_bo **bo)
{
struct nouveau_bo_priv *nvbo;
int ret;
if (!dev || !bo || *bo)
return -EINVAL;
nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
if (!nvbo)
return -ENOMEM;
nvbo->base.device = dev;
nvbo->base.size = size;
nvbo->refcount = 1;
/* Don't set NOUVEAU_BO_PIN here, or nouveau_bo_allocated() will
* decided the buffer's already allocated when it's not. The
* call to nouveau_bo_pin() later will set this flag.
*/
nvbo->flags = (flags & ~NOUVEAU_BO_PIN);
nvbo->size = size;
nvbo->align = align;
/*XXX: murder me violently */
if (flags & NOUVEAU_BO_TILED) {
nvbo->base.tiled = 1;
if (flags & NOUVEAU_BO_ZTILE)
nvbo->base.tiled |= 2;
}
if (flags & NOUVEAU_BO_PIN) {
ret = nouveau_bo_pin((void *)nvbo, nvbo->flags);
if (ret) {
nouveau_bo_ref(NULL, (void *)nvbo);
return ret;
}
}
*bo = &nvbo->base;
return 0;
}
int
nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
struct nouveau_bo **bo)
{
struct nouveau_bo_priv *nvbo;
int ret;
ret = nouveau_bo_new(dev, 0, 0, size, bo);
if (ret)
return ret;
nvbo = nouveau_bo(*bo);
nvbo->sysmem = ptr;
nvbo->user = 1;
return 0;
}
int
nouveau_bo_fake(struct nouveau_device *dev, uint64_t offset, uint32_t flags,
uint32_t size, void *map, struct nouveau_bo **bo)
{
struct nouveau_bo_priv *nvbo;
int ret;
ret = nouveau_bo_new(dev, flags & ~NOUVEAU_BO_PIN, 0, size, bo);
if (ret)
return ret;
nvbo = nouveau_bo(*bo);
nvbo->flags = flags | NOUVEAU_BO_PIN;
nvbo->domain = (flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));
nvbo->offset = offset;
nvbo->size = nvbo->base.size = size;
nvbo->map = map;
nvbo->base.flags = nvbo->flags;
nvbo->base.offset = nvbo->offset;
return 0;
}
int
nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle)
{
struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
int ret;
if (!bo || !handle)
return -EINVAL;
if (!nvbo->global_handle) {
struct drm_gem_flink req;
ret = nouveau_bo_kalloc(nvbo, NULL);
if (ret)
return ret;
if (nvdev->mm_enabled) {
req.handle = nvbo->handle;
ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req);
if (ret) {
nouveau_bo_kfree(nvbo);
return ret;
}
nvbo->global_handle = req.name;
} else {
nvbo->global_handle = nvbo->offset;
}
}
*handle = nvbo->global_handle;
return 0;
}
int
nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle,
struct nouveau_bo **bo)
{
struct nouveau_device_priv *nvdev = nouveau_device(dev);
struct nouveau_bo_priv *nvbo;
struct drm_gem_open req;
int ret;
ret = nouveau_bo_new(dev, 0, 0, 0, bo);
if (ret)
return ret;
nvbo = nouveau_bo(*bo);
if (!nvdev->mm_enabled) {
nvbo->handle = 0;
nvbo->offset = handle;
nvbo->domain = NOUVEAU_BO_VRAM;
nvbo->flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_PIN;
nvbo->base.offset = nvbo->offset;
nvbo->base.flags = nvbo->flags;
} else {
req.name = handle;
ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req);
if (ret) {
nouveau_bo_ref(NULL, bo);
return ret;
}
nvbo->size = req.size;
nvbo->handle = req.handle;
}
return 0;
}
static void
nouveau_bo_del_cb(void *priv)
{
struct nouveau_bo_priv *nvbo = priv;
nouveau_fence_ref(NULL, &nvbo->fence);
nouveau_fence_ref(NULL, &nvbo->wr_fence);
nouveau_bo_kfree(nvbo);
free(nvbo);
}
static void
nouveau_bo_del(struct nouveau_bo **bo)
{
struct nouveau_bo_priv *nvbo;
if (!bo || !*bo)
return;
nvbo = nouveau_bo(*bo);
*bo = NULL;
if (--nvbo->refcount)
return;
if (nvbo->pending) {
nvbo->pending = NULL;
nouveau_pushbuf_flush(nvbo->pending_channel, 0);
}
nouveau_bo_ufree(nvbo);
if (!nouveau_device(nvbo->base.device)->mm_enabled && nvbo->fence) {
nouveau_fence_flush(nvbo->fence->channel);
if (nouveau_fence(nvbo->fence)->signalled) {
nouveau_bo_del_cb(nvbo);
} else {
nouveau_fence_signal_cb(nvbo->fence,
nouveau_bo_del_cb, nvbo);
}
} else {
nouveau_bo_del_cb(nvbo);
}
}
int
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo)
{
if (!pbo)
return -EINVAL;
if (ref)
nouveau_bo(ref)->refcount++;
if (*pbo)
nouveau_bo_del(pbo);
*pbo = ref;
return 0;
}
static int
nouveau_bo_wait_nomm(struct nouveau_bo *bo, int cpu_write)
{
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
int ret = 0;
if (cpu_write)
ret = nouveau_fence_wait(&nvbo->fence);
else
ret = nouveau_fence_wait(&nvbo->wr_fence);
if (ret)
return ret;
nvbo->write_marker = 0;
return 0;
}
static int
nouveau_bo_wait(struct nouveau_bo *bo, int cpu_write)
{
struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
struct drm_nouveau_gem_cpu_prep req;
int ret;
if (!nvbo->global_handle && !nvbo->write_marker && !cpu_write)
return 0;
if (nvbo->pending &&
(nvbo->pending->write_domains || cpu_write)) {
nvbo->pending = NULL;
nouveau_pushbuf_flush(nvbo->pending_channel, 0);
}
if (!nvdev->mm_enabled)
return nouveau_bo_wait_nomm(bo, cpu_write);
req.handle = nvbo->handle;
ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_PREP,
&req, sizeof(req));
if (ret)
return ret;
nvbo->write_marker = 0;
return 0;
}
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
{
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
int ret;
if (!nvbo || bo->map)
return -EINVAL;
if (!nouveau_bo_allocated(nvbo)) {
if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) {
ret = nouveau_bo_kalloc(nvbo, NULL);
if (ret)
return ret;
}
if (!nouveau_bo_allocated(nvbo)) {
ret = nouveau_bo_ualloc(nvbo);
if (ret)
return ret;
}
}
if (nvbo->sysmem) {
bo->map = nvbo->sysmem;
} else {
ret = nouveau_bo_kmap(nvbo);
if (ret)
return ret;
ret = nouveau_bo_wait(bo, (flags & NOUVEAU_BO_WR));
if (ret)
return ret;
bo->map = nvbo->map;
}
return 0;
}
void
nouveau_bo_unmap(struct nouveau_bo *bo)
{
struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
if (nvdev->mm_enabled && bo->map && !nvbo->sysmem) {
struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
struct drm_nouveau_gem_cpu_fini req;
req.handle = nvbo->handle;
drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_FINI,
&req, sizeof(req));
}
bo->map = NULL;
}
/* Migrate a buffer to the requested memory domain (nomm path).
 *
 * Allocates a replacement BO in the wanted domain, copies the old
 * contents across (mapping the source read-only if it was not already
 * mapped), then swaps the identities of the two objects so existing
 * references to nvbo now point at the migrated storage.  The temporary
 * object, now holding the old storage, is released.
 *
 * Fix: the contents-copy memcpy() was duplicated in both branches of
 * the map check; consolidated behind a single call with an unmap flag.
 *
 * Returns 0 on success (including when already in a wanted domain) or
 * a negative errno.
 */
int
nouveau_bo_validate_nomm(struct nouveau_bo_priv *nvbo, uint32_t flags)
{
	struct nouveau_bo *new = NULL;
	uint32_t t_handle, t_domain, t_offset, t_size;
	void *t_map;
	int need_unmap = 0;
	int ret;

	/* Already resident in an acceptable domain. */
	if ((flags & NOUVEAU_BO_VRAM) && nvbo->domain == NOUVEAU_BO_VRAM)
		return 0;
	if ((flags & NOUVEAU_BO_GART) && nvbo->domain == NOUVEAU_BO_GART)
		return 0;
	assert(flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));

	/* Keep tiling info */
	flags |= (nvbo->flags & (NOUVEAU_BO_TILED|NOUVEAU_BO_ZTILE));

	ret = nouveau_bo_new(nvbo->base.device, flags, 0, nvbo->size, &new);
	if (ret)
		return ret;

	ret = nouveau_bo_kalloc(nouveau_bo(new), NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &new);
		return ret;
	}

	/* Copy old contents into the new placement, mapping the source
	 * temporarily if required. */
	if (nvbo->handle || nvbo->sysmem) {
		nouveau_bo_kmap(nouveau_bo(new));

		if (!nvbo->base.map) {
			nouveau_bo_map(&nvbo->base, NOUVEAU_BO_RD);
			need_unmap = 1;
		}
		memcpy(nouveau_bo(new)->map, nvbo->base.map, nvbo->base.size);
		if (need_unmap)
			nouveau_bo_unmap(&nvbo->base);
	}

	/* Swap storage identities so callers holding nvbo see the new
	 * placement; the temporary then carries the old storage away. */
	t_handle = nvbo->handle;
	t_domain = nvbo->domain;
	t_offset = nvbo->offset;
	t_size = nvbo->size;
	t_map = nvbo->map;

	nvbo->handle = nouveau_bo(new)->handle;
	nvbo->domain = nouveau_bo(new)->domain;
	nvbo->offset = nouveau_bo(new)->offset;
	nvbo->size = nouveau_bo(new)->size;
	nvbo->map = nouveau_bo(new)->map;

	nouveau_bo(new)->handle = t_handle;
	nouveau_bo(new)->domain = t_domain;
	nouveau_bo(new)->offset = t_offset;
	nouveau_bo(new)->size = t_size;
	nouveau_bo(new)->map = t_map;

	nouveau_bo_ref(NULL, &new);
	return 0;
}
/* Pin a buffer without the kernel memory manager.
 *
 * Without mm there is no kernel pin ioctl; buffers sit at fixed
 * offsets once validated into a domain, so pinning is just making
 * sure the buffer is resident and then latching the placement into
 * the public bo fields.  Returns 0 or a negative errno.
 */
static int
nouveau_bo_pin_nomm(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_bo_priv *priv = nouveau_bo(bo);
	int ret;

	if (!priv->handle) {
		/* A placement must be requested to create the object. */
		if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
			return -EINVAL;

		ret = nouveau_bo_validate_nomm(priv, flags & ~NOUVEAU_BO_PIN);
		if (ret)
			return ret;
	}

	priv->pinned = 1;

	/* Fill in public nouveau_bo members */
	bo->flags = priv->domain;
	bo->offset = priv->offset;
	return 0;
}
/* Pin a buffer at a fixed GPU address.
 *
 * With the kernel memory manager active this allocates backing store
 * if needed and asks the kernel (GEM_PIN ioctl) to lock the object
 * into the domain(s) requested via flags; without it the nomm variant
 * is used.  On success bo->flags/bo->offset describe the placement.
 * Returns 0 (also when already pinned) or a negative errno.
 */
int
nouveau_bo_pin(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pin req;
	int ret;

	if (nvbo->pinned)
		return 0;

	if (!nvdev->mm_enabled)
		return nouveau_bo_pin_nomm(bo, flags);

	/* Ensure we have a kernel object... */
	if (!nvbo->handle) {
		if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
			return -EINVAL;
		nvbo->flags = flags;
		ret = nouveau_bo_kalloc(nvbo, NULL);
		if (ret)
			return ret;
	}

	/* Now force it to stay put :) */
	req.handle = nvbo->handle;
	req.domain = 0;
	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (nvbo->flags & NOUVEAU_BO_GART)
		req.domain |= NOUVEAU_GEM_DOMAIN_GART;

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PIN, &req,
				  sizeof(struct drm_nouveau_gem_pin));
	if (ret)
		return ret;

	/* Cache the kernel's placement decision. */
	nvbo->offset = req.offset;
	nvbo->domain = req.domain;
	nvbo->pinned = 1;
	nvbo->flags |= NOUVEAU_BO_PIN;

	/* Fill in public nouveau_bo members */
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags = NOUVEAU_BO_VRAM;
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags = NOUVEAU_BO_GART;
	bo->offset = nvbo->offset;
	return 0;
}
/* Release a pin taken with nouveau_bo_pin().
 *
 * No-op if the buffer is not pinned.  With the memory manager active
 * the kernel is informed via GEM_UNPIN (result ignored); the public
 * pinned-only fields are always cleared.
 */
void
nouveau_bo_unpin(struct nouveau_bo *bo)
{
	struct nouveau_device_priv *dev = nouveau_device(bo->device);
	struct nouveau_bo_priv *priv = nouveau_bo(bo);
	struct drm_nouveau_gem_unpin req;

	if (!priv->pinned)
		return;

	if (dev->mm_enabled) {
		req.handle = priv->handle;
		drmCommandWrite(dev->fd, DRM_NOUVEAU_GEM_UNPIN,
				&req, sizeof(req));
	}

	/* Placement information is only valid while pinned. */
	priv->pinned = 0;
	bo->offset = 0;
	bo->flags = 0;
}
/* Set tiling state on a range of the buffer.
 *
 * Translates NOUVEAU_BO_TILED/NOUVEAU_BO_ZTILE into kernel memory
 * flags, then issues the GEM_TILE ioctl (memory-manager path) or the
 * legacy MEM_TILE ioctl (nomm path, which additionally encodes the
 * FB/AGP memory domain from flags).
 *
 * Fix: the ioctl status was computed into `ret` but the function
 * unconditionally returned 0, silently swallowing errors; the status
 * is now propagated to the caller.
 */
int
nouveau_bo_tile(struct nouveau_bo *bo, uint32_t flags, uint32_t delta,
		uint32_t size)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	uint32_t kern_flags = 0;
	int ret = 0;

	if (flags & NOUVEAU_BO_TILED) {
		kern_flags |= NOUVEAU_MEM_TILE;
		if (flags & NOUVEAU_BO_ZTILE)
			kern_flags |= NOUVEAU_MEM_TILE_ZETA;
	}

	if (nvdev->mm_enabled) {
		struct drm_nouveau_gem_tile req;

		req.handle = nvbo->handle;
		req.delta = delta;
		req.size = size;
		req.flags = kern_flags;
		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_TILE,
				      &req, sizeof(req));
	} else {
		struct drm_nouveau_mem_tile req;

		req.offset = nvbo->offset;
		req.delta = delta;
		req.size = size;
		req.flags = kern_flags;
		if (flags & NOUVEAU_BO_VRAM)
			req.flags |= NOUVEAU_MEM_FB;
		if (flags & NOUVEAU_BO_GART)
			req.flags |= NOUVEAU_MEM_AGP;
		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_TILE,
				      &req, sizeof(req));
	}

	return ret;
}
/* Non-blocking query: may the GPU still be using this buffer?
 *
 * Returns non-zero when the buffer must be considered busy.  Without
 * the memory manager the answer comes from pending pushbuf entries and
 * the per-buffer fences; with mm enabled there is no query path here,
 * so the function conservatively always reports busy.
 */
int
nouveau_bo_busy(struct nouveau_bo *bo, uint32_t access)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (!nvdev->mm_enabled) {
		struct nouveau_fence *fence;

		/* Unflushed GPU writes — or any pending use at all when
		 * the caller wants to write — mean busy. */
		if (nvbo->pending && (nvbo->pending->write_domains ||
				      (access & NOUVEAU_BO_WR)))
			return 1;

		/* Writers must be checked against all GPU access
		 * (fence); readers only against GPU writes (wr_fence). */
		if (access & NOUVEAU_BO_WR)
			fence = nvbo->fence;
		else
			fence = nvbo->wr_fence;
		return !nouveau_fence(fence)->signalled;
	}

	return 1;
}
/* Add a buffer to the channel pushbuf's relocation buffer list.
 *
 * Returns the pushbuf entry for this BO, creating one on first use
 * within the current pushbuf, or NULL when a kernel object cannot be
 * created or the per-pushbuf buffer table is full.  A reference is
 * taken on the BO and stashed in user_priv for the pushbuf's lifetime.
 */
struct drm_nouveau_gem_pushbuf_bo *
nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	struct nouveau_bo *ref = NULL;
	int ret;

	/* Already on this pushbuf's list. */
	if (nvbo->pending)
		return nvbo->pending;

	if (!nvbo->handle) {
		ret = nouveau_bo_kalloc(nvbo, chan);
		if (ret)
			return NULL;

		/* Migrate user-memory contents into the fresh kernel
		 * object.  sysmem is temporarily cleared so that
		 * nouveau_bo_map() maps the kernel object instead of
		 * returning the user pointer. */
		if (nvbo->sysmem) {
			void *sysmem_tmp = nvbo->sysmem;

			nvbo->sysmem = NULL;
			ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
			if (ret)
				return NULL;
			nvbo->sysmem = sysmem_tmp;

			memcpy(bo->map, nvbo->sysmem, nvbo->base.size);
			nouveau_bo_unmap(bo);
			nouveau_bo_ufree(nvbo);
		}
	}

	if (nvpb->nr_buffers >= NOUVEAU_PUSHBUF_MAX_BUFFERS)
		return NULL;
	pbbo = nvpb->buffers + nvpb->nr_buffers++;
	nvbo->pending = pbbo;
	nvbo->pending_channel = chan;

	/* Hold a reference for as long as the pushbuf entry lives. */
	nouveau_bo_ref(bo, &ref);
	pbbo->user_priv = (uint64_t)(unsigned long)ref;
	pbbo->handle = nvbo->handle;
	pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
	pbbo->read_domains = 0;
	pbbo->write_domains = 0;
	pbbo->presumed_domain = nvbo->domain;
	pbbo->presumed_offset = nvbo->offset;
	pbbo->presumed_ok = 1;
	return pbbo;
}

View file

@ -0,0 +1,97 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__

/* Relocation/Buffer type flags */
#define NOUVEAU_BO_VRAM (1 << 0)	/* placement/reloc: video memory */
#define NOUVEAU_BO_GART (1 << 1)	/* placement/reloc: GART aperture */
#define NOUVEAU_BO_RD (1 << 2)		/* GPU read access */
#define NOUVEAU_BO_WR (1 << 3)		/* GPU write access */
#define NOUVEAU_BO_RDWR (NOUVEAU_BO_RD | NOUVEAU_BO_WR)
#define NOUVEAU_BO_MAP (1 << 4)
#define NOUVEAU_BO_PIN (1 << 5)		/* fix at a constant GPU address */
#define NOUVEAU_BO_LOW (1 << 6)
#define NOUVEAU_BO_HIGH (1 << 7)
#define NOUVEAU_BO_OR (1 << 8)
#define NOUVEAU_BO_LOCAL (1 << 9)
#define NOUVEAU_BO_TILED (1 << 10)	/* tiled memory layout */
#define NOUVEAU_BO_ZTILE (1 << 11)	/* depth-buffer tiling */
#define NOUVEAU_BO_DUMMY (1 << 31)

/* Public handle for a buffer object; library bookkeeping lives in the
 * wrapping nouveau_bo_priv. */
struct nouveau_bo {
	struct nouveau_device *device;
	uint32_t handle;	/* kernel object handle */
	uint64_t size;		/* size in bytes */
	void *map;		/* CPU pointer while mapped, else NULL */
	int tiled;

	/* Available when buffer is pinned *only* */
	uint32_t flags;		/* pinned placement: VRAM or GART */
	uint64_t offset;	/* pinned GPU address */
};

/* Allocate a new buffer object. */
int
nouveau_bo_new(struct nouveau_device *, uint32_t flags, int align, int size,
	       struct nouveau_bo **);

/* Wrap caller-provided memory as a buffer object. */
int
nouveau_bo_user(struct nouveau_device *, void *ptr, int size,
		struct nouveau_bo **);

/* Wrap a fixed offset/size range as a fake buffer object. */
int
nouveau_bo_fake(struct nouveau_device *dev, uint64_t offset, uint32_t flags,
		uint32_t size, void *map, struct nouveau_bo **);

int
nouveau_bo_handle_get(struct nouveau_bo *, uint32_t *);

int
nouveau_bo_handle_ref(struct nouveau_device *, uint32_t handle,
		      struct nouveau_bo **);

/* Reference counting: pass a bo to take a ref, NULL to drop one. */
int
nouveau_bo_ref(struct nouveau_bo *, struct nouveau_bo **);

int
nouveau_bo_map(struct nouveau_bo *, uint32_t flags);

void
nouveau_bo_unmap(struct nouveau_bo *);

int
nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);

void
nouveau_bo_unpin(struct nouveau_bo *);

int
nouveau_bo_tile(struct nouveau_bo *, uint32_t flags, uint32_t delta,
		uint32_t size);

int
nouveau_bo_busy(struct nouveau_bo *, uint32_t access);

#endif

View file

@ -0,0 +1,178 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "nouveau_private.h"
/* Allocate a new FIFO channel on the device.
 *
 * fb_ctxdma/tt_ctxdma name the VRAM and GART context DMA objects the
 * kernel should wire up.  On success *chan points at the new channel
 * (with vram/gart/nullobj graph objects resolved, the notifier block
 * mapped, and the pushbuf initialised).  On the nomm path the control
 * registers and command buffer are also mapped, and on pre-NV10
 * chipsets a software-fencing object/notifier pair is set up.
 * Returns 0 or a negative errno; partially-constructed channels are
 * torn down via nouveau_channel_free().
 */
int
nouveau_channel_alloc(struct nouveau_device *dev, uint32_t fb_ctxdma,
		      uint32_t tt_ctxdma, struct nouveau_channel **chan)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_channel_priv *nvchan;
	unsigned i;
	int ret;

	if (!nvdev || !chan || *chan)
		return -EINVAL;

	nvchan = calloc(1, sizeof(struct nouveau_channel_priv));
	if (!nvchan)
		return -ENOMEM;
	nvchan->base.device = dev;

	/* Ask the kernel for the channel. */
	nvchan->drm.fb_ctxdma_handle = fb_ctxdma;
	nvchan->drm.tt_ctxdma_handle = tt_ctxdma;
	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &nvchan->drm, sizeof(nvchan->drm));
	if (ret) {
		free(nvchan);
		return ret;
	}

	nvchan->base.id = nvchan->drm.channel;
	if (nouveau_grobj_ref(&nvchan->base, nvchan->drm.fb_ctxdma_handle,
			      &nvchan->base.vram) ||
	    nouveau_grobj_ref(&nvchan->base, nvchan->drm.tt_ctxdma_handle,
			      &nvchan->base.gart)) {
		nouveau_channel_free((void *)&nvchan);
		return -EINVAL;
	}

	/* Mark all DRM-assigned subchannels as in-use */
	for (i = 0; i < nvchan->drm.nr_subchan; i++) {
		/* NOTE(review): calloc() result used unchecked — an
		 * allocation failure here would dereference NULL. */
		struct nouveau_grobj_priv *gr = calloc(1, sizeof(*gr));

		gr->base.bound = NOUVEAU_GROBJ_BOUND_EXPLICIT;
		gr->base.subc = i;
		gr->base.handle = nvchan->drm.subchan[i].handle;
		gr->base.grclass = nvchan->drm.subchan[i].grclass;
		gr->base.channel = &nvchan->base;
		nvchan->base.subc[i].gr = &gr->base;
	}

	/* Map the kernel-provided notifier block. */
	ret = drmMap(nvdev->fd, nvchan->drm.notifier, nvchan->drm.notifier_size,
		     (drmAddressPtr)&nvchan->notifier_block);
	if (ret) {
		nouveau_channel_free((void *)&nvchan);
		return ret;
	}

	/* Null object (class 0x30) used to unbind subchannels. */
	ret = nouveau_grobj_alloc(&nvchan->base, 0x00000000, 0x0030,
				  &nvchan->base.nullobj);
	if (ret) {
		nouveau_channel_free((void *)&nvchan);
		return ret;
	}

	if (!nvdev->mm_enabled) {
		/* nomm: user-space drives the ring directly; map the
		 * channel control registers and the command buffer. */
		ret = drmMap(nvdev->fd, nvchan->drm.ctrl, nvchan->drm.ctrl_size,
			     (void*)&nvchan->user);
		if (ret) {
			nouveau_channel_free((void *)&nvchan);
			return ret;
		}
		nvchan->put = &nvchan->user[0x40/4];
		nvchan->get = &nvchan->user[0x44/4];
		nvchan->ref_cnt = &nvchan->user[0x48/4];

		ret = drmMap(nvdev->fd, nvchan->drm.cmdbuf,
			     nvchan->drm.cmdbuf_size, (void*)&nvchan->pushbuf);
		if (ret) {
			nouveau_channel_free((void *)&nvchan);
			return ret;
		}

		nouveau_dma_channel_init(&nvchan->base);
	}

	nouveau_pushbuf_init(&nvchan->base);

	if (!nvdev->mm_enabled && dev->chipset < 0x10) {
		/* Pre-NV10 software fencing: a 0x5039 object writing
		 * into a dedicated notifier. */
		ret = nouveau_grobj_alloc(&nvchan->base, 0xbeef3904, 0x5039,
					  &nvchan->fence_grobj);
		if (ret) {
			nouveau_channel_free((void *)&nvchan);
			return ret;
		}

		ret = nouveau_notifier_alloc(&nvchan->base, 0xbeef3905, 1,
					     &nvchan->fence_ntfy);
		if (ret) {
			nouveau_channel_free((void *)&nvchan);
			return ret;
		}

		BEGIN_RING(&nvchan->base, nvchan->fence_grobj, 0x0180, 1);
		OUT_RING (&nvchan->base, nvchan->fence_ntfy->handle);
		nvchan->fence_grobj->bound = NOUVEAU_GROBJ_BOUND_EXPLICIT;
	}

	*chan = &nvchan->base;
	return 0;
}
/* Destroy a channel: flush outstanding work, release graphics objects
 * and notifiers, then ask the kernel to free the FIFO.  Safe to call
 * on a partially-constructed channel (used in alloc error paths).
 * *chan is cleared before teardown begins.
 */
void
nouveau_channel_free(struct nouveau_channel **chan)
{
	struct nouveau_channel_priv *nvchan;
	struct nouveau_device_priv *nvdev;
	struct drm_nouveau_channel_free cf;

	if (!chan || !*chan)
		return;
	nvchan = nouveau_channel(*chan);
	*chan = NULL;
	nvdev = nouveau_device(nvchan->base.device);

	/* Submit anything still queued. */
	FIRE_RING(&nvchan->base);

	if (!nvdev->mm_enabled) {
		struct nouveau_fence *fence = NULL;

		/* Make sure all buffer objects on delayed delete queue
		 * actually get freed.
		 */
		nouveau_fence_new(&nvchan->base, &fence);
		nouveau_fence_emit(fence);
		nouveau_fence_wait(&fence);
	}

	if (nvchan->notifier_block)
		drmUnmap(nvchan->notifier_block, nvchan->drm.notifier_size);

	nouveau_grobj_free(&nvchan->base.vram);
	nouveau_grobj_free(&nvchan->base.gart);
	nouveau_grobj_free(&nvchan->base.nullobj);
	nouveau_grobj_free(&nvchan->fence_grobj);
	nouveau_notifier_free(&nvchan->fence_ntfy);

	cf.channel = nvchan->drm.channel;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
	free(nvchan);
}

View file

@ -0,0 +1,56 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_CHANNEL_H__
#define __NOUVEAU_CHANNEL_H__

/* One of the 8 FIFO subchannels and the graphics object bound to it. */
struct nouveau_subchannel {
	struct nouveau_grobj *gr;	/* bound object, NULL when free */
	unsigned sequence;
};

/* Public view of a FIFO channel. */
struct nouveau_channel {
	struct nouveau_device *device;
	int id;				/* kernel channel number */

	struct nouveau_pushbuf *pushbuf;
	struct nouveau_grobj *nullobj;	/* class 0x30 null object */
	struct nouveau_grobj *vram;	/* object from fb_ctxdma handle */
	struct nouveau_grobj *gart;	/* object from tt_ctxdma handle */

	void *user_private;		/* opaque pointer for the API user */
	void (*hang_notify)(struct nouveau_channel *);	/* called on ring-wait timeout */
	void (*flush_notify)(struct nouveau_channel *);

	struct nouveau_subchannel subc[8];
	unsigned subc_sequence;
};

int
nouveau_channel_alloc(struct nouveau_device *, uint32_t fb, uint32_t tt,
		      struct nouveau_channel **);

void
nouveau_channel_free(struct nouveau_channel **);

#endif

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,202 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include "nouveau_private.h"
#if NOUVEAU_DRM_HEADER_PATCHLEVEL != 12
#error nouveau_drm.h does not match expected patchlevel, update libdrm.
#endif
/* Wrap an already-open DRM fd in a nouveau_device.
 *
 * `close` non-zero transfers ownership of fd/ctx to the device; they
 * are destroyed in nouveau_device_close().  Fails with -EINVAL when
 * the kernel module's patchlevel does not match this library's
 * NOUVEAU_DRM_HEADER_PATCHLEVEL.  On success several GETPARAM values
 * (mm_enabled, vm_vram_base, aperture sizes, chipset id) are cached.
 *
 * Fix: the drmVersion returned by drmGetVersion() was leaked on the
 * patchlevel-mismatch path; it is now freed on every path.
 */
int
nouveau_device_open_existing(struct nouveau_device **dev, int close,
			     int fd, drm_context_t ctx)
{
	struct nouveau_device_priv *nvdev;
	drmVersionPtr ver;
	uint64_t value;
	int ret;

	if (!dev || *dev)
		return -EINVAL;

	ver = drmGetVersion(fd);
	if (!ver)
		return -EINVAL;
	if (ver->version_patchlevel != NOUVEAU_DRM_HEADER_PATCHLEVEL) {
		drmFreeVersion(ver);
		return -EINVAL;
	}
	drmFreeVersion(ver);

	nvdev = calloc(1, sizeof(*nvdev));
	if (!nvdev)
		return -ENOMEM;
	nvdev->fd = fd;
	nvdev->ctx = ctx;
	nvdev->needs_close = close;

	ret = drmCommandNone(nvdev->fd, DRM_NOUVEAU_CARD_INIT);
	if (ret) {
		nouveau_device_close((void *)&nvdev);
		return ret;
	}

	/* Cache frequently-needed kernel parameters. */
	ret = nouveau_device_get_param(&nvdev->base,
				       NOUVEAU_GETPARAM_MM_ENABLED, &value);
	if (ret) {
		nouveau_device_close((void *)&nvdev);
		return ret;
	}
	nvdev->mm_enabled = value;

	ret = nouveau_device_get_param(&nvdev->base,
				       NOUVEAU_GETPARAM_VM_VRAM_BASE, &value);
	if (ret) {
		nouveau_device_close((void *)&nvdev);
		return ret;
	}
	nvdev->base.vm_vram_base = value;

	ret = nouveau_device_get_param(&nvdev->base,
				       NOUVEAU_GETPARAM_FB_SIZE, &value);
	if (ret) {
		nouveau_device_close((void *)&nvdev);
		return ret;
	}
	nvdev->vram_aper_size = value;

	ret = nouveau_device_get_param(&nvdev->base,
				       NOUVEAU_GETPARAM_AGP_SIZE, &value);
	if (ret) {
		nouveau_device_close((void *)&nvdev);
		return ret;
	}
	nvdev->gart_aper_size = value;

	ret = nouveau_bo_init(&nvdev->base);
	if (ret) {
		nouveau_device_close((void *)&nvdev);
		return ret;
	}

	ret = nouveau_device_get_param(&nvdev->base,
				       NOUVEAU_GETPARAM_CHIPSET_ID, &value);
	if (ret) {
		nouveau_device_close((void *)&nvdev);
		return ret;
	}
	nvdev->base.chipset = value;

	*dev = &nvdev->base;
	return 0;
}
/* Open the nouveau DRM device identified by busid and wrap it in a
 * nouveau_device that owns the fd and DRM context.  Returns 0 or a
 * negative errno; all partially-acquired resources are released on
 * failure.
 */
int
nouveau_device_open(struct nouveau_device **dev, const char *busid)
{
	drm_context_t ctx;
	int fd, ret;

	if (!dev || *dev)
		return -EINVAL;

	fd = drmOpen("nouveau", busid);
	if (fd < 0)
		return -EINVAL;

	ret = drmCreateContext(fd, &ctx);
	if (ret == 0) {
		/* Hand ownership of fd/ctx to the device (close=1). */
		ret = nouveau_device_open_existing(dev, 1, fd, ctx);
		if (ret == 0)
			return 0;
		drmDestroyContext(fd, ctx);
	}

	drmClose(fd);
	return ret;
}
/* Tear down a nouveau_device and clear the caller's pointer.
 *
 * Buffer-object state is always taken down; the DRM context and fd
 * are only destroyed when the device owns them (needs_close set by
 * nouveau_device_open()).  Safe to call with NULL or already-cleared
 * pointers.
 */
void
nouveau_device_close(struct nouveau_device **dev)
{
	struct nouveau_device_priv *priv;

	if (!dev || !*dev)
		return;
	priv = nouveau_device(*dev);
	*dev = NULL;

	nouveau_bo_takedown(&priv->base);

	if (priv->needs_close) {
		drmDestroyContext(priv->fd, priv->ctx);
		drmClose(priv->fd);
	}
	free(priv);
}
/* Query a kernel parameter (NOUVEAU_GETPARAM_*) via the GETPARAM
 * ioctl.  On success stores the result in *value and returns 0;
 * otherwise returns -EINVAL (bad arguments) or the ioctl's error.
 */
int
nouveau_device_get_param(struct nouveau_device *dev,
			 uint64_t param, uint64_t *value)
{
	struct nouveau_device_priv *priv = nouveau_device(dev);
	struct drm_nouveau_getparam g;
	int ret;

	if (!priv || !value)
		return -EINVAL;

	g.param = param;
	ret = drmCommandWriteRead(priv->fd, DRM_NOUVEAU_GETPARAM,
				  &g, sizeof(g));
	if (ret == 0)
		*value = g.value;
	return ret;
}
/* Set a kernel parameter (NOUVEAU_SETPARAM_*) via the SETPARAM ioctl.
 * Returns 0 on success, -EINVAL on a NULL device, or the ioctl error.
 */
int
nouveau_device_set_param(struct nouveau_device *dev,
			 uint64_t param, uint64_t value)
{
	struct nouveau_device_priv *priv = nouveau_device(dev);
	struct drm_nouveau_setparam s;

	if (!priv)
		return -EINVAL;

	s.param = param;
	s.value = value;
	return drmCommandWriteRead(priv->fd, DRM_NOUVEAU_SETPARAM,
				   &s, sizeof(s));
}

View file

@ -0,0 +1,31 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_DEVICE_H__
#define __NOUVEAU_DEVICE_H__

/* Public, minimal view of an opened nouveau device; the library's
 * private state lives in nouveau_device_priv (nouveau_drmif.h). */
struct nouveau_device {
	unsigned chipset;	/* from NOUVEAU_GETPARAM_CHIPSET_ID */
	uint64_t vm_vram_base;	/* from NOUVEAU_GETPARAM_VM_VRAM_BASE */
};

#endif

View file

@ -0,0 +1,216 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdint.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include "nouveau_drmif.h"
#include "nouveau_dma.h"
/* Read the hardware GET pointer for a channel (raw register value;
 * callers convert it to a pushbuf dword index with LOCAL_GET()). */
static inline uint32_t
READ_GET(struct nouveau_channel_priv *nvchan)
{
	return *nvchan->get;
}
/* Publish a new PUT pointer to the hardware.  `val` is a dword index
 * into the ring; it is converted to the byte address the chip expects
 * and mirrored into the software DMA state. */
static inline void
WRITE_PUT(struct nouveau_channel_priv *nvchan, uint32_t val)
{
	uint32_t put = ((val << 2) + nvchan->dma->base);
	volatile int dum;

	NOUVEAU_DMA_BARRIER;
	/* Dummy read of GET before the doorbell write.  NOTE(review):
	 * presumably this flushes posted writes to the chip — confirm;
	 * do not remove or reorder. */
	dum = READ_GET(nvchan);

	*nvchan->put = put;
	nvchan->dma->put = val;
#ifdef NOUVEAU_DMA_TRACE
	printf("WRITE_PUT %d/0x%08x\n", nvchan->drm.channel, put);
#endif
	NOUVEAU_DMA_BARRIER;
}
/* Convert a raw GPU GET address into a dword index within this ring.
 * Returns 1 and rewrites *val on success; returns 0 (leaving *val
 * untouched) when the address lies outside the ring. */
static inline int
LOCAL_GET(struct nouveau_dma_priv *dma, uint32_t *val)
{
	uint32_t gpuaddr = *val;

	if (gpuaddr < dma->base || gpuaddr > (dma->base + (dma->max << 2)))
		return 0;

	*val = (gpuaddr - dma->base) >> 2;
	return 1;
}
/* Initialise the software DMA state for a user-driven (nomm) channel
 * and pad the start of the ring with RING_SKIPS no-op dwords.  max is
 * two dwords short of the buffer to leave room for the wrap JUMP. */
void
nouveau_dma_channel_init(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	int i;

	nvchan->dma = &nvchan->struct_dma;
	nvchan->dma->base = nvchan->drm.put_base;
	nvchan->dma->cur = nvchan->dma->put = 0;
	nvchan->dma->max = (nvchan->drm.cmdbuf_size >> 2) - 2;
	nvchan->dma->free = nvchan->dma->max - nvchan->dma->cur;

	RING_SPACE_CH(chan, RING_SKIPS);
	for (i = 0; i < RING_SKIPS; i++)
		OUT_RING_CH(chan, 0);
}
/* Abort the surrounding wait loop with -EBUSY once NOUVEAU_DMA_TIMEOUT
 * milliseconds have elapsed since t_start (expects t_start in scope). */
#define CHECK_TIMEOUT() do { \
	if ((NOUVEAU_TIME_MSEC() - t_start) > NOUVEAU_DMA_TIMEOUT) \
		return - EBUSY; \
	} while(0)

/* Busy-wait until at least `size` dwords are free in the ring.
 *
 * Pending commands are kicked off first, then the GPU's GET pointer is
 * polled.  When GET trails PUT and the space up to the end of the ring
 * is still too small, a JUMP back to the ring start (0x20000000 |
 * base) is emitted and the write position wraps to RING_SKIPS.
 * Returns 0 on success or -EBUSY on timeout.
 */
int
nouveau_dma_wait(struct nouveau_channel *chan, unsigned size)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_dma_priv *dma = nvchan->dma;
	uint32_t get, t_start;

	FIRE_RING_CH(chan);

	t_start = NOUVEAU_TIME_MSEC();
	while (dma->free < size) {
		CHECK_TIMEOUT();

		get = READ_GET(nvchan);
		if (!LOCAL_GET(dma, &get))
			continue;

		if (dma->put >= get) {
			/* GET trails us: free space runs to the end of
			 * the ring; wrap with a JUMP if that's short. */
			dma->free = dma->max - dma->cur;
			if (dma->free < size) {
#ifdef NOUVEAU_DMA_DEBUG
				dma->push_free = 1;
#endif
				OUT_RING_CH(chan, 0x20000000 | dma->base);
				if (get <= RING_SKIPS) {
					/*corner case - will be idle*/
					if (dma->put <= RING_SKIPS)
						WRITE_PUT(nvchan,
							  RING_SKIPS + 1);
					/* Wait for GET to move past the
					 * skip area before reusing it. */
					do {
						CHECK_TIMEOUT();
						get = READ_GET(nvchan);
						if (!LOCAL_GET(dma, &get))
							get = 0;
					} while (get <= RING_SKIPS);
				}
				WRITE_PUT(nvchan, RING_SKIPS);
				dma->cur = dma->put = RING_SKIPS;
				dma->free = get - (RING_SKIPS + 1);
			}
		} else {
			/* GET is ahead: free space is the gap up to it
			 * minus one dword to keep PUT != GET. */
			dma->free = get - dma->cur - 1;
		}
	}
	return 0;
}
#ifdef NOUVEAU_DMA_DUMP_POSTRELOC_PUSHBUF
/* Debug aid: decode and print the pushbuf contents between dword
 * indices get and put — method headers (MTHD/NINC), their data, and
 * JUMPs (which are followed).  Asserts on out-of-range indices or an
 * unknown opcode. */
static void
nouveau_dma_parse_pushbuf(struct nouveau_channel *chan, int get, int put)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	unsigned mthd_count = 0;

	while (get != put) {
		uint32_t gpuget = (get << 2) + nvchan->drm.put_base;
		uint32_t data;

		if (get < 0 || get >= nvchan->drm.cmdbuf_size)
			assert(0);
		data = nvchan->pushbuf[get++];

		/* Dwords belonging to the current method packet. */
		if (mthd_count) {
			printf("0x%08x 0x%08x\n", gpuget, data);
			mthd_count--;
			continue;
		}

		switch (data & 0x60000000) {
		case 0x00000000:
			mthd_count = (data >> 18) & 0x7ff;
			printf("0x%08x 0x%08x MTHD "
			       "Sc %d Mthd 0x%04x Size %d\n",
			       gpuget, data, (data>>13) & 7, data & 0x1ffc,
			       mthd_count);
			break;
		case 0x20000000:
			get = (data & 0x1ffffffc) >> 2;
			printf("0x%08x 0x%08x JUMP 0x%08x\n",
			       gpuget, data, data & 0x1ffffffc);
			continue;
		case 0x40000000:
			mthd_count = (data >> 18) & 0x7ff;
			printf("0x%08x 0x%08x NINC "
			       "Sc %d Mthd 0x%04x Size %d\n",
			       gpuget, data, (data>>13) & 7, data & 0x1ffc,
			       mthd_count);
			break;
		case 0x60000000:
			/* DMA_OPCODE_CALL apparently, doesn't seem to work on
			 * my NV40 at least..
			 */
			/* fall-through */
		default:
			printf("DMA_PUSHER 0x%08x 0x%08x\n", gpuget, data);
			assert(0);
		}
	}
}
#endif
/* Submit buffered commands by publishing the software write position
 * as the hardware PUT pointer.  No-op when nothing has been emitted
 * since the last kickoff; in DEBUG builds refuses to submit a
 * half-written method packet. */
void
nouveau_dma_kickoff(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_dma_priv *dma = nvchan->dma;

	if (dma->cur == dma->put)
		return;

#ifdef NOUVEAU_DMA_DEBUG
	if (dma->push_free) {
		printf("Packet incomplete: %d left\n", dma->push_free);
		return;
	}
#endif
#ifdef NOUVEAU_DMA_DUMP_POSTRELOC_PUSHBUF
	nouveau_dma_parse_pushbuf(chan, dma->put, dma->cur);
#endif

	WRITE_PUT(nvchan, dma->cur);
}

View file

@ -0,0 +1,154 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_DMA_H__
#define __NOUVEAU_DMA_H__
#include <string.h>
#include "nouveau_private.h"
//#define NOUVEAU_DMA_DEBUG
//#define NOUVEAU_DMA_TRACE
//#define NOUVEAU_DMA_DUMP_POSTRELOC_PUSHBUF
#if defined(__amd64__)
#define NOUVEAU_DMA_BARRIER asm volatile("lock; addl $0,0(%%rsp)" ::: "memory")
#elif defined(__i386__)
#define NOUVEAU_DMA_BARRIER asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#else
#define NOUVEAU_DMA_BARRIER
#endif
#define NOUVEAU_DMA_TIMEOUT 2000
#define NOUVEAU_TIME_MSEC() 0
#define RING_SKIPS 8
extern int nouveau_dma_wait(struct nouveau_channel *chan, unsigned size);
extern void nouveau_dma_subc_bind(struct nouveau_grobj *);
extern void nouveau_dma_channel_init(struct nouveau_channel *);
extern void nouveau_dma_kickoff(struct nouveau_channel *);
#ifdef NOUVEAU_DMA_DEBUG
static char faulty[1024];
#endif
/* Emit one dword into the channel's pushbuf at the software write
 * position.  Space must have been reserved with nouveau_dma_space();
 * DEBUG builds verify the packet budget. */
static inline void
nouveau_dma_out(struct nouveau_channel *chan, uint32_t data)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_dma_priv *dma = nvchan->dma;

#ifdef NOUVEAU_DMA_DEBUG
	if (dma->push_free == 0) {
		printf("No space left in packet at %s\n", faulty);
		return;
	}
	dma->push_free--;
#endif
#ifdef NOUVEAU_DMA_TRACE
	{
		uint32_t offset = (dma->cur << 2) + dma->base;
		printf("\tOUT_RING %d/0x%08x -> 0x%08x\n",
		       nvchan->drm.channel, offset, data);
	}
#endif
	/* dma->base may differ from the mapping's base (put_base), so
	 * rebase the index into the mapped pushbuf. */
	nvchan->pushbuf[dma->cur + (dma->base - nvchan->drm.put_base)/4] = data;
	dma->cur++;
}
/* Emit `size` dwords from ptr into the pushbuf (bulk OUT_RING).
 * In TRACE builds this degrades to per-dword nouveau_dma_out() calls
 * so each dword is logged; otherwise a single memcpy is used. */
static inline void
nouveau_dma_outp(struct nouveau_channel *chan, uint32_t *ptr, int size)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_dma_priv *dma = nvchan->dma;
	(void)dma;

#ifdef NOUVEAU_DMA_DEBUG
	if (dma->push_free < size) {
		printf("Packet too small. Free=%d, Need=%d\n",
		       dma->push_free, size);
		return;
	}
#endif
#ifdef NOUVEAU_DMA_TRACE
	while (size--) {
		nouveau_dma_out(chan, *ptr);
		ptr++;
	}
#else
	/* NOTE(review): unlike nouveau_dma_out(), this index is not
	 * rebased by (dma->base - drm.put_base)/4; presumably the two
	 * bases are equal wherever OUT_RINGp is used — confirm. */
	memcpy(&nvchan->pushbuf[dma->cur], ptr, size << 2);
#ifdef NOUVEAU_DMA_DEBUG
	dma->push_free -= size;
#endif
	dma->cur += size;
#endif
}
/* Reserve `size` dwords of ring space, blocking in nouveau_dma_wait()
 * when the ring is full and invoking the channel's hang_notify
 * callback (if any) when that wait times out. */
static inline void
nouveau_dma_space(struct nouveau_channel *chan, unsigned size)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_dma_priv *dma = nvchan->dma;

	if (dma->free < size) {
		if (nouveau_dma_wait(chan, size) && chan->hang_notify)
			chan->hang_notify(chan);
	}
	dma->free -= size;
#ifdef NOUVEAU_DMA_DEBUG
	/* Budget for the dwords the caller is about to emit. */
	dma->push_free = size;
#endif
}
/* Start a method packet: reserve space and emit the header dword for
 * `size` data dwords of `method` on grobj's subchannel.  file/line
 * feed the DEBUG diagnostics recorded in `faulty`. */
static inline void
nouveau_dma_begin(struct nouveau_channel *chan, struct nouveau_grobj *grobj,
		  int method, int size, const char* file, int line)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_dma_priv *dma = nvchan->dma;
	(void)dma;

#ifdef NOUVEAU_DMA_TRACE
	printf("BEGIN_RING %d/%08x/%d/0x%04x/%d\n", nvchan->drm.channel,
	       grobj->handle, grobj->subc, method, size);
#endif
#ifdef NOUVEAU_DMA_DEBUG
	if (dma->push_free) {
		printf("Previous packet incomplete: %d left at %s\n",
		       dma->push_free, faulty);
		return;
	}
	sprintf(faulty,"%s:%d",file,line);
#endif
	/* +1 for the header dword itself. */
	nouveau_dma_space(chan, (size + 1));
	nouveau_dma_out(chan, (size << 18) | (grobj->subc << 13) | method);
}
#define RING_SPACE_CH(ch,sz) nouveau_dma_space((ch), (sz))
#define BEGIN_RING_CH(ch,gr,m,sz) nouveau_dma_begin((ch), (gr), (m), (sz), __FUNCTION__, __LINE__ )
#define OUT_RING_CH(ch, data) nouveau_dma_out((ch), (data))
#define OUT_RINGp_CH(ch,ptr,dwords) nouveau_dma_outp((ch), (void*)(ptr), \
(dwords))
#define FIRE_RING_CH(ch) nouveau_dma_kickoff((ch))
#define WAIT_RING_CH(ch,sz) nouveau_dma_wait((ch), (sz))
#endif

View file

@ -0,0 +1,62 @@
/*
* Copyright 2008 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_DRMIF_H__
#define __NOUVEAU_DRMIF_H__

#include <stdint.h>
#include <xf86drm.h>
#include "nouveau_device.h"

/* Library-private device state wrapping the public nouveau_device. */
struct nouveau_device_priv {
	struct nouveau_device base;

	int fd;			/* DRM file descriptor */
	drm_context_t ctx;
	drmLock *lock;
	int needs_close;	/* non-zero: we own fd/ctx, close on teardown */
	int mm_enabled;		/* kernel memory manager available */

	/*XXX: move to nouveau_device when interface gets bumped */
	uint64_t vram_aper_size;
	uint64_t gart_aper_size;
};
/* Downcast from the public struct (base is the first member). */
#define nouveau_device(n) ((struct nouveau_device_priv *)(n))

int
nouveau_device_open_existing(struct nouveau_device **, int close,
			     int fd, drm_context_t ctx);

int
nouveau_device_open(struct nouveau_device **, const char *busid);

void
nouveau_device_close(struct nouveau_device **);

int
nouveau_device_get_param(struct nouveau_device *, uint64_t param, uint64_t *v);

int
nouveau_device_set_param(struct nouveau_device *, uint64_t param, uint64_t val);

#endif

View file

@ -0,0 +1,243 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include "nouveau_private.h"
#include "nouveau_dma.h"
/* Unlink an emitted-but-unsignalled fence from its channel's
 * singly-linked fence list (fence_head/fence_tail).  The fence must
 * actually be on the list; the non-head case asserts this. */
static void
nouveau_fence_del_unsignalled(struct nouveau_fence *fence)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(fence->channel);
	struct nouveau_fence *le;

	/* Fast path: fence is the list head. */
	if (nvchan->fence_head == fence) {
		nvchan->fence_head = nouveau_fence(fence)->next;
		if (nvchan->fence_head == NULL)
			nvchan->fence_tail = NULL;
		return;
	}

	/* Walk to the predecessor of 'fence' and splice it out,
	 * fixing up the tail pointer if needed. */
	le = nvchan->fence_head;
	while (le && nouveau_fence(le)->next != fence)
		le = nouveau_fence(le)->next;
	assert(le && nouveau_fence(le)->next == fence);
	nouveau_fence(le)->next = nouveau_fence(fence)->next;
	if (nvchan->fence_tail == fence)
		nvchan->fence_tail = le;
}
/* Drop one reference to *fence (clearing the caller's pointer); on the
 * last reference, free it.  If the fence was emitted but has not yet
 * signalled and still has signal callbacks queued, re-take a reference
 * and block in nouveau_fence_wait() so the callbacks run before the
 * memory is released. */
static void
nouveau_fence_del(struct nouveau_fence **fence)
{
	struct nouveau_fence_priv *nvfence;

	if (!fence || !*fence)
		return;
	nvfence = nouveau_fence(*fence);
	*fence = NULL;

	if (--nvfence->refcount)
		return;

	if (nvfence->emitted && !nvfence->signalled) {
		if (nvfence->signal_cb) {
			/* Waiting will signal the fence, run the
			 * callbacks, and drop the reference again. */
			nvfence->refcount++;
			nouveau_fence_wait((void *)&nvfence);
			return;
		}

		/* No callbacks pending: just take it off the
		 * channel's unsignalled list. */
		nouveau_fence_del_unsignalled(&nvfence->base);
	}
	free(nvfence);
}
/* Allocate a fresh, unemitted fence on 'chan' with one reference.
 * *fence must be NULL on entry.
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on OOM. */
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **fence)
{
	struct nouveau_fence_priv *priv;

	if (!chan || !fence || *fence)
		return -EINVAL;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->refcount = 1;
	priv->base.channel = chan;

	*fence = &priv->base;
	return 0;
}
int
nouveau_fence_ref(struct nouveau_fence *ref, struct nouveau_fence **fence)
{
if (!fence)
return -EINVAL;
if (ref)
nouveau_fence(ref)->refcount++;
if (*fence)
nouveau_fence_del(fence);
*fence = ref;
return 0;
}
/* Register func(priv) to be invoked when 'fence' signals.  New entries
 * are pushed onto the head of the fence's callback list, so callbacks
 * run in reverse registration order. */
int
nouveau_fence_signal_cb(struct nouveau_fence *fence, void (*func)(void *),
			void *priv)
{
	struct nouveau_fence_priv *nvfence = nouveau_fence(fence);
	struct nouveau_fence_cb *entry;

	if (!nvfence || !func)
		return -EINVAL;

	entry = malloc(sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->func = func;
	entry->priv = priv;
	entry->next = nvfence->signal_cb;
	nvfence->signal_cb = entry;

	return 0;
}
/* Assign the next sequence number to 'fence', emit it to the channel,
 * kick the DMA, and append the fence to the channel's unsignalled
 * list.  Without a fence notifier, the sequence number is written via
 * a two-dword method on subchannel 0 (header 0x00040050 — presumably
 * method 0x0050, size 1, subc 0; TODO confirm encoding). */
void
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(fence->channel);
	struct nouveau_fence_priv *nvfence = nouveau_fence(fence);

	nvfence->emitted = 1;
	nvfence->sequence = ++nvchan->fence_sequence;
	if (nvfence->sequence == 0xffffffff)
		printf("AII wrap unhandled\n");

	if (!nvchan->fence_ntfy) {
		/*XXX: assumes subc 0 is populated */
		nouveau_dma_space(fence->channel, 2);
		nouveau_dma_out  (fence->channel, 0x00040050);
		nouveau_dma_out  (fence->channel, nvfence->sequence);
	}
	nouveau_dma_kickoff(fence->channel);

	/* Append to the channel's unsignalled-fence list. */
	if (nvchan->fence_tail) {
		nouveau_fence(nvchan->fence_tail)->next = fence;
	} else {
		nvchan->fence_head = fence;
	}
	nvchan->fence_tail = fence;
}
/* Retire every fence on the channel's list whose sequence number is
 * <= 'sequence': unlink it, mark it signalled, and run its signal
 * callbacks (holding a temporary reference while they execute). */
static void
nouveau_fence_flush_seq(struct nouveau_channel *chan, uint32_t sequence)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);

	while (nvchan->fence_head) {
		struct nouveau_fence_priv *nvfence;

		nvfence = nouveau_fence(nvchan->fence_head);
		/* List is in submission order; stop at the first fence
		 * the hardware has not reached yet. */
		if (nvfence->sequence > sequence)
			break;
		nouveau_fence_del_unsignalled(&nvfence->base);
		nvfence->signalled = 1;

		if (nvfence->signal_cb) {
			struct nouveau_fence *fence = NULL;

			/* Keep the fence alive while the callbacks run;
			 * a callback may drop the last user reference. */
			nouveau_fence_ref(&nvfence->base, &fence);

			while (nvfence->signal_cb) {
				struct nouveau_fence_cb *cb;

				cb = nvfence->signal_cb;
				nvfence->signal_cb = cb->next;
				cb->func(cb->priv);
				free(cb);
			}

			nouveau_fence_ref(NULL, &fence);
		}
	}
}
/* Retire fences the GPU has completed.  Only meaningful on the
 * no-notifier path: *ref_cnt is presumably the last sequence number
 * processed by the hardware — TODO confirm against channel setup. */
void
nouveau_fence_flush(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);

	if (!nvchan->fence_ntfy)
		nouveau_fence_flush_seq(chan, *nvchan->ref_cnt);
}
/* Block until *fence has signalled, then drop the caller's reference
 * (setting *fence = NULL).  An unemitted fence is treated as already
 * complete.  Returns 0 on success or a negative errno (e.g. -EBUSY
 * from the notifier wait timing out). */
int
nouveau_fence_wait(struct nouveau_fence **fence)
{
	struct nouveau_fence_priv *nvfence;
	struct nouveau_channel_priv *nvchan;

	if (!fence)
		return -EINVAL;

	nvfence = nouveau_fence(*fence);
	if (!nvfence)
		return 0;
	nvchan = nouveau_channel(nvfence->base.channel);

	if (nvfence->emitted) {
		if (!nvfence->signalled && nvchan->fence_ntfy) {
			struct nouveau_channel *chan = &nvchan->base;
			int ret;

			/*XXX: NV04/NV05: Full sync + flush all fences */
			nouveau_notifier_reset(nvchan->fence_ntfy, 0);
			BEGIN_RING(chan, nvchan->fence_grobj, 0x0104, 1);
			OUT_RING  (chan, 0);
			BEGIN_RING(chan, nvchan->fence_grobj, 0x0100, 1);
			OUT_RING  (chan, 0);
			FIRE_RING (chan);
			ret = nouveau_notifier_wait_status(nvchan->fence_ntfy,
							   0, 0, 2.0);
			if (ret)
				return ret;
			/* Full sync done: everything up to the current
			 * sequence has completed. */
			nouveau_fence_flush_seq(chan, nvchan->fence_sequence);
		}

		/* Busy-wait until the fence is retired (no-notifier
		 * path polls *ref_cnt via nouveau_fence_flush). */
		while (!nvfence->signalled)
			nouveau_fence_flush(nvfence->base.channel);
	}

	nouveau_fence_ref(NULL, fence);
	return 0;
}

View file

@ -0,0 +1,138 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdlib.h>
#include <errno.h>
#include "nouveau_private.h"
/* Allocate a hardware object of class 'class' with name 'handle' via
 * the kernel GROBJ_ALLOC ioctl and wrap it in a nouveau_grobj that is
 * initially unbound to any subchannel.  *grobj must be NULL on entry.
 * Returns 0 on success or a negative errno. */
int
nouveau_grobj_alloc(struct nouveau_channel *chan, uint32_t handle,
		    int class, struct nouveau_grobj **grobj)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_grobj_priv *nvgrobj;
	struct drm_nouveau_grobj_alloc g;
	int ret;

	if (!nvdev || !grobj || *grobj)
		return -EINVAL;

	nvgrobj = calloc(1, sizeof(*nvgrobj));
	if (!nvgrobj)
		return -ENOMEM;
	nvgrobj->base.channel = chan;
	nvgrobj->base.handle = handle;
	nvgrobj->base.grclass = class;
	nvgrobj->base.bound = NOUVEAU_GROBJ_UNBOUND;
	nvgrobj->base.subc = -1;

	g.channel = chan->id;
	g.handle  = handle;
	g.class   = class;
	ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GROBJ_ALLOC,
			      &g, sizeof(g));
	if (ret) {
		/* grclass is set, so free() will also issue the
		 * kernel GPUOBJ_FREE — harmless if alloc failed. */
		nouveau_grobj_free((void *)&nvgrobj);
		return ret;
	}

	*grobj = &nvgrobj->base;
	return 0;
}
/* Wrap an already-existing object handle without allocating anything
 * on the hardware side.  grclass stays 0, which also tells
 * nouveau_grobj_free() not to issue a kernel GPUOBJ_FREE for it. */
int
nouveau_grobj_ref(struct nouveau_channel *chan, uint32_t handle,
		  struct nouveau_grobj **grobj)
{
	struct nouveau_grobj_priv *priv;

	if (!chan || !grobj || *grobj)
		return -EINVAL;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->base.handle = handle;
	priv->base.channel = chan;
	priv->base.grclass = 0;

	*grobj = &priv->base;
	return 0;
}
/* Release a grobj: destroy the kernel object (only when we allocated
 * it, i.e. grclass != 0 — see nouveau_grobj_ref) and free the wrapper.
 * Clears the caller's pointer. */
void
nouveau_grobj_free(struct nouveau_grobj **grobj)
{
	struct nouveau_device_priv *nvdev;
	struct nouveau_channel_priv *chan;
	struct nouveau_grobj_priv *nvgrobj;

	if (!grobj || !*grobj)
		return;
	nvgrobj = nouveau_grobj(*grobj);
	*grobj = NULL;

	chan = nouveau_channel(nvgrobj->base.channel);
	nvdev = nouveau_device(chan->base.device);
	if (nvgrobj->base.grclass) {
		struct drm_nouveau_gpuobj_free f;

		f.channel = chan->drm.channel;
		f.handle  = nvgrobj->base.handle;
		drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GPUOBJ_FREE,
				&f, sizeof(f));
	}
	free(nvgrobj);
}
/* Automatically bind 'grobj' to a subchannel: pick the least-recently
 * used subchannel that is not explicitly bound (via BIND_RING), evict
 * whatever was auto-bound there, and emit the bind method (0x0000).
 *
 * Fix: if all eight subchannels are BOUND_EXPLICIT the selection loop
 * leaves 'subc' NULL and the code dereferenced it; fail loudly with an
 * assert instead. */
void
nouveau_grobj_autobind(struct nouveau_grobj *grobj)
{
	struct nouveau_subchannel *subc = NULL;
	int i;

	/* Choose the subchannel with the lowest LRU sequence among
	 * those not reserved by an explicit binding. */
	for (i = 0; i < 8; i++) {
		struct nouveau_subchannel *scc = &grobj->channel->subc[i];

		if (scc->gr && scc->gr->bound == NOUVEAU_GROBJ_BOUND_EXPLICIT)
			continue;

		if (!subc || scc->sequence < subc->sequence)
			subc = scc;
	}
	/* All subchannels explicitly bound: nowhere to put this object.
	 * Previously this fell through to a NULL dereference. */
	assert(subc);

	/* Evict the previous (auto-bound) occupant, if any. */
	if (subc->gr) {
		subc->gr->bound = NOUVEAU_GROBJ_UNBOUND;
		subc->gr->subc = -1;
	}

	subc->gr = grobj;
	subc->gr->bound = NOUVEAU_GROBJ_BOUND;
	subc->gr->subc = subc - &grobj->channel->subc[0];

	BEGIN_RING(grobj->channel, grobj, 0x0000, 1);
	OUT_RING  (grobj->channel, grobj->handle);
}

View file

@ -0,0 +1,48 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_GROBJ_H__
#define __NOUVEAU_GROBJ_H__
#include "nouveau_channel.h"
/* A bindable hardware ("graphics") object living on a channel. */
struct nouveau_grobj {
	struct nouveau_channel *channel;
	int grclass;		/* hardware class; 0 for wrapped pre-existing handles */
	uint32_t handle;

	/* Subchannel binding state, managed by BIND_RING()/autobind. */
	enum {
		NOUVEAU_GROBJ_UNBOUND = 0,
		NOUVEAU_GROBJ_BOUND = 1,
		NOUVEAU_GROBJ_BOUND_EXPLICIT = 2
	} bound;
	int subc;		/* bound subchannel index, -1 when unbound */
};

int nouveau_grobj_alloc(struct nouveau_channel *, uint32_t handle,
			int class, struct nouveau_grobj **);
int nouveau_grobj_ref(struct nouveau_channel *, uint32_t handle,
		      struct nouveau_grobj **);
void nouveau_grobj_free(struct nouveau_grobj **);
void nouveau_grobj_autobind(struct nouveau_grobj *);
#endif

View file

@ -0,0 +1,146 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdlib.h>
#include <errno.h>
#include <sys/time.h>
#include "nouveau_private.h"
/* Declare 'nvnotify' and point __v at notifier slot 'id' (32 bytes per
 * slot) inside this notifier's mapping.  Relies on variables named
 * 'notifier' and 'id' existing in the enclosing function. */
#define NOTIFIER(__v)                                                          \
	struct nouveau_notifier_priv *nvnotify = nouveau_notifier(notifier);   \
	volatile uint32_t *__v = (uint32_t *)((char *)nvnotify->map + (id * 32))
/* Allocate 'count' notifier slots for the channel via the kernel and
 * record where they live inside the channel's mapped notifier block.
 * *notifier must be NULL on entry.  Returns 0 or a negative errno. */
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
		       int count, struct nouveau_notifier **notifier)
{
	struct nouveau_notifier_priv *nvnotify;
	int ret;

	if (!chan || !notifier || *notifier)
		return -EINVAL;

	nvnotify = calloc(1, sizeof(struct nouveau_notifier_priv));
	if (!nvnotify)
		return -ENOMEM;
	nvnotify->base.channel = chan;
	nvnotify->base.handle  = handle;

	nvnotify->drm.channel = chan->id;
	nvnotify->drm.handle  = handle;
	nvnotify->drm.count   = count;
	if ((ret = drmCommandWriteRead(nouveau_device(chan->device)->fd,
				       DRM_NOUVEAU_NOTIFIEROBJ_ALLOC,
				       &nvnotify->drm,
				       sizeof(nvnotify->drm)))) {
		nouveau_notifier_free((void *)&nvnotify);
		return ret;
	}

	/* The kernel returns our slots' offset within the channel's
	 * notifier block. */
	nvnotify->map = (char *)nouveau_channel(chan)->notifier_block +
			nvnotify->drm.offset;
	*notifier = &nvnotify->base;
	return 0;
}
/* Release a notifier: ask the kernel to destroy the backing object and
 * free the wrapper.  Clears the caller's pointer. */
void
nouveau_notifier_free(struct nouveau_notifier **notifier)
{
	struct nouveau_notifier_priv *nvnotify;
	struct nouveau_channel_priv *nvchan;
	struct nouveau_device_priv *nvdev;
	struct drm_nouveau_gpuobj_free f;

	if (!notifier || !*notifier)
		return;
	nvnotify = nouveau_notifier(*notifier);
	*notifier = NULL;

	nvchan = nouveau_channel(nvnotify->base.channel);
	nvdev  = nouveau_device(nvchan->base.device);

	f.channel = nvchan->drm.channel;
	f.handle  = nvnotify->base.handle;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GPUOBJ_FREE, &f, sizeof(f));
	free(nvnotify);
}
/* Re-arm notifier slot 'id': clear the timestamp and return value and
 * set the status byte back to IN_PROCESS so a subsequent wait can
 * detect the hardware writing a new status. */
void
nouveau_notifier_reset(struct nouveau_notifier *notifier, int id)
{
	NOTIFIER(n);

	n[NV_NOTIFY_TIME_0      /4] = 0x00000000;
	n[NV_NOTIFY_TIME_1      /4] = 0x00000000;
	n[NV_NOTIFY_RETURN_VALUE/4] = 0x00000000;
	n[NV_NOTIFY_STATE       /4] = (NV_NOTIFY_STATE_STATUS_IN_PROCESS <<
				       NV_NOTIFY_STATE_STATUS_SHIFT);
}
/* Read the status byte (top 8 bits of the state word) of slot 'id'. */
uint32_t
nouveau_notifier_status(struct nouveau_notifier *notifier, int id)
{
	NOTIFIER(n);

	return n[NV_NOTIFY_STATE/4] >> NV_NOTIFY_STATE_STATUS_SHIFT;
}
/* Read the 32-bit return value the hardware wrote into slot 'id'. */
uint32_t
nouveau_notifier_return_val(struct nouveau_notifier *notifier, int id)
{
	NOTIFIER(n);

	return n[NV_NOTIFY_RETURN_VALUE/4];
}
/* Current wall-clock time in seconds as a double (microsecond
 * resolution), for notifier timeout accounting. */
static inline double
gettime(void)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return (double)now.tv_usec / 1000000.0 + now.tv_sec;
}
/* Spin until notifier slot 'id' reports 'status' or 'timeout' seconds
 * elapse.  Returns 0 on success, -EBUSY on timeout.
 * NOTE(review): with timeout == 0 the elapsed time is never updated,
 * so the loop spins forever unless the status appears — presumably
 * intentional ("wait forever"), but worth confirming. */
int
nouveau_notifier_wait_status(struct nouveau_notifier *notifier, int id,
			     uint32_t status, double timeout)
{
	NOTIFIER(n);
	double time = 0, t_start = gettime();

	while (time <= timeout) {
		uint32_t v;

		/* Status byte occupies the top 8 bits of the state word. */
		v = n[NV_NOTIFY_STATE/4] >> NV_NOTIFY_STATE_STATUS_SHIFT;
		if (v == status)
			return 0;

		if (timeout)
			time = gettime() - t_start;
	}

	return -EBUSY;
}

View file

@ -0,0 +1,63 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_NOTIFIER_H__
#define __NOUVEAU_NOTIFIER_H__
/* Notifier memory layout: each notifier is a 32-byte block the
 * hardware writes a timestamp, return value and status word into.
 * The status byte lives in the top 8 bits of the state word. */
#define NV_NOTIFIER_SIZE                                                    32
#define NV_NOTIFY_TIME_0                                              0x00000000
#define NV_NOTIFY_TIME_1                                              0x00000004
#define NV_NOTIFY_RETURN_VALUE                                        0x00000008
#define NV_NOTIFY_STATE                                               0x0000000C
#define NV_NOTIFY_STATE_STATUS_MASK                                   0xFF000000
#define NV_NOTIFY_STATE_STATUS_SHIFT                                          24
#define NV_NOTIFY_STATE_STATUS_COMPLETED                                    0x00
#define NV_NOTIFY_STATE_STATUS_IN_PROCESS                                   0x01
#define NV_NOTIFY_STATE_ERROR_CODE_MASK                               0x0000FFFF
#define NV_NOTIFY_STATE_ERROR_CODE_SHIFT                                       0

/* Public notifier handle. */
struct nouveau_notifier {
	struct nouveau_channel *channel;
	uint32_t handle;
};

int
nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, int count,
		       struct nouveau_notifier **);

void
nouveau_notifier_free(struct nouveau_notifier **);

void
nouveau_notifier_reset(struct nouveau_notifier *, int id);

uint32_t
nouveau_notifier_status(struct nouveau_notifier *, int id);

uint32_t
nouveau_notifier_return_val(struct nouveau_notifier *, int id);

int
nouveau_notifier_wait_status(struct nouveau_notifier *, int id, uint32_t status,
			     double timeout);
#endif

View file

@ -0,0 +1,203 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_PRIVATE_H__
#define __NOUVEAU_PRIVATE_H__
#include <stdint.h>
#include <xf86drm.h>
#include <nouveau_drm.h>
#include "nouveau_drmif.h"
#include "nouveau_device.h"
#include "nouveau_channel.h"
#include "nouveau_grobj.h"
#include "nouveau_notifier.h"
#include "nouveau_bo.h"
#include "nouveau_resource.h"
#include "nouveau_pushbuf.h"
/* Fixed capacities of the per-flush validation/relocation lists. */
#define NOUVEAU_PUSHBUF_MAX_BUFFERS 1024
#define NOUVEAU_PUSHBUF_MAX_RELOCS 1024

/* Private pushbuf state embedded in each channel. */
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;

	int use_cal;		/* NOTE(review): not referenced in this chunk — TODO confirm use */
	struct nouveau_bo *buffer;

	unsigned *pushbuf;	/* CPU-side dword buffer commands are written into */
	unsigned  size;		/* its size, in dwords */

	struct drm_nouveau_gem_pushbuf_bo *buffers;	/* buffers referenced this flush */
	unsigned nr_buffers;
	struct drm_nouveau_gem_pushbuf_reloc *relocs;	/* relocations to patch */
	unsigned nr_relocs;

	/*XXX: nomm */
	struct nouveau_fence *fence;	/* fence covering the current submission */
};
#define nouveau_pushbuf(n) ((struct nouveau_pushbuf_priv *)(n))

/* Pack pointers into the 64-bit fields of the DRM structs and back. */
#define pbbo_to_ptr(o) ((uint64_t)(unsigned long)(o))
#define ptr_to_pbbo(h) ((struct nouveau_pushbuf_bo *)(unsigned long)(h))
#define pbrel_to_ptr(o) ((uint64_t)(unsigned long)(o))
#define ptr_to_pbrel(h) ((struct nouveau_pushbuf_reloc *)(unsigned long)(h))
#define bo_to_ptr(o) ((uint64_t)(unsigned long)(o))
#define ptr_to_bo(h) ((struct nouveau_bo_priv *)(unsigned long)(h))

int
nouveau_pushbuf_init(struct nouveau_channel *);
/* Software copy of a channel's DMA buffer state (offsets in dwords).
 * NOTE(review): the trailing 'dma' is a tentative file-scope variable
 * definition in every translation unit that includes this header; the
 * per-channel struct_dma/dma members below are what the code uses, so
 * this global looks unintentional — confirm before removing. */
struct nouveau_dma_priv {
	uint32_t base;
	uint32_t max;
	uint32_t cur;
	uint32_t put;
	uint32_t free;

	int push_free;
} dma;
/* Library-private per-channel state; public struct nouveau_channel is
 * embedded first so the nouveau_channel() cast below is valid. */
struct nouveau_channel_priv {
	struct nouveau_channel base;

	struct drm_nouveau_channel_alloc drm;	/* kernel channel description */

	void *notifier_block;	/* mapped notifier memory (base for per-notifier offsets) */

	struct nouveau_pushbuf_priv pb;

	/*XXX: nomm */
	volatile uint32_t *user, *put, *get, *ref_cnt;
	uint32_t *pushbuf;
	struct nouveau_dma_priv struct_dma;
	struct nouveau_dma_priv *dma;

	/* Emitted-but-unsignalled fences, oldest first. */
	struct nouveau_fence *fence_head;
	struct nouveau_fence *fence_tail;
	uint32_t fence_sequence;
	struct nouveau_grobj *fence_grobj;
	struct nouveau_notifier *fence_ntfy;
};
#define nouveau_channel(n) ((struct nouveau_channel_priv *)(n))
/* Public (mostly opaque) fence handle: identifies only the channel. */
struct nouveau_fence {
	struct nouveau_channel *channel;
};

/* Node of a fence's singly-linked signal-callback list. */
struct nouveau_fence_cb {
	struct nouveau_fence_cb *next;
	void (*func)(void *);
	void *priv;
};

struct nouveau_fence_priv {
	struct nouveau_fence base;
	int refcount;

	struct nouveau_fence *next;		/* next on the channel's unsignalled list */
	struct nouveau_fence_cb *signal_cb;	/* callbacks run on signal */

	uint32_t sequence;	/* position in channel submission order */
	int emitted;
	int signalled;
};
#define nouveau_fence(n) ((struct nouveau_fence_priv *)(n))

int
nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **);
int
nouveau_fence_ref(struct nouveau_fence *, struct nouveau_fence **);
int
nouveau_fence_signal_cb(struct nouveau_fence *, void (*)(void *), void *);
void
nouveau_fence_emit(struct nouveau_fence *);
int
nouveau_fence_wait(struct nouveau_fence **);
void
nouveau_fence_flush(struct nouveau_channel *);
/* Private grobj wrapper (no extra state yet). */
struct nouveau_grobj_priv {
	struct nouveau_grobj base;
};
#define nouveau_grobj(n) ((struct nouveau_grobj_priv *)(n))

struct nouveau_notifier_priv {
	struct nouveau_notifier base;

	struct drm_nouveau_notifierobj_alloc drm;	/* request/reply, offset filled by kernel */
	volatile void *map;				/* this notifier's slots in the channel block */
};
#define nouveau_notifier(n) ((struct nouveau_notifier_priv *)(n))

struct nouveau_bo_priv {
	struct nouveau_bo base;
	int refcount;

	/* Buffer configuration + usage hints */
	unsigned flags;
	unsigned size;
	unsigned align;
	int user;	/* wraps user memory; GPU writes to it are rejected in emit_reloc */

	/* Tracking */
	struct drm_nouveau_gem_pushbuf_bo *pending;	/* entry on the in-flight validate list */
	struct nouveau_channel *pending_channel;
	int write_marker;

	/* Userspace object */
	void *sysmem;

	/* Kernel object */
	uint32_t global_handle;
	drm_handle_t handle;
	void *map;

	/* Last known information from kernel on buffer status */
	int pinned;
	uint64_t offset;
	uint32_t domain;

	/*XXX: nomm stuff */
	struct nouveau_fence *fence;	/* last submission referencing this bo */
	struct nouveau_fence *wr_fence;	/* last submission writing this bo */
};
#define nouveau_bo(n) ((struct nouveau_bo_priv *)(n))

int
nouveau_bo_init(struct nouveau_device *);

void
nouveau_bo_takedown(struct nouveau_device *);

struct drm_nouveau_gem_pushbuf_bo *
nouveau_bo_emit_buffer(struct nouveau_channel *, struct nouveau_bo *);

int
nouveau_bo_validate_nomm(struct nouveau_bo_priv *, uint32_t);
#include "nouveau_dma.h"
#endif

View file

@ -0,0 +1,276 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include "nouveau_private.h"
#include "nouveau_dma.h"
#define PB_BUFMGR_DWORDS (4096 / 2)
#define PB_MIN_USER_DWORDS 2048
/* Compute the dword to patch into the pushbuf for relocation 'r'
 * against buffer 'pbbo', based on the buffer's presumed offset and
 * domain.  With the kernel memory manager the domain bits are GEM
 * domains, otherwise the legacy NOUVEAU_BO_* flags. */
static uint32_t
nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo *pbbo,
			   struct drm_nouveau_gem_pushbuf_reloc *r,
			   int mm_enabled)
{
	uint32_t push = 0;
	const unsigned is_vram = mm_enabled ? NOUVEAU_GEM_DOMAIN_VRAM :
					      NOUVEAU_BO_VRAM;

	/* Base value: low/high half of the buffer address plus delta,
	 * or the raw data word. */
	if (r->flags & NOUVEAU_GEM_RELOC_LOW)
		push = (pbbo->presumed_offset + r->data);
	else
	if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
		push = (pbbo->presumed_offset + r->data) >> 32;
	else
		push = r->data;

	/* Optionally OR in a value that depends on which memory type
	 * the buffer currently resides in (VRAM vs GART). */
	if (r->flags & NOUVEAU_GEM_RELOC_OR) {
		if (pbbo->presumed_domain & is_vram)
			push |= r->vor;
		else
			push |= r->tor;
	}

	return push;
}
/* Record a relocation for 'bo' at pushbuf position 'ptr': add/find the
 * buffer on this flush's validation list, append a reloc entry, and
 * write a provisional value based on the buffer's last known location
 * (re-patched at flush time if the buffer moved).
 * Returns 0 on success or a negative errno. */
int
nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
			   struct nouveau_bo *bo, uint32_t data, uint32_t flags,
			   uint32_t vor, uint32_t tor)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct drm_nouveau_gem_pushbuf_reloc *r;
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	uint32_t domains = 0;

	if (nvpb->nr_relocs >= NOUVEAU_PUSHBUF_MAX_RELOCS)
		return -ENOMEM;

	/* Wrapped user memory must never be a GPU write target. */
	if (nouveau_bo(bo)->user && (flags & NOUVEAU_BO_WR)) {
		fprintf(stderr, "write to user buffer!!\n");
		return -EINVAL;
	}

	pbbo = nouveau_bo_emit_buffer(chan, bo);
	if (!pbbo)
		return -ENOMEM;

	/* Narrow the buffer's allowed placement to the domains this
	 * reloc permits; it must stay placeable somewhere. */
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	pbbo->valid_domains &= domains;
	assert(pbbo->valid_domains);

	/* nomm: tie the bo's lifetime to this submission's fence. */
	if (!nvdev->mm_enabled) {
		struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

		nouveau_fence_ref(nvpb->fence, &nvbo->fence);
		if (flags & NOUVEAU_BO_WR)
			nouveau_fence_ref(nvpb->fence, &nvbo->wr_fence);
	}

	assert(flags & NOUVEAU_BO_RDWR);
	if (flags & NOUVEAU_BO_RD) {
		pbbo->read_domains |= domains;
	}
	if (flags & NOUVEAU_BO_WR) {
		pbbo->write_domains |= domains;
		nouveau_bo(bo)->write_marker = 1;
	}

	/* Append the relocation entry itself. */
	r = nvpb->relocs + nvpb->nr_relocs++;
	r->bo_index = pbbo - nvpb->buffers;
	r->reloc_index = (uint32_t *)ptr - nvpb->pushbuf;
	r->flags = 0;
	if (flags & NOUVEAU_BO_LOW)
		r->flags |= NOUVEAU_GEM_RELOC_LOW;
	if (flags & NOUVEAU_BO_HIGH)
		r->flags |= NOUVEAU_GEM_RELOC_HIGH;
	if (flags & NOUVEAU_BO_OR)
		r->flags |= NOUVEAU_GEM_RELOC_OR;
	r->data = data;
	r->vor = vor;
	r->tor = tor;

	/* Provisional value written now; DUMMY relocs get 0. */
	*(uint32_t *)ptr = (flags & NOUVEAU_BO_DUMMY) ? 0 :
		nouveau_pushbuf_calc_reloc(pbbo, r, nvdev->mm_enabled);
	return 0;
}
/* (Re)allocate the CPU-side pushbuf, sized at least PB_MIN_USER_DWORDS
 * dwords (or 'min' if larger), and reset the public cursor/remaining
 * state.  In nomm mode, also attach a fresh fence covering the next
 * submission.
 *
 * Fix: the malloc() result was previously unchecked, leaving a NULL
 * pushbuf to be dereferenced by the next OUT_RING; now returns -ENOMEM. */
static int
nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;

	if (nvpb->pushbuf) {
		free(nvpb->pushbuf);
		nvpb->pushbuf = NULL;
	}

	nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
	nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
	if (!nvpb->pushbuf)
		return -ENOMEM;

	nvpb->base.channel = chan;
	nvpb->base.remaining = nvpb->size;
	nvpb->base.cur = nvpb->pushbuf;

	if (!nouveau_device(chan->device)->mm_enabled) {
		/* Drop the previous submission's fence, start a new one. */
		nouveau_fence_ref(NULL, &nvpb->fence);
		nouveau_fence_new(chan, &nvpb->fence);
	}

	return 0;
}
/* One-time pushbuf setup for a channel: allocate the dword buffer and
 * the fixed-size buffer/relocation validation lists, then publish the
 * pushbuf on the public channel struct.
 *
 * Fix: allocation failures were previously ignored (always returned 0,
 * leaving NULL lists behind); now returns -ENOMEM and cleans up. */
int
nouveau_pushbuf_init(struct nouveau_channel *chan)
{
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	int ret;

	ret = nouveau_pushbuf_space(chan, 0);
	if (ret)
		return ret;

	/* Buffer and relocation lists, the same fixed sizes the flush
	 * paths assume (NOUVEAU_PUSHBUF_MAX_*). */
	nvpb->buffers = calloc(NOUVEAU_PUSHBUF_MAX_BUFFERS,
			       sizeof(struct drm_nouveau_gem_pushbuf_bo));
	nvpb->relocs  = calloc(NOUVEAU_PUSHBUF_MAX_RELOCS,
			       sizeof(struct drm_nouveau_gem_pushbuf_reloc));
	if (!nvpb->buffers || !nvpb->relocs) {
		free(nvpb->buffers);
		free(nvpb->relocs);
		nvpb->buffers = NULL;
		nvpb->relocs = NULL;
		return -ENOMEM;
	}

	chan->pushbuf = &nvpb->base;
	return 0;
}
/* Legacy submission path (no kernel memory manager): validate each
 * referenced buffer ourselves, patch relocations for buffers whose
 * presumed location is stale, then copy the pushbuf into the channel's
 * DMA stream and emit this submission's fence. */
static int
nouveau_pushbuf_flush_nomm(struct nouveau_channel_priv *nvchan)
{
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	struct drm_nouveau_gem_pushbuf_bo *bo = nvpb->buffers;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = nvpb->relocs;
	unsigned b, r;
	int ret;

	for (b = 0; b < nvpb->nr_buffers; b++) {
		struct nouveau_bo_priv *nvbo =
			(void *)(unsigned long)bo[b].user_priv;
		uint32_t flags = 0;

		if (bo[b].valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			flags |= NOUVEAU_BO_VRAM;
		if (bo[b].valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			flags |= NOUVEAU_BO_GART;

		ret = nouveau_bo_validate_nomm(nvbo, flags);
		if (ret)
			return ret;

		/* "1 ||": the presumed location is always treated as
		 * stale, forcing every reloc to be re-patched below. */
		if (1 || bo[b].presumed_domain != nvbo->domain ||
		    bo[b].presumed_offset != nvbo->offset) {
			bo[b].presumed_ok = 0;
			bo[b].presumed_domain = nvbo->domain;
			bo[b].presumed_offset = nvbo->offset;
		}
	}

	/* Re-patch relocations against the validated locations. */
	for (r = 0; r < nvpb->nr_relocs; r++, reloc++) {
		uint32_t push;

		if (bo[reloc->bo_index].presumed_ok)
			continue;

		push = nouveau_pushbuf_calc_reloc(&bo[reloc->bo_index], reloc, 0);
		nvpb->pushbuf[reloc->reloc_index] = push;
	}

	nouveau_dma_space(&nvchan->base, nvpb->size);
	nouveau_dma_outp (&nvchan->base, nvpb->pushbuf, nvpb->size);
	nouveau_fence_emit(nvpb->fence);

	return 0;
}
/* Submit everything written into the pushbuf since the last flush:
 * via the GEM_PUSHBUF ioctl when the kernel memory manager is active,
 * otherwise via the legacy nomm path.  Afterwards update each buffer's
 * presumed offset/domain, drop the validate-list references, allocate
 * space for the next batch (at least 'min' dwords) and run the
 * channel's flush notify hook. */
int
nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
{
	struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
	struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
	struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
	struct drm_nouveau_gem_pushbuf req;
	unsigned i;
	int ret;

	/* Nothing emitted since the last flush. */
	if (nvpb->base.remaining == nvpb->size)
		return 0;
	nvpb->size -= nvpb->base.remaining;	/* = dwords actually used */

	if (nvdev->mm_enabled) {
		req.channel = chan->id;
		req.nr_dwords = nvpb->size;
		req.dwords = (uint64_t)(unsigned long)nvpb->pushbuf;
		req.nr_buffers = nvpb->nr_buffers;
		req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
		req.nr_relocs = nvpb->nr_relocs;
		req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				      &req, sizeof(req));
	} else {
		nouveau_fence_flush(chan);
		ret = nouveau_pushbuf_flush_nomm(nvchan);
	}
	assert(ret == 0);

	/* Update presumed offset/domain for any buffers that moved.
	 * Dereference all buffers on validate list
	 */
	for (i = 0; i < nvpb->nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *pbbo = &nvpb->buffers[i];
		struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;

		if (pbbo->presumed_ok == 0) {
			nouveau_bo(bo)->domain = pbbo->presumed_domain;
			nouveau_bo(bo)->offset = pbbo->presumed_offset;
		}

		nouveau_bo(bo)->pending = NULL;
		nouveau_bo_ref(NULL, &bo);
	}
	nvpb->nr_buffers = 0;
	nvpb->nr_relocs = 0;

	/* Allocate space for next push buffer */
	ret = nouveau_pushbuf_space(chan, min);
	assert(!ret);

	if (chan->flush_notify)
		chan->flush_notify(chan);

	return 0;
}

View file

@ -0,0 +1,160 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_PUSHBUF_H__
#define __NOUVEAU_PUSHBUF_H__
#include <assert.h>
#include <string.h>
#include "nouveau_bo.h"
#include "nouveau_grobj.h"
/* Public pushbuf state: where the next dword is written and how many
 * dwords remain before a flush is required. */
struct nouveau_pushbuf {
	struct nouveau_channel *channel;
	unsigned remaining;	/* dwords left in the current buffer */
	uint32_t *cur;		/* write cursor */
};

int
nouveau_pushbuf_flush(struct nouveau_channel *, unsigned min);

int
nouveau_pushbuf_emit_reloc(struct nouveau_channel *, void *ptr,
			   struct nouveau_bo *, uint32_t data, uint32_t flags,
			   uint32_t vor, uint32_t tor);
/* Push buffer access macros */
static __inline__ void
OUT_RING(struct nouveau_channel *chan, unsigned data)
{
*(chan->pushbuf->cur++) = (data);
}
static __inline__ void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned size)
{
memcpy(chan->pushbuf->cur, data, size * 4);
chan->pushbuf->cur += size;
}
static __inline__ void
OUT_RINGf(struct nouveau_channel *chan, float f)
{
union { uint32_t i; float f; } c;
c.f = f;
OUT_RING(chan, c.i);
}
static __inline__ unsigned
AVAIL_RING(struct nouveau_channel *chan)
{
return chan->pushbuf->remaining;
}
static __inline__ void
WAIT_RING(struct nouveau_channel *chan, unsigned size)
{
if (chan->pushbuf->remaining < size)
nouveau_pushbuf_flush(chan, size);
}
/*
 * Start a method sequence: emit the command header word for 'size' data
 * words of method 'mthd' on the subchannel object 'gr' is bound to.
 * Binds 'gr' to a subchannel first if it has none yet.
 */
static __inline__ void
BEGIN_RING(struct nouveau_channel *chan, struct nouveau_grobj *gr,
	   unsigned mthd, unsigned size)
{
	unsigned words = size + 1; /* header word + payload */

	if (gr->bound == NOUVEAU_GROBJ_UNBOUND)
		nouveau_grobj_autobind(gr);

	/* Stamp the subchannel so autobind can evict the least recently used. */
	chan->subc[gr->subc].sequence = chan->subc_sequence++;

	WAIT_RING(chan, words);
	/* Header packs word count, subchannel index and method offset. */
	OUT_RING(chan, (size << 18) | (gr->subc << 13) | mthd);
	chan->pushbuf->remaining -= words;
}
/* Submit all queued commands now; no minimum free space is requested. */
static __inline__ void
FIRE_RING(struct nouveau_channel *chan)
{
	nouveau_pushbuf_flush(chan, 0);
}
/*
 * Explicitly bind graphics object 'gr' to subchannel 'sc', evicting any
 * auto-bound occupant.  Evicting another *explicitly* bound object is a
 * programming error and asserts.
 */
static __inline__ void
BIND_RING(struct nouveau_channel *chan, struct nouveau_grobj *gr, unsigned sc)
{
	struct nouveau_subchannel *slot = &gr->channel->subc[sc];
	struct nouveau_grobj *old = slot->gr;

	if (old) {
		assert(old->bound != NOUVEAU_GROBJ_BOUND_EXPLICIT);
		old->bound = NOUVEAU_GROBJ_UNBOUND;
	}

	gr->subc = sc;
	gr->bound = NOUVEAU_GROBJ_BOUND_EXPLICIT;
	slot->gr = gr;

	/* Method 0x0000 on the subchannel associates the object handle. */
	BEGIN_RING(chan, gr, 0x0000, 1);
	OUT_RING (chan, gr->handle);
}
/*
 * Reserve the next push buffer word and register a relocation for it, so
 * the final value can be patched against 'bo's placement at submit time.
 */
static __inline__ void
OUT_RELOC(struct nouveau_channel *chan, struct nouveau_bo *bo,
	  unsigned data, unsigned flags, unsigned vor, unsigned tor)
{
	uint32_t *entry = chan->pushbuf->cur++;

	nouveau_pushbuf_emit_reloc(chan, entry, bo, data, flags, vor, tor);
}
/* Raw data + flags depending on FB/TT buffer */
/* Relocation emitting raw 'data' OR'd with a VRAM/GART-dependent value. */
static __inline__ void
OUT_RELOCd(struct nouveau_channel *chan, struct nouveau_bo *bo,
	   unsigned data, unsigned flags, unsigned vor, unsigned tor)
{
	OUT_RELOC(chan, bo, data, NOUVEAU_BO_OR | flags, vor, tor);
}
/* FB/TT object handle */
/*
 * Relocation resolving to the channel's VRAM or GART object handle,
 * depending on where 'bo' ends up residing.
 */
static __inline__ void
OUT_RELOCo(struct nouveau_channel *chan, struct nouveau_bo *bo,
	   unsigned flags)
{
	OUT_RELOC(chan, bo, 0, NOUVEAU_BO_OR | flags,
		  chan->vram->handle, chan->gart->handle);
}
/* Low 32-bits of offset */
/* Relocation resolving to the low 32 bits of bo's offset plus 'delta'. */
static __inline__ void
OUT_RELOCl(struct nouveau_channel *chan, struct nouveau_bo *bo,
	   unsigned delta, unsigned flags)
{
	OUT_RELOC(chan, bo, delta, NOUVEAU_BO_LOW | flags, 0, 0);
}
/* High 32-bits of offset */
/* Relocation resolving to the high 32 bits of bo's offset plus 'delta'. */
static __inline__ void
OUT_RELOCh(struct nouveau_channel *chan, struct nouveau_bo *bo,
	   unsigned delta, unsigned flags)
{
	OUT_RELOC(chan, bo, delta, NOUVEAU_BO_HIGH | flags, 0, 0);
}
#endif

View file

@ -0,0 +1,115 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <stdlib.h>
#include <errno.h>
#include "nouveau_private.h"
/*
 * Create a heap covering [start, start + size) as a single free node and
 * store it in *heap.  Returns 0 on success, 1 on allocation failure.
 */
int
nouveau_resource_init(struct nouveau_resource **heap,
		      unsigned start, unsigned size)
{
	struct nouveau_resource *root;

	root = calloc(1, sizeof(*root));
	if (!root)
		return 1;

	root->start = start;
	root->size = size;

	*heap = root;
	return 0;
}
/*
 * First-fit allocation of 'size' units from 'heap'.  The allocation is
 * carved from the tail end of the first free node large enough to hold it
 * and linked into the list right after that node.  On success *res is set
 * and 0 returned; returns 1 on bad arguments, no space, or OOM.
 */
int
nouveau_resource_alloc(struct nouveau_resource *heap, unsigned size, void *priv,
		       struct nouveau_resource **res)
{
	struct nouveau_resource *slot, *r;

	if (!heap || !size || !res || *res)
		return 1;

	for (slot = heap; slot; slot = slot->next) {
		if (slot->in_use || slot->size < size)
			continue;

		r = calloc(1, sizeof(*r));
		if (!r)
			return 1;

		/* Take the top 'size' units of the free slot. */
		r->start = (slot->start + slot->size) - size;
		r->size = size;
		r->in_use = 1;
		r->priv = priv;
		slot->size -= size;

		/* Splice r in between slot and slot->next. */
		r->next = slot->next;
		if (slot->next)
			slot->next->prev = r;
		r->prev = slot;
		slot->next = r;

		*res = r;
		return 0;
	}

	return 1;
}
/*
 * Release the allocation in *res back to its heap and clear *res.
 * The freed node is coalesced with an adjacent free successor and/or
 * predecessor so the free list does not fragment into zero-progress
 * slivers.  Safe to call with NULL or an already-cleared handle.
 */
void
nouveau_resource_free(struct nouveau_resource **res)
{
	struct nouveau_resource *r;

	if (!res || !*res)
		return;
	r = *res;
	*res = NULL;

	r->in_use = 0;

	/* Merge with the next node if it is free: the successor absorbs
	 * r's extent (taking r's lower start) and r is unlinked/freed. */
	if (r->next && !r->next->in_use) {
		struct nouveau_resource *new = r->next;

		new->prev = r->prev;
		if (r->prev)
			r->prev->next = new;
		new->size += r->size;
		new->start = r->start;

		free(r);
		r = new;            /* continue coalescing from the merged node */
	}

	/* Merge with the previous node if it is free: the predecessor grows
	 * upward by r's size (its start is already the lower one). */
	if (r->prev && !r->prev->in_use) {
		r->prev->next = r->next;
		if (r->next)
			r->next->prev = r->prev;
		r->prev->size += r->size;
		free(r);
	}
}

View file

@ -0,0 +1,48 @@
/*
* Copyright 2007 Nouveau Project
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NOUVEAU_RESOURCE_H__
#define __NOUVEAU_RESOURCE_H__
/*
 * Node in a doubly-linked free/used list describing a contiguous range
 * [start, start + size) of an abstract resource space (e.g. fixed method
 * or notifier space).  Managed by nouveau_resource_init/alloc/free.
 */
struct nouveau_resource {
	struct nouveau_resource *prev;
	struct nouveau_resource *next;
	int in_use;            /* non-zero while allocated */
	void *priv;            /* opaque owner cookie supplied at alloc time */
	unsigned int start;    /* first unit of the range */
	unsigned int size;     /* number of units in the range */
};
/* Create a heap of 'size' units beginning at 'start'; 0 on success. */
int
nouveau_resource_init(struct nouveau_resource **heap, unsigned start,
		      unsigned size);
/* First-fit allocate 'size' units; result returned through the out arg. */
int
nouveau_resource_alloc(struct nouveau_resource *heap, unsigned size, void *priv,
		       struct nouveau_resource **);
/* Return an allocation to its heap, coalescing with free neighbours. */
void
nouveau_resource_free(struct nouveau_resource **);
#endif

View file

@ -42,6 +42,7 @@
#include <fcntl.h>
#include <errno.h>
#include <signal.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#define stat_t struct stat
@ -1896,13 +1897,33 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
*/
int drmWaitVBlank(int fd, drmVBlankPtr vbl)
{
struct timespec timeout, cur;
int ret;
ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
if (ret < 0) {
fprintf(stderr, "clock_gettime failed: %s\n", strerror(ret));
goto out;
}
timeout.tv_sec++;
do {
ret = drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
vbl->request.type &= ~DRM_VBLANK_RELATIVE;
if (ret && errno == EINTR) {
clock_gettime(CLOCK_MONOTONIC, &cur);
/* Timeout after 1s */
if (cur.tv_sec > timeout.tv_sec + 1 ||
(cur.tv_sec == timeout.tv_sec && cur.tv_nsec >=
timeout.tv_nsec)) {
errno = EBUSY;
ret = -1;
break;
}
}
} while (ret && errno == EINTR);
out:
return ret;
}

View file

@ -325,28 +325,28 @@ typedef struct _drmSetVersion {
#elif defined(__alpha__)
#define DRM_CAS(lock, old, new, ret) \
do { \
int old32; \
int cur32; \
__asm__ __volatile__( \
" mb\n" \
" zap %4, 0xF0, %0\n" \
" ldl_l %1, %2\n" \
" zap %1, 0xF0, %1\n" \
" cmpeq %0, %1, %1\n" \
" beq %1, 1f\n" \
" bis %5, %5, %1\n" \
" stl_c %1, %2\n" \
"1: xor %1, 1, %1\n" \
" stl %1, %3" \
: "=r" (old32), \
"=&r" (cur32), \
"=m" (__drm_dummy_lock(lock)),\
"=m" (ret) \
: "r" (old), \
"r" (new)); \
} while(0)
#define DRM_CAS(lock, old, new, ret) \
do { \
int tmp, old32; \
__asm__ __volatile__( \
" addl $31, %5, %3\n" \
"1: ldl_l %0, %2\n" \
" cmpeq %0, %3, %1\n" \
" beq %1, 2f\n" \
" mov %4, %0\n" \
" stl_c %0, %2\n" \
" beq %0, 3f\n" \
" mb\n" \
"2: cmpeq %1, 0, %1\n" \
".subsection 2\n" \
"3: br 1b\n" \
".previous" \
: "=&r"(tmp), "=&r"(ret), \
"=m"(__drm_dummy_lock(lock)), \
"=&r"(old32) \
: "r"(new), "r"(old) \
: "memory"); \
} while (0)
#elif defined(__sparc__)
@ -429,7 +429,9 @@ do { register unsigned int __old __asm("o0"); \
#define DRM_CAS(lock,old,new,ret) do { ret=1; } while (0) /* FAST LOCK FAILS */
#endif
#if defined(__alpha__) || defined(__powerpc__)
#if defined(__alpha__)
#define DRM_CAS_RESULT(_result) long _result
#elif defined(__powerpc__)
#define DRM_CAS_RESULT(_result) int _result
#else
#define DRM_CAS_RESULT(_result) char _result

View file

@ -76,7 +76,7 @@ void* drmAllocCpy(void *array, int count, int entry_size)
* A couple of free functions.
*/
void drmModeFreeModeInfo(struct drm_mode_modeinfo *ptr)
void drmModeFreeModeInfo(drmModeModeInfoPtr ptr)
{
if (!ptr)
return;
@ -273,7 +273,7 @@ drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId)
int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
uint32_t x, uint32_t y, uint32_t *connectors, int count,
struct drm_mode_modeinfo *mode)
drmModeModeInfoPtr mode)
{
struct drm_mode_crtc crtc;
@ -419,7 +419,7 @@ err_allocs:
return r;
}
int drmModeAttachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
int drmModeAttachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
{
struct drm_mode_mode_cmd res;
@ -429,7 +429,7 @@ int drmModeAttachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *m
return drmIoctl(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
}
int drmModeDetachMode(int fd, uint32_t connector_id, struct drm_mode_modeinfo *mode_info)
int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
{
struct drm_mode_mode_cmd res;
@ -628,27 +628,6 @@ int drmCheckModesettingSupported(const char *busid)
}
int drmModeReplaceFB(int fd, uint32_t buffer_id,
uint32_t width, uint32_t height, uint8_t depth,
uint8_t bpp, uint32_t pitch, uint32_t bo_handle)
{
struct drm_mode_fb_cmd f;
int ret;
f.width = width;
f.height = height;
f.pitch = pitch;
f.bpp = bpp;
f.depth = depth;
f.handle = bo_handle;
f.fb_id = buffer_id;
if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_REPLACEFB, &f)))
return ret;
return 0;
}
int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue)
{

View file

@ -52,6 +52,99 @@
* buffer object interface. This object needs to be pinned.
*/
/*
* If we pickup an old version of drm.h which doesn't include drm_mode.h
* we should redefine defines. This is so that builds doesn't breaks with
* new libdrm on old kernels.
*/
#ifndef _DRM_MODE_H
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
/* Scaling mode options */
#define DRM_MODE_SCALE_NON_GPU 0
#define DRM_MODE_SCALE_FULLSCREEN 1
#define DRM_MODE_SCALE_NO_SCALE 2
#define DRM_MODE_SCALE_ASPECT 3
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
#endif /* _DRM_MODE_H */
typedef struct _drmModeRes {
int count_fbs;
@ -70,7 +163,27 @@ typedef struct _drmModeRes {
uint32_t min_height, max_height;
} drmModeRes, *drmModeResPtr;
typedef struct drm_mode_fb_cmd drmModeFB, *drmModeFBPtr;
typedef struct _drmModeModeInfo {
uint32_t clock;
uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
uint32_t vrefresh; /* vertical refresh * 1000 */
uint32_t flags;
uint32_t type;
char name[DRM_DISPLAY_MODE_LEN];
} drmModeModeInfo, *drmModeModeInfoPtr;
typedef struct _drmModeFB {
uint32_t fb_id;
uint32_t width, height;
uint32_t pitch;
uint32_t bpp;
uint32_t depth;
/* driver specific handle */
uint32_t handle;
} drmModeFB, *drmModeFBPtr;
typedef struct _drmModePropertyBlob {
uint32_t id;
@ -97,7 +210,7 @@ typedef struct _drmModeCrtc {
uint32_t x, y; /**< Position on the framebuffer */
uint32_t width, height;
int mode_valid;
struct drm_mode_modeinfo mode;
drmModeModeInfo mode;
int gamma_size; /**< Number of gamma stops */
@ -136,7 +249,7 @@ typedef struct _drmModeConnector {
drmModeSubPixel subpixel;
int count_modes;
struct drm_mode_modeinfo *modes;
drmModeModeInfoPtr modes;
int count_props;
uint32_t *props; /**< List of property ids */
@ -148,7 +261,7 @@ typedef struct _drmModeConnector {
extern void drmModeFreeModeInfo( struct drm_mode_modeinfo *ptr );
extern void drmModeFreeModeInfo( drmModeModeInfoPtr ptr );
extern void drmModeFreeResources( drmModeResPtr ptr );
extern void drmModeFreeFB( drmModeFBPtr ptr );
extern void drmModeFreeCrtc( drmModeCrtcPtr ptr );
@ -180,13 +293,6 @@ extern int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
*/
extern int drmModeRmFB(int fd, uint32_t bufferId);
/**
* Replace a framebuffer object with a new one - for resizing the screen.
*/
extern int drmModeReplaceFB(int fd, uint32_t buffer_id,
uint32_t width, uint32_t height, uint8_t depth,
uint8_t bpp, uint32_t pitch, uint32_t bo_handle);
/*
* Crtc functions
*/
@ -201,7 +307,7 @@ extern drmModeCrtcPtr drmModeGetCrtc(int fd, uint32_t crtcId);
*/
int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
uint32_t x, uint32_t y, uint32_t *connectors, int count,
struct drm_mode_modeinfo *mode);
drmModeModeInfoPtr mode);
/*
* Cursor functions
@ -235,13 +341,13 @@ extern drmModeConnectorPtr drmModeGetConnector(int fd,
/**
* Attaches the given mode to an connector.
*/
extern int drmModeAttachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
extern int drmModeAttachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
/**
* Detaches a mode from the connector
* must be unused, by the given mode.
*/
extern int drmModeDetachMode(int fd, uint32_t connectorId, struct drm_mode_modeinfo *mode_info);
extern int drmModeDetachMode(int fd, uint32_t connectorId, drmModeModeInfoPtr mode_info);
extern drmModePropertyPtr drmModeGetProperty(int fd, uint32_t propertyId);
extern void drmModeFreeProperty(drmModePropertyPtr ptr);

10
libdrm_intel.pc.in Normal file
View file

@ -0,0 +1,10 @@
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libdrm
Description: Userspace interface to kernel DRM services
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -ldrm -ldrm_intel
Cflags: -I${includedir} -I${includedir}/drm

View file

@ -48,21 +48,6 @@ config DRM_I810
selected, the module will be called i810. AGP support is required
for this driver to work.
choice
prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
depends on DRM && AGP && AGP_INTEL
optional
config DRM_I915
tristate "i915 driver"
help
Choose this option if you have a system that has Intel 830M, 845G,
852GM, 855GM, 865G, 915G, 915GM, 945G, 945GM and 965G integrated
graphics. If M is selected, the module will be called i915.
AGP support is required for this driver to work.
endchoice
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && (!X86_64 || BROKEN) && (!PPC || BROKEN)

View file

@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
mach64.o nv.o nouveau.o xgi.o
mach64.o nouveau.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@ -83,19 +83,15 @@ R128HEADERS = r128_drv.h r128_drm.h $(DRMHEADERS)
RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS)
MGAHEADERS = mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS)
I810HEADERS = i810_drv.h i810_drm.h $(DRMHEADERS)
I915HEADERS = i915_drv.h i915_drm.h $(DRMHEADERS)
SISHEADERS= sis_drv.h sis_drm.h drm_hashtab.h drm_sman.h $(DRMHEADERS)
SAVAGEHEADERS= savage_drv.h savage_drm.h $(DRMHEADERS)
VIAHEADERS = via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS)
MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
NVHEADERS = nv_drv.h $(DRMHEADERS)
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
PROGS = dristat drmstat
CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c drm_pciids.h .tmp_versions
CLEANFILES = *.o *.ko .depend .*.flags .*.d .*.cmd *.mod.c drm_pciids.h .tmp_versions
# VERSION is not defined from the initial invocation. It is defined when
# this Makefile is invoked from the kernel's root Makefile.
@ -221,23 +217,6 @@ clean cleandir:
$(MODULE_LIST)::
make DRM_MODULES=$@ modules
# Build test utilities
PRGCFLAGS = $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \
-D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \
-I. -I../../..
DRMSTATLIBS = -L../../.. -L.. -ldrm -lxf86_os \
-L../../../../dummylib -ldummy -lm
programs: $(PROGS)
dristat: dristat.c
$(CC) $(PRGCFLAGS) $< -o $@
drmstat: drmstat.c
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
install:
make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
@ -286,7 +265,6 @@ CONFIG_DRM_FFB := n
CONFIG_DRM_SAVAGE := n
CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
CONFIG_DRM_XGI := n
@ -319,9 +297,6 @@ endif
ifneq (,$(findstring mga,$(DRM_MODULES)))
CONFIG_DRM_MGA := m
endif
ifneq (,$(findstring nv,$(DRM_MODULES)))
CONFIG_DRM_NV := m
endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif
@ -354,7 +329,6 @@ $(ffb-objs): $(FFBHEADERS)
$(savage-objs): $(SAVAGEHEADERS)
$(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)
$(xgi-objs): $(XGIHEADERS)

Some files were not shown because too many files have changed in this diff Show more