Merge branch 'xgi-0-0-2'

This commit is contained in:
Ian Romanick 2007-09-06 15:37:52 -07:00
commit 54c96cbc46
16 changed files with 2138 additions and 2 deletions

View file

@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
mach64.o nv.o nouveau.o
mach64.o nv.o nouveau.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@ -91,6 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
NVHEADERS = nv_drv.h $(DRMHEADERS)
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
PROGS = dristat drmstat
@ -284,6 +285,7 @@ CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
CONFIG_DRM_XGI := n
# Enable module builds for the modules requested/supported.
@ -320,6 +322,9 @@ endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif
ifneq (,$(findstring xgi,$(DRM_MODULES)))
CONFIG_DRM_XGI := m
endif
# These require AGP support
@ -347,6 +352,7 @@ $(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)
$(xgi-objs): $(XGIHEADERS)
endif

View file

@ -38,6 +38,8 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o via_fence.o via_buffer.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
xgi_fence.o
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
@ -62,3 +64,4 @@ obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
obj-$(CONFIG_DRM_XGI) += xgi.o

322
linux-core/xgi_cmdlist.c Normal file
View file

@ -0,0 +1,322 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"
static void xgi_emit_flush(struct xgi_info * info, bool stop);
static void xgi_emit_nop(struct xgi_info * info);
static unsigned int get_batch_command(enum xgi_batch_type type);
static void triggerHWCommandList(struct xgi_info * info);
static void xgi_cmdlist_reset(struct xgi_info * info);
/**
 * Graphic engine register (2d/3d) accessing interface
 */
static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
{
	/* Write one 32-bit value into the MMIO mapping at byte offset
	 * @addr.  Optional debug tracing is compiled in only when
	 * XGI_MMIO_DEBUG is defined.
	 */
#ifdef XGI_MMIO_DEBUG
	DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
		 map->handle, addr, data);
#endif
	DRM_WRITE32(map, addr, data);
}
/**
 * xgi_cmdlist_initialize - allocate and set up the command ring buffer.
 * @info: device private data
 * @size: requested ring size, in bytes
 * @filp: DRM file that owns the allocation
 *
 * Allocates @size bytes from the GART (non-local) pool and records the
 * kernel virtual address, hardware address, and size in info->cmdring.
 * last_ptr is cleared so the first submit takes the PCI-trigger path.
 *
 * Returns 0 on success or the error from xgi_alloc().
 */
int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
			   struct drm_file * filp)
{
	struct xgi_mem_alloc mem_alloc = {
		.location = XGI_MEMLOC_NON_LOCAL,
		.size = size,
	};
	int err;

	err = xgi_alloc(info, &mem_alloc, filp);
	if (err) {
		return err;
	}

	/* Translate the hardware address of the allocation back to a CPU
	 * pointer so the driver can write batch links into the ring.
	 */
	info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr);
	info->cmdring.size = mem_alloc.size;
	info->cmdring.ring_hw_base = mem_alloc.hw_addr;
	info->cmdring.last_ptr = NULL;
	info->cmdring.ring_offset = 0;

	return 0;
}
/**
* get_batch_command - Get the command ID for the current begin type.
* @type: Type of the current batch
*
* See section 3.2.2 "Begin" (page 15) of the 3D SPG.
*
* This function assumes that @type is on the range [0,3].
*/
unsigned int get_batch_command(enum xgi_batch_type type)
{
	/* Command port offsets 0x30, 0x40, 0x50 and 0x20, pre-shifted into
	 * dword units.  Indexed directly by @type, which the caller must
	 * keep in the range [0,3] (see the comment block above).
	 */
	static const unsigned int ports[4] = { 0x0c, 0x10, 0x14, 0x08 };

	return ports[type];
}
int xgi_submit_cmdlist(struct drm_device * dev, void * data,
struct drm_file * filp)
{
struct xgi_info *const info = dev->dev_private;
const struct xgi_cmd_info *const pCmdInfo =
(struct xgi_cmd_info *) data;
const unsigned int cmd = get_batch_command(pCmdInfo->type);
u32 begin[4];
begin[0] = (cmd << 24) | BEGIN_VALID_MASK
| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
begin[2] = pCmdInfo->hw_addr >> 4;
begin[3] = 0;
if (info->cmdring.last_ptr == NULL) {
const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
/* Enable PCI Trigger Mode
*/
dwWriteReg(info->mmio_map,
BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
(M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
M2REG_CLEAR_COUNTERS_MASK | 0x08 |
M2REG_PCI_TRIGGER_MODE_MASK);
dwWriteReg(info->mmio_map,
BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
(M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
M2REG_PCI_TRIGGER_MODE_MASK);
/* Send PCI begin command
*/
dwWriteReg(info->mmio_map, portOffset, begin[0]);
dwWriteReg(info->mmio_map, portOffset + 4, begin[1]);
dwWriteReg(info->mmio_map, portOffset + 8, begin[2]);
dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
} else {
DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
if (pCmdInfo->type == BTYPE_3D) {
xgi_emit_flush(info, FALSE);
}
info->cmdring.last_ptr[1] = begin[1];
info->cmdring.last_ptr[2] = begin[2];
info->cmdring.last_ptr[3] = begin[3];
DRM_WRITEMEMORYBARRIER();
info->cmdring.last_ptr[0] = begin[0];
triggerHWCommandList(info);
}
info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
drm_fence_flush_old(info->dev, 0, info->next_sequence);
return 0;
}
/*
 * Handle a graphics state transition reported by userspace.
 *
 * state: 0 - console
 *        1 - graphic
 *        2 - fb
 *        3 - logout
 */
int xgi_state_change(struct xgi_info * info, unsigned int to,
		     unsigned int from)
{
#define STATE_CONSOLE 0
#define STATE_GRAPHIC 1
#define STATE_FBTERM 2
#define STATE_LOGOUT 3
#define STATE_REBOOT 4
#define STATE_SHUTDOWN 5

	if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
		DRM_INFO("Leaving graphical mode (probably VT switch)\n");
		return 0;
	}

	if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
		DRM_INFO("Entering graphical mode (probably VT switch)\n");
		/* Re-entering graphics mode: drop any stale command chain. */
		xgi_cmdlist_reset(info);
		return 0;
	}

	if ((from == STATE_GRAPHIC)
	    && ((to == STATE_LOGOUT)
		|| (to == STATE_REBOOT)
		|| (to == STATE_SHUTDOWN))) {
		DRM_INFO("Leaving graphical mode (probably X shutting down)\n");
		return 0;
	}

	DRM_ERROR("Invalid state change.\n");
	return -EINVAL;
}
int xgi_state_change_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp)
{
struct xgi_state_info *const state =
(struct xgi_state_info *) data;
struct xgi_info *info = dev->dev_private;
return xgi_state_change(info, state->_toState, state->_fromState);
}
/* Forget the current command chain and rewind the ring to its start.
 * The next submit will re-arm the hardware via the PCI-trigger path.
 */
void xgi_cmdlist_reset(struct xgi_info * info)
{
	info->cmdring.last_ptr = NULL;
	info->cmdring.ring_offset = 0;
}
/**
 * xgi_cmdlist_cleanup - terminate the command chain and drop ring state.
 * @info: device private data
 *
 * Safe to call when no ring was ever allocated (ring_hw_base == 0).
 */
void xgi_cmdlist_cleanup(struct xgi_info * info)
{
	if (info->cmdring.ring_hw_base != 0) {
		/* If command lists have been issued, terminate the command
		 * list chain with a flush command.
		 */
		if (info->cmdring.last_ptr != NULL) {
			xgi_emit_flush(info, FALSE);
			xgi_emit_nop(info);
		}

		/* Let the engine drain before tearing the ring down. */
		xgi_waitfor_pci_idle(info);

		(void) memset(&info->cmdring, 0, sizeof(info->cmdring));
	}
}
/* Poke the PCI trigger register so the hardware re-examines the command
 * chain.  NOTE(review): s_triggerID is a file-scope counter incremented
 * without locking; callers appear to hold the command path serialized —
 * TODO confirm no concurrent submit path exists.
 */
static void triggerHWCommandList(struct xgi_info * info)
{
	static unsigned int s_triggerID = 1;

	dwWriteReg(info->mmio_map,
		   BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
		   0x05000000 + (0x0ffff & s_triggerID++));
}
/**
 * Emit a flush to the CRTL command stream.
 * @info XGI info structure
 * @stop when true, mark the flush batch with the stop/store-pointer bit
 *
 * Copies an 8-dword flush batch into the ring (wrapping to offset 0 if
 * it would not fit), links it onto the previous batch's BEGIN block, and
 * triggers the hardware.  On return, last_ptr points at the new batch.
 *
 * This function assumes info->cmdring.ptr and info->cmdring.last_ptr are
 * non-NULL.
 */
void xgi_emit_flush(struct xgi_info * info, bool stop)
{
	const u32 flush_command[8] = {
		((0x10 << 24)
		 | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
		BEGIN_LINK_ENABLE_MASK | (0x00004),
		0x00000000, 0x00000000,

		/* Flush the 2D engine with the default 32 clock delay.
		 */
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
		M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
	};
	const unsigned int flush_size = sizeof(flush_command);
	u32 *batch_addr;
	u32 hw_addr;

	/* check buf is large enough to contain a new flush batch */
	if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
		info->cmdring.ring_offset = 0;
	}

	hw_addr = info->cmdring.ring_hw_base
		+ info->cmdring.ring_offset;
	batch_addr = info->cmdring.ptr
		+ (info->cmdring.ring_offset / 4);

	(void) memcpy(batch_addr, flush_command, flush_size);

	if (stop) {
		*batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK;
	}

	/* Link dwords first; publish the BEGIN header only after the write
	 * barrier so the hardware never chases a half-written link.
	 */
	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4);
	info->cmdring.last_ptr[2] = hw_addr >> 4;
	info->cmdring.last_ptr[3] = 0;
	DRM_WRITEMEMORYBARRIER();
	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
		| (BEGIN_VALID_MASK);

	triggerHWCommandList(info);

	info->cmdring.ring_offset += flush_size;
	info->cmdring.last_ptr = batch_addr;
}
/**
 * Emit an empty command to the CRTL command stream.
 * @info XGI info structure
 *
 * This function assumes info->cmdring.last_ptr is non-NULL.  In addition,
 * since this function emits a command that does not have linkage
 * information, it sets info->cmdring.last_ptr to NULL, terminating the
 * chain.
 */
void xgi_emit_nop(struct xgi_info * info)
{
	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK
		| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
	info->cmdring.last_ptr[2] = 0;
	info->cmdring.last_ptr[3] = 0;
	/* Header goes last, after the barrier, as in xgi_emit_flush(). */
	DRM_WRITEMEMORYBARRIER();
	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
		| (BEGIN_VALID_MASK);

	triggerHWCommandList(info);

	info->cmdring.last_ptr = NULL;
}
/* Terminate the current batch with a stop-flagged flush so the engine
 * stores its pointer; a no-op when nothing has been queued yet.
 */
void xgi_emit_irq(struct xgi_info * info)
{
	if (info->cmdring.last_ptr != NULL) {
		xgi_emit_flush(info, TRUE);
	}
}

66
linux-core/xgi_cmdlist.h Normal file
View file

@ -0,0 +1,66 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_CMDLIST_H_
#define _XGI_CMDLIST_H_
struct xgi_cmdring_info {
	/**
	 * Kernel space pointer to the base of the command ring.
	 */
	u32 * ptr;

	/**
	 * Size, in bytes, of the command ring.
	 */
	unsigned int size;

	/**
	 * Base address of the command ring from the hardware's PoV.
	 */
	unsigned int ring_hw_base;

	/**
	 * Kernel pointer to the BEGIN block of the most recently queued
	 * batch, used to link the next batch onto it.  NULL when no batch
	 * is outstanding.
	 */
	u32 * last_ptr;

	/**
	 * Offset, in bytes, from the start of the ring to the next available
	 * location to store a command.
	 */
	unsigned int ring_offset;
};
struct xgi_info;
extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
struct drm_file * filp);
extern int xgi_state_change(struct xgi_info * info, unsigned int to,
unsigned int from);
extern void xgi_cmdlist_cleanup(struct xgi_info * info);
extern void xgi_emit_irq(struct xgi_info * info);
#endif /* _XGI_CMDLIST_H_ */

1
linux-core/xgi_drm.h Symbolic link
View file

@ -0,0 +1 @@
../shared-core/xgi_drm.h

428
linux-core/xgi_drv.c Normal file
View file

@ -0,0 +1,428 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "drmP.h"
#include "drm.h"
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"
#include "drm_pciids.h"
/* PCI IDs this driver binds to (expanded from drm_pciids.h). */
static struct pci_device_id pciidlist[] = {
	xgi_PCI_IDS
};

/* Fence driver hooks; sequence numbers wrap within the BEGIN
 * identification mask.
 */
static struct drm_fence_driver xgi_fence_driver = {
	.num_classes = 1,
	.wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
	.flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
	.sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
	.lazy_capable = 1,
	.emit = xgi_fence_emit_sequence,
	.poke_flush = xgi_poke_flush,
	.has_irq = xgi_fence_has_irq
};

static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);

/* ioctl dispatch table; bootstrap is restricted to the privileged master. */
static struct drm_ioctl_desc xgi_ioctls[] = {
	DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
};

static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
static int xgi_driver_unload(struct drm_device *dev);
static void xgi_driver_lastclose(struct drm_device * dev);
static void xgi_reclaim_buffers_locked(struct drm_device * dev,
				       struct drm_file * filp);
static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);

/* Top-level DRM driver description: feature flags, lifecycle hooks,
 * file operations, PCI binding, and the fence driver above.
 */
static struct drm_driver driver = {
	.driver_features =
		DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
		DRIVER_IRQ_SHARED | DRIVER_SG,
	.dev_priv_size = sizeof(struct xgi_info),
	.load = xgi_driver_load,
	.unload = xgi_driver_unload,
	.lastclose = xgi_driver_lastclose,
	.dma_quiescent = NULL,
	.irq_preinstall = NULL,
	.irq_postinstall = NULL,
	.irq_uninstall = NULL,
	.irq_handler = xgi_kern_isr,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = xgi_ioctls,
	.dma_ioctl = NULL,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.ioctl = drm_ioctl,
		.mmap = drm_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
	},

	.pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = probe,
		.remove = __devexit_p(drm_cleanup_pci),
	},

	.fence_driver = &xgi_fence_driver,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
/* PCI probe callback: hand the device to the DRM core. */
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}
/* Module entry point: finalize the ioctl count and register with DRM. */
static int __init xgi_init(void)
{
	driver.num_ioctls = xgi_max_ioctl;
	return drm_init(&driver, pciidlist);
}
/* Module exit point: unregister from DRM. */
static void __exit xgi_exit(void)
{
	drm_exit(&driver);
}
module_init(xgi_init);
module_exit(xgi_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
/**
 * xgi_engine_init - program the VGA/extended registers that bring the
 * graphics engine up: unlock registers, PCI retry policy, linear
 * addressing, 32-bit data path, burst/MMIO control.
 *
 * Register/bit meanings are taken from the inline comments below; the
 * write order follows the vendor sequence and should not be reordered.
 */
void xgi_engine_init(struct xgi_info * info)
{
	u8 temp;

	/* Unlock the extended registers (3C5.11). */
	OUT3C5B(info->mmio_map, 0x11, 0x92);

	/* -------> copy from OT2D
	 * PCI Retry Control Register.
	 * disable PCI read retry & enable write retry in mem. (10xx xxxx)b
	 */
	temp = IN3X5B(info->mmio_map, 0x55);
	OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80);

	xgi_enable_ge(info);

	/* Enable linear addressing of the card. */
	temp = IN3X5B(info->mmio_map, 0x21);
	OUT3X5B(info->mmio_map, 0x21, temp | 0x20);

	/* Enable 32-bit internal data path */
	temp = IN3X5B(info->mmio_map, 0x2A);
	OUT3X5B(info->mmio_map, 0x2A, temp | 0x40);

	/* Enable PCI burst write ,disable burst read and enable MMIO. */
	/*
	 * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO.
	 * 7 ---- Pixel Data Format 1: big endian 0: little endian
	 * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]# with Big Endian Format
	 * 2 ---- PCI Burst Write Enable
	 * 1 ---- PCI Burst Read Enable
	 * 0 ---- MMIO Control
	 */
	temp = IN3X5B(info->mmio_map, 0x39);
	OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd);

	/* enable GEIO decode */
	/* temp = IN3X5B(info->mmio_map, 0x29);
	 * OUT3X5B(info->mmio_map, 0x29, temp | 0x08);
	 */

	/* Enable graphic engine I/O PCI retry function*/
	/* temp = IN3X5B(info->mmio_map, 0x62);
	 * OUT3X5B(info->mmio_map, 0x62, temp | 0x50);
	 */

	/* protect all register except which protected by 3c5.0e.7 */
	/* OUT3C5B(info->mmio_map, 0x11, 0x87); */
}
/**
 * xgi_bootstrap - one-time (per map/heap) device setup, driven by the
 * DRM_XGI_BOOTSTRAP ioctl.
 * @dev: DRM device
 * @data: struct xgi_bootstrap; gart.size is input, gart is rewritten
 *        on output with the GART map (handle = user token)
 * @filp: DRM file performing bootstrap; owns the command-list allocation
 *
 * Maps MMIO and initializes the engine (first call only), probes the
 * framebuffer size from register 3CF.54, initializes the FB and GART
 * heaps, allocates the 1 MiB command ring, and adds the scatter/gather
 * map for the GART backing store.  Each step is guarded so a repeat
 * call (e.g. after lastclose cleared the maps) redoes only what is
 * missing.
 *
 * Returns 0 on success, negative errno on failure.
 */
int xgi_bootstrap(struct drm_device * dev, void * data,
		  struct drm_file * filp)
{
	struct xgi_info *info = dev->dev_private;
	struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data;
	struct drm_map_list *maplist;
	int err;

	DRM_SPININIT(&info->fence_lock, "fence lock");
	info->next_sequence = 0;
	info->complete_sequence = 0;

	if (info->mmio_map == NULL) {
		err = drm_addmap(dev, info->mmio.base, info->mmio.size,
				 _DRM_REGISTERS, _DRM_KERNEL,
				 &info->mmio_map);
		if (err) {
			DRM_ERROR("Unable to map MMIO region: %d\n", err);
			return err;
		}

		xgi_enable_mmio(info);
		xgi_engine_init(info);
	}

	/* Register 3CF.54 reports the framebuffer size in 8 MiB units. */
	info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;

	DRM_INFO("fb   base: 0x%lx, size: 0x%x (probed)\n",
		 (unsigned long) info->fb.base, info->fb.size);

	if ((info->fb.base == 0) || (info->fb.size == 0)) {
		DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
			  (unsigned long) info->fb.base, info->fb.size);
		return -EINVAL;
	}

	/* Init the resource manager */
	if (!info->fb_heap_initialized) {
		err = xgi_fb_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize FB heap.\n");
			return err;
		}
	}

	info->pcie.size = bs->gart.size;

	/* Init the resource manager */
	if (!info->pcie_heap_initialized) {
		err = xgi_pcie_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize GART heap.\n");
			return err;
		}

		/* Alloc 1M bytes for cmdbuffer which is flush2D batch array */
		err = xgi_cmdlist_initialize(info, 0x100000, filp);
		if (err) {
			DRM_ERROR("xgi_cmdlist_initialize() failed\n");
			return err;
		}
	}

	if (info->pcie_map == NULL) {
		err = drm_addmap(info->dev, 0, info->pcie.size,
				 _DRM_SCATTER_GATHER, _DRM_LOCKED,
				 & info->pcie_map);
		if (err) {
			DRM_ERROR("Could not add map for GART backing "
				  "store.\n");
			return err;
		}
	}

	maplist = drm_find_matching_map(dev, info->pcie_map);
	if (maplist == NULL) {
		DRM_ERROR("Could not find GART backing store map.\n");
		return -EINVAL;
	}

	/* Return the GART map to userspace; the handle is the user token
	 * to mmap, not a kernel pointer.
	 */
	bs->gart = *info->pcie_map;
	bs->gart.handle = (void *)(unsigned long) maplist->user_token;
	return 0;
}
/**
 * xgi_driver_lastclose - tear down per-open state when the last file
 * descriptor closes: terminate the command chain, disable the engine
 * and MMIO, drop map pointers, and clean up the GART and heaps.
 */
void xgi_driver_lastclose(struct drm_device * dev)
{
	struct xgi_info * info = dev->dev_private;

	if (info != NULL) {
		if (info->mmio_map != NULL) {
			xgi_cmdlist_cleanup(info);
			xgi_disable_ge(info);
			xgi_disable_mmio(info);
		}

		/* The core DRM lastclose routine will destroy all of our
		 * mappings for us.  NULL out the pointers here so that
		 * xgi_bootstrap can do the right thing.
		 */
		info->pcie_map = NULL;
		info->mmio_map = NULL;
		info->fb_map = NULL;

		if (info->pcie_heap_initialized) {
			drm_ati_pcigart_cleanup(dev, &info->gart_info);
		}

		if (info->fb_heap_initialized
		    || info->pcie_heap_initialized) {
			drm_sman_cleanup(&info->sman);

			info->fb_heap_initialized = FALSE;
			info->pcie_heap_initialized = FALSE;
		}
	}
}
/**
 * xgi_reclaim_buffers_locked - release all sman allocations owned by a
 * closing file, quiescing DMA first if the driver provides a hook.
 * Called by the DRM core with the heavyweight lock held (idlelocked).
 */
void xgi_reclaim_buffers_locked(struct drm_device * dev,
				struct drm_file * filp)
{
	struct xgi_info * info = dev->dev_private;

	mutex_lock(&info->dev->struct_mutex);
	if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) {
		/* Nothing owned by this file; nothing to reclaim. */
		mutex_unlock(&info->dev->struct_mutex);
		return;
	}

	if (dev->driver->dma_quiescent) {
		dev->driver->dma_quiescent(dev);
	}

	drm_sman_owner_cleanup(&info->sman, (unsigned long) filp);
	mutex_unlock(&info->dev->struct_mutex);
	return;
}
/*
 * Interrupt handler: read the active-interrupt bits from the auto-link
 * status register, acknowledge them, and run the fence handler.  Returns
 * IRQ_NONE when none of our bits are set (the IRQ line is shared).
 */
irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct xgi_info *info = dev->dev_private;
	const u32 irq_bits = DRM_READ32(info->mmio_map,
					(0x2800
					 + M2REG_AUTO_LINK_STATUS_ADDRESS))
		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
		   | M2REG_ACTIVE_INTERRUPT_0_MASK
		   | M2REG_ACTIVE_INTERRUPT_2_MASK
		   | M2REG_ACTIVE_INTERRUPT_3_MASK);

	if (irq_bits != 0) {
		/* Write the same bits back to acknowledge the interrupt. */
		DRM_WRITE32(info->mmio_map,
			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
			    M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits);
		xgi_fence_handler(dev);
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}
int xgi_driver_load(struct drm_device *dev, unsigned long flags)
{
struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
int err;
if (!info)
return -ENOMEM;
(void) memset(info, 0, sizeof(*info));
dev->dev_private = info;
info->dev = dev;
info->mmio.base = drm_get_resource_start(dev, 1);
info->mmio.size = drm_get_resource_len(dev, 1);
DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
(unsigned long) info->mmio.base, info->mmio.size);
if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
(unsigned long) info->mmio.base, info->mmio.size);
err = -EINVAL;
goto fail;
}
info->fb.base = drm_get_resource_start(dev, 0);
info->fb.size = drm_get_resource_len(dev, 0);
DRM_INFO("fb base: 0x%lx, size: 0x%x\n",
(unsigned long) info->fb.base, info->fb.size);
err = drm_sman_init(&info->sman, 2, 12, 8);
if (err) {
goto fail;
}
return 0;
fail:
drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
return err;
}
/* DRM unload hook: tear down the memory manager and free the private
 * structure allocated in xgi_driver_load().
 */
int xgi_driver_unload(struct drm_device *dev)
{
	struct xgi_info * info = dev->dev_private;

	drm_sman_takedown(&info->sman);
	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
	dev->dev_private = NULL;

	return 0;
}

114
linux-core/xgi_drv.h Normal file
View file

@ -0,0 +1,114 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_DRV_H_
#define _XGI_DRV_H_
#include "drmP.h"
#include "drm.h"
#include "drm_sman.h"
#define DRIVER_AUTHOR "Andrea Zhang <andrea_zhang@macrosynergy.com>"
#define DRIVER_NAME "xgi"
#define DRIVER_DESC "XGI XP5 / XP10 / XG47"
#define DRIVER_DATE "20070906"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#include "xgi_cmdlist.h"
#include "xgi_drm.h"
/* A physical address range on the card (MMIO, framebuffer, or GART). */
struct xgi_aperture {
	dma_addr_t base;
	unsigned int size;
};

/* Per-device private data, hung off drm_device::dev_private. */
struct xgi_info {
	struct drm_device *dev;

	bool bootstrap_done;

	/* physical characteristics */
	struct xgi_aperture mmio;
	struct xgi_aperture fb;
	struct xgi_aperture pcie;

	/* DRM mappings of the apertures above; NULLed on lastclose so
	 * bootstrap can re-create them.
	 */
	struct drm_map *mmio_map;
	struct drm_map *pcie_map;
	struct drm_map *fb_map;

	/* look up table parameters */
	struct ati_pcigart_info gart_info;
	unsigned int lutPageSize;

	/* Simple memory manager covering the FB and GART heaps. */
	struct drm_sman sman;
	bool fb_heap_initialized;
	bool pcie_heap_initialized;

	struct xgi_cmdring_info cmdring;

	/* Protects the fence sequence counters below. */
	DRM_SPINTYPE fence_lock;
	unsigned complete_sequence;
	unsigned next_sequence;
};
extern int xgi_fb_heap_init(struct xgi_info * info);
extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
struct drm_file * filp);
extern int xgi_free(struct xgi_info * info, unsigned long index,
struct drm_file * filp);
extern int xgi_pcie_heap_init(struct xgi_info * info);
extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
extern void xgi_enable_mmio(struct xgi_info * info);
extern void xgi_disable_mmio(struct xgi_info * info);
extern void xgi_enable_ge(struct xgi_info * info);
extern void xgi_disable_ge(struct xgi_info * info);
extern void xgi_poke_flush(struct drm_device * dev, uint32_t class);
extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
uint32_t flags, uint32_t * sequence, uint32_t * native_type);
extern void xgi_fence_handler(struct drm_device * dev);
extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class,
uint32_t flags);
extern int xgi_alloc_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_free_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_submit_cmdlist(struct drm_device * dev, void * data,
struct drm_file * filp);
extern int xgi_state_change_ioctl(struct drm_device * dev, void * data,
struct drm_file * filp);
#endif

123
linux-core/xgi_fb.c Normal file
View file

@ -0,0 +1,123 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#define XGI_FB_HEAP_START 0x1000000
/**
 * xgi_alloc - allocate memory from the on-card or GART heap.
 * @info: device private data
 * @alloc: in/out allocation request; location and size are inputs,
 *         offset, hw_addr and index are filled in on success
 * @filp: owning DRM file (recorded for reclaim on close)
 *
 * For GART allocations, hw_addr is the heap offset rebased onto the
 * PCIe aperture.  Returns 0, -EINVAL for a bad/uninitialized pool, or
 * -ENOMEM when the heap is exhausted.
 */
int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
	      struct drm_file * filp)
{
	struct drm_memblock_item *block;
	const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
		? "on-card" : "GART";

	if ((alloc->location != XGI_MEMLOC_LOCAL)
	    && (alloc->location != XGI_MEMLOC_NON_LOCAL)) {
		DRM_ERROR("Invalid memory pool (0x%08x) specified.\n",
			  alloc->location);
		return -EINVAL;
	}

	if ((alloc->location == XGI_MEMLOC_LOCAL)
	    ? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
		DRM_ERROR("Attempt to allocate from uninitialized memory "
			  "pool (0x%08x).\n", alloc->location);
		return -EINVAL;
	}

	mutex_lock(&info->dev->struct_mutex);
	block = drm_sman_alloc(&info->sman, alloc->location, alloc->size,
			       0, (unsigned long) filp);
	mutex_unlock(&info->dev->struct_mutex);

	if (block == NULL) {
		alloc->size = 0;
		DRM_ERROR("%s memory allocation failed\n", mem_name);
		return -ENOMEM;
	} else {
		alloc->offset = (*block->mm->offset)(block->mm,
						     block->mm_info);
		alloc->hw_addr = alloc->offset;
		/* index is the key userspace later passes to xgi_free(). */
		alloc->index = block->user_hash.key;

		if (alloc->location == XGI_MEMLOC_NON_LOCAL) {
			alloc->hw_addr += info->pcie.base;
		}

		DRM_DEBUG("%s memory allocation succeeded: 0x%x\n",
			  mem_name, alloc->offset);
	}

	return 0;
}
/* Thin ioctl wrapper around xgi_alloc(). */
int xgi_alloc_ioctl(struct drm_device * dev, void * data,
		    struct drm_file * filp)
{
	struct xgi_info *const info = dev->dev_private;

	return xgi_alloc(info, (struct xgi_mem_alloc *) data, filp);
}
/* Release an allocation previously returned by xgi_alloc(); @index is
 * the user-hash key recorded in xgi_mem_alloc::index.
 */
int xgi_free(struct xgi_info * info, unsigned long index,
	     struct drm_file * filp)
{
	int err;

	mutex_lock(&info->dev->struct_mutex);
	err = drm_sman_free_key(&info->sman, index);
	mutex_unlock(&info->dev->struct_mutex);

	return err;
}
/* Thin ioctl wrapper around xgi_free(); data is the allocation key. */
int xgi_free_ioctl(struct drm_device * dev, void * data,
		   struct drm_file * filp)
{
	struct xgi_info *const info = dev->dev_private;

	return xgi_free(info, *(unsigned long *) data, filp);
}
/* Initialize the on-card heap, reserving the first XGI_FB_HEAP_START
 * bytes of the framebuffer (scan-out area) from the allocator.
 */
int xgi_fb_heap_init(struct xgi_info * info)
{
	int err;

	mutex_lock(&info->dev->struct_mutex);
	err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
				 XGI_FB_HEAP_START,
				 info->fb.size - XGI_FB_HEAP_START);
	mutex_unlock(&info->dev->struct_mutex);

	info->fb_heap_initialized = (err == 0);
	return err;
}

127
linux-core/xgi_fence.c Normal file
View file

@ -0,0 +1,127 @@
/*
* (C) Copyright IBM Corporation 2007
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Ian Romanick <idr@us.ibm.com>
*/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"
/* Poll fence completion for fence class @class (only class 0 is valid).
 *
 * Reads the current "begin identification" value from MMIO register
 * 0x2820; if it has advanced past info->complete_sequence, the EXE fence
 * type is signaled via drm_fence_handler().
 *
 * Returns the still-pending flush type mask (EXE folded in from
 * pending_exe_flush), or 0 for an invalid class / missing private data.
 * Caller must hold the fence manager's write lock.
 */
static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
{
	struct xgi_info * info = dev->dev_private;
	struct drm_fence_class_manager * fc = &dev->fm.class[class];
	uint32_t pending_flush_types = 0;
	uint32_t signaled_flush_types = 0;

	if ((info == NULL) || (class != 0))
		return 0;

	DRM_SPINLOCK(&info->fence_lock);
	pending_flush_types = fc->pending_flush |
		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
	if (pending_flush_types) {
		if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
			/* Hardware-reported sequence of the last begun batch. */
			const u32 begin_id = DRM_READ32(info->mmio_map,
							0x2820)
				& BEGIN_BEGIN_IDENTIFICATION_MASK;

			if (begin_id != info->complete_sequence) {
				info->complete_sequence = begin_id;
				signaled_flush_types |= DRM_FENCE_TYPE_EXE;
			}
		}

		if (signaled_flush_types) {
			drm_fence_handler(dev, 0, info->complete_sequence,
					  signaled_flush_types);
		}
	}
	DRM_SPINUNLOCK(&info->fence_lock);

	return fc->pending_flush |
		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
}
/* Allocate the next fence sequence number and emit the corresponding
 * interrupt command.
 *
 * Sequence numbers wrap within BEGIN_BEGIN_IDENTIFICATION_MASK and skip
 * 0, matching the hardware "begin identification" field read back by
 * xgi_do_flush().
 *
 * Returns 0 on success with *sequence and *native_type filled in, or
 * -EINVAL for a missing private struct / non-zero fence class.
 *
 * FIX: the original read info->next_sequence into *sequence AFTER
 * dropping fence_lock (and after emitting the IRQ); a concurrent emitter
 * could bump the counter in between, handing this caller the wrong
 * sequence.  Capture the value while the lock is still held.
 */
int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
			    uint32_t flags, uint32_t * sequence,
			    uint32_t * native_type)
{
	struct xgi_info * info = dev->dev_private;

	if ((info == NULL) || (class != 0))
		return -EINVAL;

	DRM_SPINLOCK(&info->fence_lock);
	info->next_sequence++;
	if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
		info->next_sequence = 1;
	}
	*sequence = (uint32_t) info->next_sequence;
	DRM_SPINUNLOCK(&info->fence_lock);

	xgi_emit_irq(info);

	*native_type = DRM_FENCE_TYPE_EXE;
	return 0;
}
/* Manually kick fence completion for @class, taking the fence manager
 * write lock with interrupts disabled (may be called outside IRQ
 * context).
 */
void xgi_poke_flush(struct drm_device * dev, uint32_t class)
{
	struct drm_fence_manager *const fence_mgr = &dev->fm;
	unsigned long irq_flags;

	write_lock_irqsave(&fence_mgr->lock, irq_flags);
	(void) xgi_do_flush(dev, class);
	write_unlock_irqrestore(&fence_mgr->lock, irq_flags);
}
/* Fence completion entry point for class 0, run under the fence
 * manager's plain write lock.
 */
void xgi_fence_handler(struct drm_device * dev)
{
	struct drm_fence_manager *const fence_mgr = &dev->fm;

	write_lock(&fence_mgr->lock);
	(void) xgi_do_flush(dev, 0);
	write_unlock(&fence_mgr->lock);
}
/* Only fence class 0 with the EXE fence type is signaled by an IRQ. */
int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
{
	if (class != 0)
		return 0;

	return (flags == DRM_FENCE_TYPE_EXE);
}

477
linux-core/xgi_misc.c Normal file
View file

@ -0,0 +1,477 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include <linux/delay.h>
/*
* irq functions
*/
#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
static unsigned int s_invalid_begin = 0;
/* Decide whether an apparent GE stall is "real" enough to justify a
 * reset.
 *
 * If status register 0x2800 reports a stall (bits 0x001c0000), a series
 * of diagnostic selects are written to 0x235c and read back through
 * 0x2360; any mismatched channel or outstanding PCIE request makes this
 * return FALSE (do not reset).  Returns TRUE when the engine is either
 * not stalled or genuinely wedged with no traffic in flight.
 */
static bool xgi_validate_signal(struct drm_map * map)
{
	if (DRM_READ32(map, 0x2800) & 0x001c0000) {
		u16 check;

		/* Check Read back status */
		DRM_WRITE8(map, 0x235c, 0x80);
		check = DRM_READ16(map, 0x2360);
		if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
			return FALSE;
		}

		/* Check RO channel */
		DRM_WRITE8(map, 0x235c, 0x83);
		check = DRM_READ16(map, 0x2360);
		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
			return FALSE;
		}

		/* Check RW channel */
		DRM_WRITE8(map, 0x235c, 0x88);
		check = DRM_READ16(map, 0x2360);
		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
			return FALSE;
		}

		/* Check RO channel outstanding */
		DRM_WRITE8(map, 0x235c, 0x8f);
		check = DRM_READ16(map, 0x2360);
		if (0 != (check & 0x3ff)) {
			return FALSE;
		}

		/* Check RW channel outstanding */
		DRM_WRITE8(map, 0x235c, 0x90);
		check = DRM_READ16(map, 0x2360);
		if (0 != (check & 0x3ff)) {
			return FALSE;
		}

		/* No pending PCIE request. GE stall. */
	}

	return TRUE;
}
/* Reset a hung graphics engine.
 *
 * First tries the soft reset via register 0xb057.  If the busy bits of
 * status register 0x2800 never clear within the countdown, falls back to
 * the full reset through CRTC register 3x5.36, temporarily disabling
 * dynamic gating (3ce/3cf index 0x2a bit 0) around it.  The decrementing
 * time_out loops are crude busy-wait delays.
 */
static void xgi_ge_hang_reset(struct drm_map * map)
{
	int time_out = 0xffff;

	DRM_WRITE8(map, 0xb057, 8);
	while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) {
		while (0 != ((--time_out) & 0xfff))
			/* empty */ ;

		if (0 == time_out) {
			u8 old_3ce;
			u8 old_3cf;
			u8 old_index;
			u8 old_36;

			DRM_INFO("Can not reset back 0x%x!\n",
				 DRM_READ32(map, 0x2800));

			DRM_WRITE8(map, 0xb057, 0);

			/* Have to use 3x5.36 to reset. */
			/* Save and close dynamic gating */

			old_3ce = DRM_READ8(map, 0x3ce);
			DRM_WRITE8(map, 0x3ce, 0x2a);
			old_3cf = DRM_READ8(map, 0x3cf);

			DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe);

			/* Reset GE */
			old_index = DRM_READ8(map, 0x3d4);
			DRM_WRITE8(map, 0x3d4, 0x36);
			old_36 = DRM_READ8(map, 0x3d5);
			DRM_WRITE8(map, 0x3d5, old_36 | 0x10);

			while (0 != ((--time_out) & 0xfff))
				/* empty */ ;

			DRM_WRITE8(map, 0x3d5, old_36);
			DRM_WRITE8(map, 0x3d4, old_index);

			/* Restore dynamic gating */
			DRM_WRITE8(map, 0x3cf, old_3cf);
			DRM_WRITE8(map, 0x3ce, old_3ce);
			break;
		}
	}

	DRM_WRITE8(map, 0xb057, 0);
}
/* Service a graphics-engine interrupt (status register 0x2810).
 *
 * Handles two GE sources: the stall interrupt (bit 0x1000) and the
 * "invalid begin" interrupt (bit 0x1); both are acknowledged by writing
 * the status back with bit 0x04000000 set.  Returns TRUE when the
 * interrupt was a GE event, FALSE if it belongs to another source.
 *
 * NOTE: is_support_auto_reset is hard-coded FALSE, so the auto-reset
 * branch (xgi_validate_signal / xgi_ge_hang_reset) is currently dead
 * code retained for future enabling.
 */
bool xgi_ge_irq_handler(struct xgi_info * info)
{
	const u32 int_status = DRM_READ32(info->mmio_map, 0x2810);
	bool is_support_auto_reset = FALSE;

	/* Check GE on/off */
	if (0 == (0xffffc0f0 & int_status)) {
		if (0 != (0x1000 & int_status)) {
			/* We got GE stall interrupt.
			 */
			DRM_WRITE32(info->mmio_map, 0x2810,
				    int_status | 0x04000000);

			if (is_support_auto_reset) {
				static cycles_t last_tick;
				static unsigned continue_int_count = 0;

				/* OE II is busy. */

				if (!xgi_validate_signal(info->mmio_map)) {
					/* Nothing but skip. */
				} else if (0 == continue_int_count++) {
					last_tick = get_cycles();
				} else {
					const cycles_t new_tick = get_cycles();

					/* Too long since the last stall: start a
					 * fresh observation window.  Three stalls
					 * in quick succession mean a hang.
					 */
					if ((new_tick - last_tick) >
					    STALL_INTERRUPT_RESET_THRESHOLD) {
						continue_int_count = 0;
					} else if (continue_int_count >= 3) {
						continue_int_count = 0;

						/* GE Hung up, need reset. */
						DRM_INFO("Reset GE!\n");

						xgi_ge_hang_reset(info->mmio_map);
					}
				}
			}
		} else if (0 != (0x1 & int_status)) {
			s_invalid_begin++;
			DRM_WRITE32(info->mmio_map, 0x2810,
				    (int_status & ~0x01) | 0x04000000);
		}

		return TRUE;
	}

	return FALSE;
}
/* Service a CRT interrupt.
 *
 * Checks 3ce/3cf index 0x37 bit 0 for a pending CRT1 interrupt and
 * clears it by pulsing bit 2 of index 0x3d.  The 3CE index register is
 * saved and restored because IN3CFB/OUT3CFB clobber it.  Returns TRUE
 * when a CRT interrupt was handled.
 */
bool xgi_crt_irq_handler(struct xgi_info * info)
{
	bool ret = FALSE;
	u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

	/* CRT1 interrupt just happened
	 */
	if (IN3CFB(info->mmio_map, 0x37) & 0x01) {
		u8 op3cf_3d;
		u8 op3cf_37;

		/* What happened?
		 */
		op3cf_37 = IN3CFB(info->mmio_map, 0x37);

		/* Clear CRT interrupt
		 */
		op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
		ret = TRUE;
	}
	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

	return (ret);
}
/* Service a DVI hot-plug interrupt.
 *
 * Checks 3ce/3cf index 0x38 bit 5, notifies the BIOS via 3d4/3d5 index
 * 0x5a, then clears the interrupt by pulsing bit 0 of index 0x39.
 * The 3CE index register is saved/restored around the indexed accesses.
 * Returns TRUE when a DVI interrupt was handled.
 *
 * NOTE(review): op3cf_39 is read through the 3CE/3CF pair (IN3CFB) but
 * written back through the 3C4/3C5 pair (OUT3C5B) — confirm this port
 * mix is intentional and not a typo for OUT3CFB.
 */
bool xgi_dvi_irq_handler(struct xgi_info * info)
{
	bool ret = FALSE;
	const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

	/* DVI interrupt just happened
	 */
	if (IN3CFB(info->mmio_map, 0x38) & 0x20) {
		const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
		u8 op3cf_39;
		u8 op3cf_37;
		u8 op3x5_5a;

		/* What happened?
		 */
		op3cf_37 = IN3CFB(info->mmio_map, 0x37);

		/* Notify BIOS that DVI plug/unplug happened
		 */
		op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
		OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);

		DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);

		/* Clear DVI interrupt
		 */
		op3cf_39 = IN3CFB(info->mmio_map, 0x39);
		OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
		OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));

		ret = TRUE;
	}
	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

	return (ret);
}
/* Print the banner + column labels for one register-bank dump. */
static void dump_reg_header(unsigned regbase)
{
	printk("\n=====xgi_dump_register========0x%x===============\n",
	       regbase);
	printk(" 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
}
/* Dump all 256 values behind an indexed VGA register pair.
 * @regbase is the data port (e.g. 0x3d5); the index port is assumed to
 * live at regbase - 1.
 */
static void dump_indexed_reg(struct xgi_info * info, unsigned regbase)
{
	unsigned i, j;
	u8 temp;

	dump_reg_header(regbase);
	for (i = 0; i < 0x10; i++) {
		printk("%1x ", i);
		for (j = 0; j < 0x10; j++) {
			/* Select index (i*16 + j), then read its value. */
			DRM_WRITE8(info->mmio_map, regbase - 1,
				   (i * 0x10) + j);
			temp = DRM_READ8(info->mmio_map, regbase);
			printk("%3x", temp);
		}
		printk("\n");
	}
}
/* Dump @range rows of 16 consecutive MMIO byte registers starting at
 * @regbase.
 */
static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
{
	unsigned i, j;

	dump_reg_header(regbase);
	for (i = 0; i < range; i++) {
		printk("%1x ", i);
		for (j = 0; j < 0x10; j++) {
			u8 temp = DRM_READ8(info->mmio_map,
					    regbase + (i * 0x10) + j);
			printk("%3x", temp);
		}
		printk("\n");
	}
}
/* Debug aid: log the indexed VGA register banks (data ports 0x3c5,
 * 0x3d5, 0x3cf) and the interesting MMIO blocks to the kernel log.
 */
void xgi_dump_register(struct xgi_info * info)
{
	dump_indexed_reg(info, 0x3c5);
	dump_indexed_reg(info, 0x3d5);
	dump_indexed_reg(info, 0x3cf);

	dump_reg(info, 0xB000, 0x05);
	dump_reg(info, 0x2200, 0x0B);
	dump_reg(info, 0x2300, 0x07);
	dump_reg(info, 0x2400, 0x10);
	dump_reg(info, 0x2800, 0x10);
}
#define WHOLD_GE_STATUS 0x2800
/* Test everything except the "whole GE busy" bit, the "master engine busy"
* bit, and the reserved bits [26:21].
*/
#define IDLE_MASK ~((1U<<31) | (1U<<28) | (0x3f<<21))
/* Block until the graphics engine reports idle five reads in a row.
 *
 * Idle means the WHOLD_GE_STATUS register is zero after masking with
 * IDLE_MASK.  While busy, poll once per millisecond; a status value that
 * does not change for 100 consecutive busy reads is logged as stuck.
 */
void xgi_waitfor_pci_idle(struct xgi_info * info)
{
	unsigned int consecutive_idle = 0;
	unsigned int stuck_iterations = 0;
	u32 last_status = 0;

	while (consecutive_idle < 5) {
		const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
			& IDLE_MASK;

		if (status != last_status) {
			last_status = status;
			stuck_iterations = 0;
		} else {
			stuck_iterations++;
			if ((stuck_iterations % 100) == 0) {
				DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n",
					  last_status, stuck_iterations);
			}
		}

		if (status == 0) {
			consecutive_idle++;
		} else {
			msleep(1);
			consecutive_idle = 0;
		}
	}
}
/* Turn on MMIO register access (3d4/3d5 index 0x39 bit 0) and route
 * relocated I/O (index 0x3A bit 5).  Register protection (3c4/3c5
 * index 0x11) is lifted with the 0x92 magic first and restored after.
 */
void xgi_enable_mmio(struct xgi_info * info)
{
	u8 protect = 0;
	u8 temp;

	/* Unprotect registers */
	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
	protect = DRM_READ8(info->mmio_map, 0x3C5);
	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

	DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);

	/* Enable MMIO */
	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);

	/* Protect registers */
	OUT3C5B(info->mmio_map, 0x11, protect);
}
/* Turn off MMIO register access (3d4/3d5 index 0x39 bit 0), restoring
 * the original register-protection value afterwards.  Mirror image of
 * xgi_enable_mmio().
 */
void xgi_disable_mmio(struct xgi_info * info)
{
	u8 protect = 0;
	u8 temp;

	/* Unprotect registers */
	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
	protect = DRM_READ8(info->mmio_map, 0x3C5);
	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

	/* Disable MMIO access */
	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);

	/* Protect registers */
	OUT3C5B(info->mmio_map, 0x11, protect);
}
/* Power up and reset the 2D/3D graphics engine.
 *
 * Dynamic GE power gating (3ce/3cf XGI_MISC_CTRL bit EN_GEPWM) is
 * disabled during the sequence and restored at the end.  The repeated
 * ten-read loops on register 0x36 are apparently settle delays between
 * GE control writes — no status bit is checked.
 */
void xgi_enable_ge(struct xgi_info * info)
{
	u8 bOld3cf2a;
	int wait = 0;

	/* Magic value unlocks register protection (see xgi_enable_mmio). */
	OUT3C5B(info->mmio_map, 0x11, 0x92);

	/* Save and close dynamic gating
	 */
	bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL);
	OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM);

	/* Enable 2D and 3D GE
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Reset both 3D and 2D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL,
		(GE_ENABLE | GE_RESET | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Enable 2D engine only
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE);

	/* Enable 2D+3D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));

	/* Restore dynamic gating
	 */
	OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a);
}
/* Reset and then disable both the 2D and 3D graphics engines.
 * Same settle-delay pattern as xgi_enable_ge(): ten dummy reads of
 * register 0x36 between control writes.
 */
void xgi_disable_ge(struct xgi_info * info)
{
	int wait = 0;

	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Reset both 3D and 2D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL,
		(GE_ENABLE | GE_RESET | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Disable 2D engine and 3D engine.
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0);
}

37
linux-core/xgi_misc.h Normal file
View file

@ -0,0 +1,37 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_MISC_H_
#define _XGI_MISC_H_

/* Dump the indexed VGA register banks and MMIO blocks to the kernel log. */
extern void xgi_dump_register(struct xgi_info * info);

/* Per-source interrupt handlers; each returns TRUE when its interrupt
 * source was pending and has been serviced. */
extern bool xgi_ge_irq_handler(struct xgi_info * info);
extern bool xgi_crt_irq_handler(struct xgi_info * info);
extern bool xgi_dvi_irq_handler(struct xgi_info * info);

/* Poll until the graphics engine reports idle (see xgi_misc.c). */
extern void xgi_waitfor_pci_idle(struct xgi_info * info);

#endif

126
linux-core/xgi_pcie.c Normal file
View file

@ -0,0 +1,126 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
/* Point the chip at the current GART table and flush its cached copy.
 * Uses gart_info.bus_addr, so the PCI GART must already be initialized
 * (see xgi_pcie_heap_init).
 */
void xgi_gart_flush(struct drm_device *dev)
{
	struct xgi_info *const info = dev->dev_private;
	u8 temp;

	/* Make sure all table writes are visible before the flush. */
	DRM_MEMORYBARRIER();

	/* Set GART in SFB */
	temp = DRM_READ8(info->mmio_map, 0xB00C);
	DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);

	/* Set GART base address to HW */
	DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr);

	/* Flush GART table. */
	DRM_WRITE8(info->mmio_map, 0xB03F, 0x40);
	DRM_WRITE8(info->mmio_map, 0xB03F, 0x00);
}
/* Set up the PCI-e (non-local) memory heap.
 *
 * Determines the GART base from the FB aperture size (3d4/3d5 index
 * 0x27 bit 0) and the lookup-table page size (0xB00C bit 2), allocates
 * scatter/gather backing pages, initializes the ATI-style PCI GART, and
 * registers the range with the SMAN manager.  Returns 0 on success.
 *
 * NOTE(review): if drm_ati_pcigart_init() fails, the scatter/gather
 * allocation from drm_sg_alloc() is not released — confirm whether the
 * caller's teardown covers this or a drm_sg_free() is missing here.
 */
int xgi_pcie_heap_init(struct xgi_info * info)
{
	u8 temp = 0;
	int err;
	struct drm_scatter_gather request;

	/* Get current FB aperture size */
	temp = IN3X5B(info->mmio_map, 0x27);
	DRM_INFO("In3x5(0x27): 0x%x \n", temp);

	if (temp & 0x01) {	/* 256MB; Jong 06/05/2006; 0x10000000 */
		info->pcie.base = 256 * 1024 * 1024;
	} else {		/* 128MB; Jong 06/05/2006; 0x08000000 */
		info->pcie.base = 128 * 1024 * 1024;
	}

	DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);

	/* Get current lookup table page size */
	temp = DRM_READ8(info->mmio_map, 0xB00C);
	if (temp & 0x04) {	/* 8KB */
		info->lutPageSize = 8 * 1024;
	} else {		/* 4KB */
		info->lutPageSize = 4 * 1024;
	}

	DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);

	request.size = info->pcie.size;
	err = drm_sg_alloc(info->dev, & request);
	if (err) {
		DRM_ERROR("cannot allocate PCIE GART backing store! "
			  "size = %d\n", info->pcie.size);
		return err;
	}

	info->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
	info->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
	info->gart_info.table_size = info->dev->sg->pages * sizeof(u32);

	if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) {
		DRM_ERROR("failed to init PCI GART!\n");
		return -ENOMEM;
	}

	xgi_gart_flush(info->dev);

	mutex_lock(&info->dev->struct_mutex);
	err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL,
				 0, info->pcie.size);
	mutex_unlock(&info->dev->struct_mutex);
	if (err) {
		drm_ati_pcigart_cleanup(info->dev, &info->gart_info);
	}

	info->pcie_heap_initialized = (err == 0);
	return err;
}
/**
 * xgi_find_pcie_virt
 * @address: GE HW address
 *
 * Translate a graphics-hardware address in the PCI-e GART range into the
 * CPU virtual address of the scatter/gather backing store.  Assumes the
 * CPU mapping is contiguous across the range.
 */
void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
{
	u8 *const base = (u8 *) info->dev->sg->virtual;

	return base + (address - info->pcie.base);
}

169
linux-core/xgi_regs.h Normal file
View file

@ -0,0 +1,169 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_REGS_H_
#define _XGI_REGS_H_
#include "drmP.h"
#include "drm.h"
#define MAKE_MASK(bits) ((1U << (bits)) - 1)
#define ONE_BIT_MASK MAKE_MASK(1)
#define TWENTY_BIT_MASK MAKE_MASK(20)
#define TWENTYONE_BIT_MASK MAKE_MASK(21)
#define TWENTYTWO_BIT_MASK MAKE_MASK(22)
/* Port 0x3d4/0x3d5, index 0x2a */
#define XGI_INTERFACE_SEL 0x2a
#define DUAL_64BIT (1U<<7)
#define INTERNAL_32BIT (1U<<6)
#define EN_SEP_WR (1U<<5)
#define POWER_DOWN_SEL (1U<<4)
/*#define RESERVED_3 (1U<<3) */
#define SUBS_MCLK_PCICLK (1U<<2)
#define MEM_SIZE_MASK (3<<0)
#define MEM_SIZE_32MB (0<<0)
#define MEM_SIZE_64MB (1<<0)
#define MEM_SIZE_128MB (2<<0)
#define MEM_SIZE_256MB (3<<0)
/* Port 0x3d4/0x3d5, index 0x36 */
#define XGI_GE_CNTL 0x36
#define GE_ENABLE (1U<<7)
/*#define RESERVED_6 (1U<<6) */
/*#define RESERVED_5 (1U<<5) */
#define GE_RESET (1U<<4)
/*#define RESERVED_3 (1U<<3) */
#define GE_ENABLE_3D (1U<<2)
/*#define RESERVED_1 (1U<<1) */
/*#define RESERVED_0 (1U<<0) */
/* Port 0x3ce/0x3cf, index 0x2a */
#define XGI_MISC_CTRL 0x2a
#define MOTION_VID_SUSPEND (1U<<7)
#define DVI_CRTC_TIMING_SEL (1U<<6)
#define LCD_SEL_CTL_NEW (1U<<5)
#define LCD_SEL_EXT_DELYCTRL (1U<<4)
#define REG_LCDDPARST (1U<<3)
#define LCD2DPAOFF (1U<<2)
/*#define RESERVED_1 (1U<<1) */
#define EN_GEPWM (1U<<0) /* Enable GE power management */
#define BASE_3D_ENG 0x2800
#define M2REG_FLUSH_ENGINE_ADDRESS 0x000
#define M2REG_FLUSH_ENGINE_COMMAND 0x00
#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21)
#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20)
#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK
#define M2REG_RESET_ADDRESS 0x004
#define M2REG_RESET_COMMAND 0x01
#define M2REG_RESET_STATUS2_MASK (ONE_BIT_MASK<<10)
#define M2REG_RESET_STATUS1_MASK (ONE_BIT_MASK<<9)
#define M2REG_RESET_STATUS0_MASK (ONE_BIT_MASK<<8)
#define M2REG_RESET_3DENG_MASK (ONE_BIT_MASK<<4)
#define M2REG_RESET_2DENG_MASK (ONE_BIT_MASK<<2)
/* Write register */
#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010
#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04
#define M2REG_CLEAR_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11)
#define M2REG_CLEAR_INTERRUPT_3_MASK (ONE_BIT_MASK<<10)
#define M2REG_CLEAR_INTERRUPT_2_MASK (ONE_BIT_MASK<<9)
#define M2REG_CLEAR_INTERRUPT_0_MASK (ONE_BIT_MASK<<8)
#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4)
#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1)
#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK (ONE_BIT_MASK<<0)
/* Read register */
#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010
#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04
#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11)
#define M2REG_ACTIVE_INTERRUPT_3_MASK (ONE_BIT_MASK<<10)
#define M2REG_ACTIVE_INTERRUPT_2_MASK (ONE_BIT_MASK<<9)
#define M2REG_ACTIVE_INTERRUPT_0_MASK (ONE_BIT_MASK<<8)
#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK (ONE_BIT_MASK<<0)
#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014
#define M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05
/**
* Begin instruction, double-word 0
*/
#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK (ONE_BIT_MASK<<22)
#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20)
#define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK
/**
* Begin instruction, double-word 1
*/
#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31)
#define BEGIN_COMMAND_LIST_LENGTH_MASK TWENTYTWO_BIT_MASK
/* Hardware access functions */
/* Write @data to VGA sequencer register pair 3C4/3C5 at @index. */
static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
{
	DRM_WRITE8(map, 0x3C4, index);
	DRM_WRITE8(map, 0x3C5, data);
}
/* Write @data to VGA CRTC register pair 3D4/3D5 at @index. */
static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
{
	DRM_WRITE8(map, 0x3D4, index);
	DRM_WRITE8(map, 0x3D5, data);
}
/* Write @data to VGA graphics-controller register pair 3CE/3CF at @index. */
static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
{
	DRM_WRITE8(map, 0x3CE, index);
	DRM_WRITE8(map, 0x3CF, data);
}
/* Read the VGA sequencer register (3C4/3C5) at @index. */
static inline u8 IN3C5B(struct drm_map * map, u8 index)
{
	DRM_WRITE8(map, 0x3C4, index);
	return DRM_READ8(map, 0x3C5);
}
/* Read the VGA CRTC register (3D4/3D5) at @index. */
static inline u8 IN3X5B(struct drm_map * map, u8 index)
{
	DRM_WRITE8(map, 0x3D4, index);
	return DRM_READ8(map, 0x3D5);
}
/* Read the VGA graphics-controller register (3CE/3CF) at @index. */
static inline u8 IN3CFB(struct drm_map * map, u8 index)
{
	DRM_WRITE8(map, 0x3CE, index);
	return DRM_READ8(map, 0x3CF);
}
#endif

View file

@ -36,4 +36,5 @@ klibdrminclude_HEADERS = \
sis_drm.h \
via_drm.h \
r300_reg.h \
via_3d_reg.h
via_3d_reg.h \
xgi_drm.h

View file

@ -735,3 +735,6 @@
0x12d2 0x002c NV_04 "VTNT2"
0x12d2 0x00a0 NV_04 "ITNT2"
[xgi]
0x18ca 0x2200 0 "XP5"
0x18ca 0x0047 0 "XP10 / XG47"

133
shared-core/xgi_drm.h Normal file
View file

@ -0,0 +1,133 @@
/****************************************************************************
* Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
* ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
#ifndef _XGI_DRM_H_
#define _XGI_DRM_H_
#include <linux/types.h>
#include <asm/ioctl.h>
/* Shared-area (SAREA) state exposed to user space: PCI identity and the
 * current screen configuration.
 */
struct drm_xgi_sarea {
	__u16 device_id;
	__u16 vendor_id;

	char device_name[32];

	/* NOTE(review): presumably the framebuffer offset of the visible
	 * screen plus its mode geometry — confirm against the DDX driver. */
	unsigned int scrn_start;
	unsigned int scrn_xres;
	unsigned int scrn_yres;
	unsigned int scrn_bpp;
	unsigned int scrn_pitch;
};
struct xgi_bootstrap {
	/**
	 * Map describing the PCI-e GART range.
	 *
	 * NOTE(review): the old comment here said "Size of PCI-e GART range
	 * in megabytes", but the field is a full drm_map — presumably user
	 * space fills in gart.size and the kernel completes the rest on
	 * return; confirm against the bootstrap ioctl handler.
	 */
	struct drm_map gart;
};
/* Heap selector for xgi_mem_alloc.location: system (GART-backed) memory
 * vs. on-card framebuffer memory. */
enum xgi_mem_location {
	XGI_MEMLOC_NON_LOCAL = 0,
	XGI_MEMLOC_LOCAL = 1,
	XGI_MEMLOC_INVALID = 0x7fffffff
};
/* In/out argument block for the DRM_XGI_ALLOC ioctl. */
struct xgi_mem_alloc {
	/**
	 * Memory region to be used for allocation.
	 *
	 * Must be one of XGI_MEMLOC_NON_LOCAL or XGI_MEMLOC_LOCAL.
	 */
	unsigned int location;

	/**
	 * Number of bytes requested.
	 *
	 * On successful allocation, set to the actual number of bytes
	 * allocated.
	 */
	unsigned int size;

	/**
	 * Address of the memory from the graphics hardware's point of view.
	 */
	__u32 hw_addr;

	/**
	 * Offset of the allocation in the mapping.
	 */
	__u32 offset;

	/**
	 * Magic handle used to release memory.
	 *
	 * See also DRM_XGI_FREE ioctl.
	 */
	unsigned long index;
};
/* Kind of command batch submitted via DRM_XGI_SUBMIT_CMDLIST
 * (xgi_cmd_info.type). */
enum xgi_batch_type {
	BTYPE_2D = 0,
	BTYPE_3D = 1,
	BTYPE_FLIP = 2,
	BTYPE_CTRL = 3,
	BTYPE_NONE = 0x7fffffff
};
/* Argument block for DRM_XGI_SUBMIT_CMDLIST: one command batch. */
struct xgi_cmd_info {
	__u32 type;	/* an xgi_batch_type value */
	__u32 hw_addr;	/* hardware address of the command list */
	__u32 size;	/* batch size — units not stated here; TODO confirm */
	__u32 id;	/* sequence / begin-identification number, presumably */
};
/* Argument block for DRM_XGI_STATE_CHANGE — presumably a 2D/3D engine
 * state transition; confirm against the state-change ioctl handler. */
struct xgi_state_info {
	unsigned int _fromState;
	unsigned int _toState;
};
/*
* Ioctl definitions
*/
#define DRM_XGI_BOOTSTRAP 0
#define DRM_XGI_ALLOC 1
#define DRM_XGI_FREE 2
#define DRM_XGI_SUBMIT_CMDLIST 3
#define DRM_XGI_STATE_CHANGE 4
#define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
#define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc)
#define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32)
#define XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
#define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)
#endif /* _XGI_DRM_H_ */