Add reworked via driver as "openchrome".

Thomas Hellstrom 2009-01-17 12:57:04 +01:00
parent 39daf20090
commit d93fd75f4d
34 changed files with 7047 additions and 1 deletion

View file

@@ -97,6 +97,8 @@ RADEONMSHEADERS = radeon_ms_driver.h $(DRMHEADERS)
PROGS = dristat drmstat
CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c drm_pciids.h .tmp_versions
CLEANMODFILES = *.o *.ko modules.order Module.symvers *.mod.c
# VERSION is not defined from the initial invocation. It is defined when
# this Makefile is invoked from the kernel's root Makefile.
@@ -218,6 +220,7 @@ endif
clean cleandir:
rm -rf $(CLEANFILES)
cd openchrome; rm -rf $(CLEANMODFILES)
$(MODULE_LIST)::
make DRM_MODULES=$@ modules
@@ -292,6 +295,7 @@ CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
CONFIG_DRM_XGI := n
CONFIG_DRM_RADEON_MS := n
CONFIG_DRM_OPENCHROME := n
# Enable module builds for the modules requested/supported.
@@ -334,6 +338,9 @@ endif
ifneq (,$(findstring radeon_ms,$(DRM_MODULES)))
#CONFIG_DRM_RADEON_MS := m
endif
ifneq (,$(findstring openchrome,$(DRM_MODULES)))
CONFIG_DRM_OPENCHROME := m
endif
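# Example (using the DRM_MODULES mechanism above): the reworked via driver
# can be built on its own with something like
#
#   make DRM_MODULES=openchrome modules
#
# which flips CONFIG_DRM_OPENCHROME to "m" via the findstring test above.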
# These require AGP support

View file

@@ -81,3 +81,4 @@ obj-$(CONFIG_DRM_NV) += nv.o
#obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
obj-$(CONFIG_DRM_XGI) += xgi.o
obj-$(CONFIG_DRM_RADEON_MS) += radeon_ms.o
obj-$(CONFIG_DRM_OPENCHROME) += openchrome/

View file

@@ -0,0 +1,14 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
#ccflags-m := -I..
openchrome-y := via_buffer.o via_context.o via_dmablit.o via_dma.o \
via_drv.o via_execbuf.o via_init.o via_irq.o via_ttm_fence.o \
via_ttm_glue.o via_verifier.o via_video.o \
ttm_agp_backend.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o \
ttm_execbuf_util.o ttm_fence.o ttm_fence_user.o ttm_lock.o \
ttm_memory.o ttm_object.o ttm_pat_compat.o ttm_placement_user.o \
ttm_tt.o
obj-m += openchrome.o

View file

@@ -0,0 +1 @@
../../shared-core/ochr_drm.h

View file

@@ -0,0 +1 @@
../ttm/ttm_agp_backend.c

View file

@@ -0,0 +1 @@
../ttm/ttm_bo.c

View file

@@ -0,0 +1 @@
../ttm/ttm_bo_util.c

View file

@@ -0,0 +1 @@
../ttm/ttm_bo_vm.c

View file

@@ -0,0 +1 @@
../ttm/ttm_execbuf_util.c

View file

@@ -0,0 +1 @@
../ttm/ttm_fence.c

View file

@@ -0,0 +1 @@
../ttm/ttm_fence_user.c

View file

@@ -0,0 +1 @@
../ttm/ttm_lock.c

View file

@@ -0,0 +1 @@
../ttm/ttm_memory.c

View file

@@ -0,0 +1 @@
../ttm/ttm_object.c

View file

@@ -0,0 +1 @@
../ttm/ttm_pat_compat.c

View file

@@ -0,0 +1 @@
../ttm/ttm_placement_user.c

View file

@@ -0,0 +1 @@
../ttm/ttm_tt.c

View file

@@ -0,0 +1,244 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "ochr_drm.h"
#include "via_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement_common.h"
#include "ttm/ttm_execbuf_util.h"
struct ttm_backend *via_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
struct drm_via_private *dev_priv =
container_of(bdev, struct drm_via_private, bdev);
return ttm_agp_backend_init(bdev, dev_priv->dev->agp->bridge);
}
int via_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
/*
* FIXME: Invalidate texture caches here.
*/
return 0;
}
int via_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
struct drm_via_private *dev_priv =
container_of(bdev, struct drm_via_private, bdev);
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
man->gpu_offset = dev_priv->tt_start;
man->io_offset = dev_priv->tt_start;
man->io_size = dev_priv->tt_size;
man->io_addr = NULL;
man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = 0;
man->io_offset = dev_priv->vram_start;
man->io_size = dev_priv->vram_size * 1024;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
man->io_addr = NULL;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_PRIV0:
man->gpu_offset = dev_priv->agp_bo->offset;
man->io_offset = dev_priv->agp_bo->offset;
man->io_size = (dev_priv->agp_bo->num_pages << PAGE_SHIFT);
man->io_addr = NULL;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
uint32_t via_evict_flags(struct ttm_buffer_object * bo)
{
uint32_t cur_placement = bo->mem.flags & ~TTM_PL_MASK_MEMTYPE;
switch (bo->mem.mem_type) {
/*
* Evict pre-bound AGP to VRAM, since
* that's the only fastpath we have.
* That is, when that fast-path is implemented.
*/
case TTM_PL_PRIV0:
return cur_placement | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_WC;
default:
return cur_placement | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_WC;
}
}
static int via_move_dmablit(struct ttm_buffer_object *bo,
int evict, int no_wait, struct ttm_mem_reg *new_mem)
{
struct drm_via_private *dev_priv =
container_of(bo->bdev, struct drm_via_private, bdev);
int ret;
int fence_class;
struct ttm_fence_object *fence;
if (no_wait) {
DRM_ERROR("Move dmablit busy.\n");
return -EBUSY;
}
ret = via_dmablit_bo(bo, new_mem, NULL, &fence_class);
if (unlikely(ret != 0)) {
DRM_ERROR("Dmablit error %d\n", ret);
return ret;
}
ret = ttm_fence_object_create(&dev_priv->fdev, fence_class,
TTM_FENCE_TYPE_EXE,
TTM_FENCE_FLAG_EMIT, &fence);
if (unlikely(ret != 0)) {
(void)via_driver_dma_quiescent(dev_priv->dev);
if (fence)
ttm_fence_object_unref(&fence);
}
ret = ttm_bo_move_accel_cleanup(bo, (void *)fence,
(void *)(unsigned long)
TTM_FENCE_TYPE_EXE, evict, no_wait,
new_mem);
if (fence)
ttm_fence_object_unref(&fence);
return ret;
}
static int via_move_vram_tt(struct ttm_buffer_object *bo,
int evict, int no_wait, struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
if (old_mem->mem_type == TTM_PL_TT) {
struct ttm_mem_reg tmp_mem = *old_mem;
tmp_mem.mm_node = NULL;
tmp_mem.proposed_flags &= ~TTM_PL_MASK_MEM;
tmp_mem.proposed_flags |= (TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED);
tmp_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
if (ret)
return ret;
return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else {
struct ttm_mem_reg tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
tmp_mem.proposed_flags &= ~TTM_PL_MASK_MEM;
tmp_mem.proposed_flags |= (TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED);
tmp_mem.mem_type = TTM_PL_SYSTEM;
ret = via_move_dmablit(bo, 1, no_wait, &tmp_mem);
if (ret)
return ret;
return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
}
return 0;
}
static void via_move_null(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
uint32_t save_proposed_flags = old_mem->proposed_flags;
BUG_ON(old_mem->mm_node != NULL);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->proposed_flags = save_proposed_flags;
}
int via_bo_move(struct ttm_buffer_object *bo,
int evict, int interruptible, int no_wait,
struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
via_move_null(bo, new_mem);
return 0;
}
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
int ret = via_move_dmablit(bo, evict, no_wait, new_mem);
if (likely(ret == 0))
return 0;
}
if ((old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_TT) ||
(old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_VRAM)) {
int ret = via_move_vram_tt(bo, evict, no_wait, new_mem);
if (likely(ret == 0))
return 0;
}
return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
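/*
 * Summary of the move strategy above (informational, derived from via_bo_move):
 *
 *   SYSTEM with no ttm  -> anywhere : via_move_null() simply swaps the mem regs.
 *   VRAM                -> SYSTEM   : accelerated DMA blit via via_move_dmablit().
 *   VRAM               <-> TT       : bounce through SYSTEM in via_move_vram_tt().
 *   anything else, or on failure    : fall back to ttm_bo_move_memcpy().
 */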

View file

@@ -0,0 +1,139 @@
/**************************************************************************
*
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*
* The purpose of having device-private context structures is to
* be able to keep pre-allocated per-context reloc, validate-buffer and
* pci buffer requests, in order to parallelize most of the execbuf handling.
* It also gives us a way to check for invalid context use.
*
* We refcount the via_cpriv struct since it's entirely possible that
* via_context_dtor is called while execbuf is using the context.
*/
#include "drmP.h"
#include "via_drv.h"
static void via_context_destroy(struct kref *kref)
{
struct via_cpriv *cpriv = container_of(kref, struct via_cpriv, kref);
if (cpriv->reloc_buf)
drm_free(cpriv->reloc_buf, VIA_RELOC_BUF_SIZE, DRM_MEM_DRIVER);
if (cpriv->val_bufs)
vfree(cpriv->val_bufs);
drm_free(cpriv, sizeof(*cpriv), DRM_MEM_DRIVER);
}
/*
* Note: odd error reporting - unlike most kernel code, this hook returns
* 1 on success and 0 on failure.
*/
int via_context_ctor(struct drm_device *dev, int context)
{
struct drm_via_private *dev_priv = via_priv(dev);
struct via_cpriv *cpriv;
int ret;
cpriv = drm_calloc(1, sizeof(*cpriv), DRM_MEM_DRIVER);
if (unlikely(cpriv == NULL))
return 0;
cpriv->hash.key = context;
atomic_set(&cpriv->in_execbuf, -1);
kref_init(&cpriv->kref);
cpriv->reloc_buf = drm_alloc(VIA_RELOC_BUF_SIZE, DRM_MEM_DRIVER);
if (unlikely(cpriv->reloc_buf == NULL))
goto out_err0;
write_lock(&dev_priv->context_lock);
ret = drm_ht_insert_item(&dev_priv->context_hash, &cpriv->hash);
write_unlock(&dev_priv->context_lock);
if (unlikely(ret != 0))
goto out_err1;
return 1;
out_err1:
drm_free(cpriv->reloc_buf, VIA_RELOC_BUF_SIZE, DRM_MEM_DRIVER);
out_err0:
drm_free(cpriv, sizeof(*cpriv), DRM_MEM_DRIVER);
return 0;
}
int via_context_dtor(struct drm_device *dev, int context)
{
struct drm_via_private *dev_priv = via_priv(dev);
struct drm_hash_item *hash;
struct via_cpriv *cpriv = NULL;
int ret;
via_release_futex(dev_priv, context);
write_lock(&dev_priv->context_lock);
ret = drm_ht_find_item(&dev_priv->context_hash, context, &hash);
if (ret == 0) {
(void)drm_ht_remove_item(&dev_priv->context_hash, hash);
cpriv = drm_hash_entry(hash, struct via_cpriv, hash);
}
write_unlock(&dev_priv->context_lock);
BUG_ON(ret != 0);
if (likely(cpriv != NULL))
kref_put(&cpriv->kref, via_context_destroy);
return 0;
}
struct via_cpriv *via_context_lookup(struct drm_via_private *dev_priv,
int context)
{
struct drm_hash_item *hash;
struct via_cpriv *cpriv = NULL;
int ret;
read_lock(&dev_priv->context_lock);
ret = drm_ht_find_item(&dev_priv->context_hash, context, &hash);
if (likely(ret == 0)) {
cpriv = drm_hash_entry(hash, struct via_cpriv, hash);
kref_get(&cpriv->kref);
}
read_unlock(&dev_priv->context_lock);
return cpriv;
}
void via_context_unref(struct via_cpriv **p_cpriv)
{
struct via_cpriv *cpriv = *p_cpriv;
*p_cpriv = NULL;
kref_put(&cpriv->kref, via_context_destroy);
}
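/*
 * Illustrative sketch only (hypothetical caller, not part of this file): the
 * refcounting above is what allows an execbuf path to keep using a context
 * even if via_context_dtor runs concurrently.
 */
#if 0
static int example_use_context(struct drm_via_private *dev_priv, int context)
{
	struct via_cpriv *cpriv = via_context_lookup(dev_priv, context);

	if (unlikely(cpriv == NULL))
		return -EINVAL;

	/* ... use cpriv->reloc_buf and cpriv->val_bufs here ... */

	via_context_unref(&cpriv);	/* drops the reference, clears the pointer */
	return 0;
}
#endif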

View file

@@ -0,0 +1,653 @@
/* via_dma.c -- DMA support for the VIA Unichrome/Pro
*
* Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
* Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
* All Rights Reserved.
* Copyright 2004 The Unichrome project.
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Tungsten Graphics,
* Erdi Chen,
* Thomas Hellstrom.
*/
#include "drmP.h"
#include "drm.h"
#include "ochr_drm.h"
#include "via_drv.h"
#include "via_3d_reg.h"
#define VIA_FENCE_EXTRA (7*8)
#define VIA_OUT_RING_H1(nReg, nData) \
{ \
iowrite32(((nReg) >> 2) | HALCYON_HEADER1, vb); \
iowrite32(nData, vb + 1); \
vb += 2; \
dev_priv->dma_low += 8; \
}
#define via_flush_write_combine() DRM_MEMORYBARRIER()
#define VIA_OUT_RING_QW(w1,w2) \
{ \
iowrite32(w1, vb); \
iowrite32(w2, vb + 1); \
vb += 2; \
dev_priv->dma_low += 8; \
}
#define VIA_TRACKER_INTERVAL 0x10000
struct via_dma_tracker {
struct list_head head;
uint32_t seq;
uint32_t loc;
};
static void via_cmdbuf_start(struct drm_via_private *dev_priv);
static void via_cmdbuf_pause(struct drm_via_private *dev_priv);
static void via_cmdbuf_rewind(struct drm_via_private *dev_priv);
/*
* FIXME: A memory cache for trackers?
*/
static void via_traverse_trackers(struct drm_via_private *dev_priv)
{
struct via_dma_tracker *tracker, *next;
uint32_t cur_seq = ioread32(dev_priv->fence_map);
list_for_each_entry_safe(tracker, next, &dev_priv->dma_trackers, head) {
if ((cur_seq - tracker->seq) < (1 << 31)) {
dev_priv->dma_free = tracker->loc;
list_del(&tracker->head);
kfree(tracker);
} else {
break;
}
}
}
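/*
 * The retirement test above relies on unsigned 32-bit wraparound arithmetic:
 * a tracker is considered passed when (cur_seq - tracker->seq) < 2^31, i.e.
 * when cur_seq is no more than half the sequence space ahead. For example,
 * with tracker->seq == 0xfffffffe and cur_seq == 0x00000002 the unsigned
 * difference is 4, so the tracker is retired even though cur_seq has
 * numerically wrapped.
 */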
/*
* FIXME: Wait interruptibly.
*/
static inline int via_cmdbuf_wait(struct drm_via_private *dev_priv,
unsigned int size)
{
uint32_t cur_addr = dev_priv->dma_low;
uint32_t next_addr = cur_addr + size + (512 * 1024);
while (unlikely(dev_priv->dma_free > cur_addr &&
dev_priv->dma_free <= next_addr)) {
msleep(1);
via_traverse_trackers(dev_priv);
}
return 0;
}
static int via_add_tracker(struct drm_via_private *dev_priv, uint32_t sequence)
{
struct via_dma_tracker *tracker;
if (likely
(((dev_priv->dma_low - dev_priv->dma_tracker) & VIA_AGPC_MASK) <
VIA_TRACKER_INTERVAL))
return 0;
via_traverse_trackers(dev_priv);
dev_priv->dma_tracker = dev_priv->dma_low;
tracker = kmalloc(sizeof(*tracker), GFP_KERNEL);
if (!tracker)
return -ENOMEM;
tracker->loc = dev_priv->dma_low;
tracker->seq = sequence;
list_add_tail(&tracker->head, &dev_priv->dma_trackers);
return 0;
}
/*
* Checks whether the buffer head has reached the end, and rewinds the ring
* buffer when necessary.
*
* Returns a virtual pointer into the ring buffer.
*/
static inline uint32_t __iomem *via_check_dma(struct drm_via_private *dev_priv,
unsigned int size)
{
if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
dev_priv->dma_high) {
/*
* Make sure the reader is wrapped before we do this!!
*/
via_cmdbuf_wait(dev_priv, size + 4 * CMDBUF_ALIGNMENT_SIZE);
via_cmdbuf_rewind(dev_priv);
}
if (via_cmdbuf_wait(dev_priv, size) != 0) {
return NULL;
}
return (uint32_t __iomem *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
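/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * the intended emit pattern is to reserve ring space with via_check_dma()
 * and then write header-1 register/value pairs, each of which advances
 * dma_low by 8 bytes.
 */
#if 0
static void example_emit_one_reg(struct drm_via_private *dev_priv,
				 uint32_t reg, uint32_t value)
{
	uint32_t __iomem *vb = via_check_dma(dev_priv, 8);

	if (unlikely(vb == NULL))
		return;
	VIA_OUT_RING_H1(reg, value);	/* two dwords: header1 + data */
	via_cmdbuf_pause(dev_priv);	/* hook the new tail into the ring */
}
#endif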
void via_dma_initialize(struct drm_via_private *dev_priv)
{
dev_priv->dma_ptr = (char *)dev_priv->agpc_map;
dev_priv->dma_low = 0;
dev_priv->dma_tracker = dev_priv->dma_low;
dev_priv->dma_high = dev_priv->agpc_bo->num_pages << PAGE_SHIFT;
dev_priv->dma_free = dev_priv->dma_high;
dev_priv->dma_wrap = dev_priv->dma_high;
dev_priv->dma_offset = dev_priv->agpc_bo->offset;
dev_priv->last_pause_ptr = NULL;
dev_priv->hw_addr_ptr =
(uint32_t __iomem *) (dev_priv->mmio_map + 0x418);
via_cmdbuf_start(dev_priv);
}
static void via_emit_blit_sequence(struct drm_via_private *dev_priv,
uint32_t __iomem * vb,
uint32_t offset, uint32_t value)
{
uint32_t vram_offset = dev_priv->fence_bo->offset + offset;
VIA_OUT_RING_H1(VIA_REG_GEMODE, VIA_GEM_32bpp);
VIA_OUT_RING_H1(VIA_REG_FGCOLOR, value);
VIA_OUT_RING_H1(VIA_REG_DSTBASE, (vram_offset & ~0x1f) >> 3);
VIA_OUT_RING_H1(VIA_REG_PITCH, VIA_PITCH_ENABLE |
(4 >> 3) | ((4 >> 3) << 16));
VIA_OUT_RING_H1(VIA_REG_DSTPOS, (vram_offset & 0x1f) >> 2);
VIA_OUT_RING_H1(VIA_REG_DIMENSION, 0);
VIA_OUT_RING_H1(VIA_REG_GECMD, VIA_GEC_BLT | VIA_GEC_FIXCOLOR_PAT
| (VIA_ROP_PAT << 24));
#if 0
VIA_OUT_RING_QW(VIA_VIDEO_HEADER6, 0x00000002);
VIA_OUT_RING_QW(0x00F60000, 0x00000000);
VIA_OUT_RING_QW(0x326C, (3 << 30) | (0x1f << 25));
VIA_OUT_RING_QW(0x33D0, (1 << 27) | (1 << 7) | (1 << 6) |
(1 << 4) | (1 << 0));
#endif
}
static void via_blit_sequence(struct drm_via_private *dev_priv,
uint32_t offset, uint32_t value)
{
uint32_t vram_offset = dev_priv->fence_bo->offset + offset;
VIA_WRITE(VIA_REG_GEMODE, VIA_GEM_32bpp);
VIA_WRITE(VIA_REG_FGCOLOR, value);
VIA_WRITE(VIA_REG_DSTBASE, (vram_offset & ~0x1f) >> 3);
VIA_WRITE(VIA_REG_PITCH, VIA_PITCH_ENABLE |
(4 >> 3) | ((4 >> 3) << 16));
VIA_WRITE(VIA_REG_DSTPOS, (vram_offset & 0x1f) >> 2);
VIA_WRITE(VIA_REG_DIMENSION, 0);
VIA_WRITE(VIA_REG_GECMD, VIA_GEC_BLT | VIA_GEC_FIXCOLOR_PAT
| (VIA_ROP_PAT << 24));
}
/*
* This function is used internally by the ring buffer management code.
*
* Returns a virtual pointer into the ring buffer.
*/
static inline uint32_t __iomem *via_get_dma(struct drm_via_private *dev_priv)
{
return (uint32_t __iomem *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
static void via_emit_fence_seq(struct drm_via_private *dev_priv,
uint32_t offset, uint32_t value)
{
via_emit_blit_sequence(dev_priv, via_get_dma(dev_priv), offset, value);
}
void via_emit_fence_seq_standalone(struct drm_via_private *dev_priv,
uint32_t offset, uint32_t value)
{
via_emit_blit_sequence(dev_priv,
via_check_dma(dev_priv, VIA_FENCE_EXTRA),
offset, value);
via_cmdbuf_pause(dev_priv);
}
int via_driver_dma_quiescent(struct drm_device *dev)
{
struct drm_via_private *dev_priv = dev->dev_private;
via_wait_idle(dev_priv);
return 0;
}
static inline uint32_t *via_align_buffer(struct drm_via_private *dev_priv,
uint32_t * vb, int qw_count)
{
for (; qw_count > 0; --qw_count) {
VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
}
return vb;
}
/*
* Hooks a segment of data into the tail of the ring-buffer by
* modifying the pause address stored in the buffer itself. If
* the regulator has already paused, restart it.
*/
static int via_hook_segment(struct drm_via_private *dev_priv,
uint32_t pause_addr_hi, uint32_t pause_addr_lo,
int no_pci_fire)
{
int paused, count;
uint32_t *paused_at = dev_priv->last_pause_ptr;
uint32_t reader, ptr;
uint32_t diff;
paused = 0;
via_flush_write_combine();
(void)ioread32((uint32_t *) (via_get_dma(dev_priv) - 1));
iowrite32(pause_addr_lo, paused_at);
via_flush_write_combine();
(void)ioread32(paused_at);
reader = ioread32(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
dev_priv->dma_offset + 4;
dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
/*
* There is a possibility that the command reader will
* miss the new pause address and pause on the old one.
* In that case we need to program the new start address
* using PCI.
*/
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
count = 10000000;
while ((diff < CMDBUF_ALIGNMENT_SIZE) && count--) {
paused = (VIA_READ(0x41c) & 0x80000000);
if (paused)
break;
reader = ioread32(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
}
paused = VIA_READ(0x41c) & 0x80000000;
if (paused && !no_pci_fire) {
reader = ioread32(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
diff &= (dev_priv->dma_high - 1);
if (diff < (dev_priv->dma_high >> 1)) {
if (diff != 0) {
uint32_t __iomem *rekick;
DRM_INFO("Paused at incorrect address. "
"0x%08x, 0x%08x 0x%08x 0x%08x. Restarting.\n",
ptr, reader, VIA_READ(0x40c),
dev_priv->dma_diff);
/*
* Obtain the new pause address the command
* reader was supposed to pick up.
*/
rekick = (uint32_t *)
dev_priv->dma_ptr +
((reader - dev_priv->dma_offset +
dev_priv->dma_diff - 4) >> 2);
pause_addr_lo = ioread32(rekick);
pause_addr_hi = ioread32(--rekick);
DRM_INFO("Restarting 0x%08x 0x%08x\n",
pause_addr_hi, pause_addr_lo);
}
/*
* There is a concern that these writes may stall the PCI bus
* if the GPU is not idle. However, idling the GPU first
* doesn't make a difference.
*/
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
/*
* Really need to flush PCI posting here,
* but some register reads will
* flush AGP completely according to docs.
* FIXME: Find a suitable register to read.
*/
}
}
return paused;
}
static inline int via_is_idle(uint32_t status)
{
return ((status & (VIA_VR_QUEUE_BUSY |
VIA_CMD_RGTR_BUSY |
VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY)) == (VIA_VR_QUEUE_BUSY));
}
void via_wait_idle(struct drm_via_private *dev_priv)
{
unsigned long _end = jiffies + VIA_IDLE_TIMEOUT;
uint32_t status;
status = VIA_READ(VIA_REG_STATUS);
do {
if (unlikely(!via_is_idle(status))) {
schedule();
status = VIA_READ(VIA_REG_STATUS);
}
} while (unlikely(!time_after_eq(jiffies, _end) &&
!via_is_idle(status)));
if (unlikely(!via_is_idle(status)))
DRM_INFO("Warning: Idle timeout.\n");
return;
}
static uint32_t *via_align_cmd(struct drm_via_private *dev_priv,
uint32_t cmd_type, uint32_t addr,
uint32_t * cmd_addr_hi, uint32_t * cmd_addr_lo,
int skip_wait)
{
uint32_t agp_base;
uint32_t cmd_addr, addr_lo, addr_hi;
uint32_t *vb;
uint32_t qw_pad_count;
if (!skip_wait)
via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
vb = via_get_dma(dev_priv);
VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
agp_base = dev_priv->dma_offset;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
cmd_addr = (addr) ? addr :
agp_base + dev_priv->dma_low - 4 + (qw_pad_count << 3);
addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
(cmd_addr & HC_HAGPBpL_MASK));
addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
return vb;
}
static void via_cmdbuf_start(struct drm_via_private *dev_priv)
{
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t start_addr, start_addr_lo;
uint32_t end_addr, end_addr_lo;
uint32_t command;
uint32_t agp_base;
uint32_t ptr;
uint32_t reader;
int count;
dev_priv->dma_low = 0;
agp_base = dev_priv->dma_offset;
start_addr = agp_base;
end_addr = agp_base + dev_priv->dma_high;
start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
((end_addr & 0xff000000) >> 16));
dev_priv->last_pause_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
&pause_addr_hi, &pause_addr_lo, 1) - 1;
via_flush_write_combine();
(void)ioread32((uint32_t *) dev_priv->last_pause_ptr);
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, command);
VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
VIA_READ(VIA_REG_TRANSPACE);
dev_priv->dma_diff = 0;
count = 10000000;
while (!(VIA_READ(0x41c) & 0x80000000) && count--) ;
reader = ioread32(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
dev_priv->dma_offset + 4;
/*
* This is the difference between where we tell the
* command reader to pause and where it actually pauses.
* This differs between hardware implementations, so we need to
* detect it.
*/
dev_priv->dma_diff = ptr - reader;
}
static inline void via_dummy_bitblt(struct drm_via_private *dev_priv)
{
uint32_t *vb = via_get_dma(dev_priv);
VIA_OUT_RING_H1(VIA_REG_DSTPOS, (0 | (0 << 16)));
VIA_OUT_RING_H1(VIA_REG_DIMENSION, 0 | (0 << 16));
VIA_OUT_RING_H1(VIA_REG_GECMD, VIA_GEC_BLT | VIA_GEC_FIXCOLOR_PAT |
0xAA000000);
}
static void via_cmdbuf_rewind(struct drm_via_private *dev_priv)
{
uint32_t agp_base;
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t jump_addr_lo, jump_addr_hi, hook_addr;
uint32_t *hook_ptr;
uint32_t dma_low_save1, dma_low_save2;
agp_base = dev_priv->dma_offset;
hook_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0,
&jump_addr_hi, &jump_addr_lo, 0);
via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
&hook_addr, 0);
iowrite32(hook_addr, --hook_ptr);
dev_priv->dma_wrap = dev_priv->dma_low;
/*
* Wrap command buffer to the beginning.
*/
dev_priv->dma_low = 0;
if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
DRM_ERROR("via_cmdbuf_jump failed\n");
}
via_dummy_bitblt(dev_priv);
via_dummy_bitblt(dev_priv);
hook_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
iowrite32(pause_addr_lo, --hook_ptr);
dma_low_save1 = dev_priv->dma_low;
/*
* Now, set a trap that will pause the regulator if it tries to rerun the old
* command buffer. (This may happen if via_hook_segment detects a command regulator pause
* and reissues the jump command over PCI, while the regulator has already taken the jump
* and actually paused at the current buffer end.)
* There appears to be no other way to detect this condition, since the hw_addr_pointer
* does not seem to get updated immediately when a jump occurs.
*/
hook_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
iowrite32(pause_addr_lo, --hook_ptr);
dma_low_save2 = dev_priv->dma_low;
dev_priv->dma_low = dma_low_save1;
via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
dev_priv->dma_low = dma_low_save2;
via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
static void via_cmdbuf_flush(struct drm_via_private *dev_priv,
uint32_t cmd_type)
{
uint32_t pause_addr_lo, pause_addr_hi, hook;
uint32_t __iomem *hook_addr;
hook_addr =
via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo,
0);
if (cmd_type == HC_HAGPBpID_PAUSE) {
via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &hook, 0);
iowrite32(hook, --hook_addr);
}
via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
static void via_cmdbuf_pause(struct drm_via_private *dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
void via_dma_takedown(struct drm_via_private *dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
via_wait_idle(dev_priv);
while (!list_empty(&dev_priv->dma_trackers))
via_traverse_trackers(dev_priv);
}
int via_copy_cmdbuf(struct drm_via_private *dev_priv,
uint64_t cmd_buffer,
uint32_t size,
uint32_t mechanism, uint32_t ** cmdbuf_addr, int *is_iomem)
{
void __user *commands = (void __user *)(unsigned long)cmd_buffer;
uint32_t *vb;
int ret;
if (unlikely(size > VIA_PCI_BUF_SIZE)) {
DRM_ERROR("Command buffer too large.\n");
return -ENOMEM;
}
if (mechanism == _VIA_MECHANISM_AGP && drm_via_disable_verifier) {
vb = via_check_dma(dev_priv, size + VIA_FENCE_EXTRA);
if (unlikely(!vb)) {
DRM_ERROR("No space in AGP ring buffer.\n");
return -EBUSY;
}
*is_iomem = 1;
} else {
vb = (uint32_t *) dev_priv->pci_buf;
*is_iomem = 0;
}
ret = copy_from_user(vb, commands, size);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed copying command "
"buffer from user space.\n");
return ret;
}
*cmdbuf_addr = vb;
return 0;
}
int via_dispatch_commands(struct drm_device *dev, unsigned long size,
uint32_t mechanism)
{
struct drm_via_private *dev_priv = via_priv(dev);
uint32_t *vb;
uint32_t seq;
int ret;
switch (mechanism) {
case _VIA_MECHANISM_AGP:
if (!drm_via_disable_verifier) {
vb = via_check_dma(dev_priv, size + VIA_FENCE_EXTRA);
if (unlikely(!vb)) {
DRM_ERROR("No space in AGP ring buffer.\n");
return -EBUSY;
}
memcpy_toio(vb, dev_priv->pci_buf, size);
}
dev_priv->dma_low += size;
seq =
atomic_add_return(1, &dev_priv->fence_seq[VIA_ENGINE_CMD]);
via_emit_fence_seq(dev_priv, VIA_FENCE_OFFSET_CMD, seq);
via_add_tracker(dev_priv, seq);
via_cmdbuf_pause(dev_priv);
return 0;
case _VIA_MECHANISM_PCI:
via_wait_idle(dev_priv);
ret = via_parse_command_stream(dev,
(uint32_t *) dev_priv->pci_buf,
size);
seq =
atomic_add_return(1, &dev_priv->fence_seq[VIA_ENGINE_CMD]);
via_wait_idle(dev_priv);
via_blit_sequence(dev_priv, VIA_FENCE_OFFSET_CMD, seq);
via_wait_idle(dev_priv);
return ret;
default:
return -EINVAL;
}
return -EINVAL;
}

View file

@@ -0,0 +1,784 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
*
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
#include "drmP.h"
#include "ochr_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"
#include <linux/pagemap.h>
#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
enum via_dmablit_sys_type {
VIA_DMABLIT_BO,
VIA_DMABLIT_USER
};
struct drm_via_descriptor {
uint32_t mem_addr;
uint32_t dev_addr;
uint32_t size;
uint32_t next;
};
struct via_dmablit {
uint32_t num_lines;
uint32_t line_length;
uint32_t vram_offs;
uint32_t vram_stride;
uint32_t mem_pg_offs;
uint32_t mem_stride;
int to_fb;
};
struct drm_via_sg_info {
uint32_t engine;
uint32_t fence_seq;
struct list_head head;
struct page **pages;
unsigned long num_pages;
enum via_dmablit_sys_type sys_type;
struct drm_via_descriptor **desc_pages;
int num_desc_pages;
int num_desc;
enum dma_data_direction direction;
unsigned char *bounce_buffer;
dma_addr_t chain_start;
unsigned int descriptors_per_page;
int aborted;
enum {
dr_via_device_mapped,
dr_via_desc_pages_alloc,
dr_via_pages_locked,
dr_via_pages_alloc,
dr_via_sg_init
} state;
};
/*
* Unmap a DMA mapping.
*/
static void
via_unmap_blit_from_device(struct pci_dev *pdev, struct drm_via_sg_info *vsg)
{
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
struct drm_via_descriptor *desc_ptr =
vsg->desc_pages[cur_descriptor_page] + descriptor_this_page;
dma_addr_t next = vsg->chain_start;
while (num_desc--) {
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr),
DMA_TO_DEVICE);
dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size,
vsg->direction);
next = (dma_addr_t) desc_ptr->next;
desc_ptr--;
}
}
/*
* If mode == 0, count how many descriptors are needed.
* If mode == 1, map the DMA pages for the device, build the descriptor chain and map the
* descriptors as well. The descriptors are run in reverse order by the hardware because we
* are not allowed to update the 'next' field without syncing calls once a descriptor is
* already mapped.
*/
static void
via_map_blit_for_device(struct pci_dev *pdev,
const struct via_dmablit *xfer,
struct drm_via_sg_info *vsg, int mode)
{
unsigned cur_descriptor_page = 0;
unsigned num_descriptors_this_page = 0;
unsigned long mem_addr = xfer->mem_pg_offs;
unsigned long cur_mem;
uint32_t fb_addr = xfer->vram_offs;
uint32_t cur_fb;
unsigned long line_len;
unsigned remaining_len;
int num_desc = 0;
int cur_line;
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
struct drm_via_descriptor *desc_ptr = NULL;
if (mode == 1)
desc_ptr = vsg->desc_pages[cur_descriptor_page];
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
line_len = xfer->line_length;
cur_fb = fb_addr;
cur_mem = mem_addr;
while (line_len > 0) {
remaining_len =
min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
line_len -= remaining_len;
if (mode == 1) {
struct page *page;
page = vsg->pages[cur_mem >> PAGE_SHIFT];
BUG_ON(cur_descriptor_page >=
vsg->num_desc_pages);
desc_ptr->mem_addr =
dma_map_page(&pdev->dev,
page,
VIA_PGOFF(cur_mem),
remaining_len, vsg->direction);
desc_ptr->dev_addr = cur_fb;
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
next =
dma_map_single(&pdev->dev, desc_ptr,
sizeof(*desc_ptr),
DMA_TO_DEVICE);
desc_ptr++;
if (++num_descriptors_this_page >=
vsg->descriptors_per_page) {
num_descriptors_this_page = 0;
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
}
}
num_desc++;
cur_mem += remaining_len;
cur_fb += remaining_len;
}
mem_addr += xfer->mem_stride;
fb_addr += xfer->vram_stride;
}
if (mode == 1) {
vsg->chain_start = next;
vsg->state = dr_via_device_mapped;
}
vsg->num_desc = num_desc;
}
/*
* Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void via_free_sg_info(struct pci_dev *pdev, struct drm_via_sg_info *vsg)
{
struct page *page;
int i;
switch (vsg->state) {
case dr_via_device_mapped:
via_unmap_blit_from_device(pdev, vsg);
case dr_via_desc_pages_alloc:
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
free_page((unsigned long)vsg->desc_pages[i]);
}
kfree(vsg->desc_pages);
case dr_via_pages_locked:
if (vsg->sys_type == VIA_DMABLIT_USER) {
for (i = 0; i < vsg->num_pages; ++i) {
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page)
&& (DMA_FROM_DEVICE ==
vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
}
}
case dr_via_pages_alloc:
if (vsg->sys_type == VIA_DMABLIT_USER)
vfree(vsg->pages);
default:
vsg->state = dr_via_sg_init;
}
if (vsg->bounce_buffer) {
vfree(vsg->bounce_buffer);
vsg->bounce_buffer = NULL;
}
}
/*
* Fire a blit engine.
*/
static void
via_fire_dmablit(struct drm_device *dev, struct drm_via_sg_info *vsg,
int engine)
{
struct drm_via_private *dev_priv = via_priv(dev);
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine * 0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine * 0x10, 0);
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04,
VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | VIA_DMA_CSR_DE);
VIA_WRITE(VIA_PCI_DMA_MR0 + engine * 0x04,
VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine * 0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DPR0 + engine * 0x10, vsg->chain_start);
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04,
VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
VIA_READ(VIA_PCI_DMA_CSR0 + engine * 0x04);
}
#if 0
/*
* Obtain a page pointer array and lock all pages into system memory.
*/
static int
via_lock_all_dma_pages(struct drm_via_sg_info *vsg,
struct drm_via_dmablit *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
vsg->num_pages =
VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;
if (NULL ==
(vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm,
(unsigned long)xfer->mem_addr,
vsg->num_pages,
(vsg->direction == DMA_FROM_DEVICE),
0, vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
return -EINVAL;
}
vsg->state = dr_via_pages_locked;
DRM_DEBUG("DMA pages locked\n");
return 0;
}
#endif
/*
* Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the
* pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
* quite large for some blits, and the pages don't need to be contiguous.
*/
static int via_alloc_desc_pages(struct drm_via_sg_info *vsg)
{
int i;
vsg->descriptors_per_page =
PAGE_SIZE / sizeof(struct drm_via_descriptor);
vsg->num_desc_pages =
(vsg->num_desc + vsg->descriptors_per_page -
1) / vsg->descriptors_per_page;
if (NULL ==
(vsg->desc_pages =
kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
return -ENOMEM;
memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
vsg->state = dr_via_desc_pages_alloc;
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] = (struct drm_via_descriptor *)
__get_free_page(GFP_KERNEL)))
return -ENOMEM;
}
DRM_DEBUG("Allocated %d pages for %d descriptors.\n",
vsg->num_desc_pages, vsg->num_desc);
return 0;
}
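/*
 * Sizing example (assuming 4 KiB pages): struct drm_via_descriptor is four
 * 32-bit words, i.e. 16 bytes, so descriptors_per_page is 4096 / 16 = 256.
 * A buffer-object blit uses one descriptor per page (mem_stride == PAGE_SIZE),
 * so a 16 MiB blit needs 4096 descriptors, rounded up to 16 descriptor pages.
 */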
static void via_abort_dmablit(struct drm_device *dev, int engine)
{
struct drm_via_private *dev_priv = via_priv(dev);
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_TA);
}
static void via_dmablit_engine_off(struct drm_device *dev, int engine)
{
struct drm_via_private *dev_priv = via_priv(dev);
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04,
VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
/*
* The dmablit part of the IRQ handler. Only reasonably fast things are done here.
* The rest, like unmapping and freeing memory for completed blits, is done in a separate
* workqueue task. Basically, the interrupt handler submits a new blit to the engine, while
* the workqueue task takes care of the processing associated with the old blit.
*/
void via_dmablit_handler(struct drm_device *dev, int engine)
{
struct drm_via_private *dev_priv = via_priv(dev);
struct drm_via_blitq *blitq = dev_priv->blit_queues + engine;
struct drm_via_sg_info *cur;
int done_transfer;
unsigned long irqsave = 0;
uint32_t status = 0;
spin_lock_irqsave(&blitq->blit_lock, irqsave);
done_transfer = blitq->is_active &&
((status =
VIA_READ(VIA_PCI_DMA_CSR0 + engine * 0x04)) & VIA_DMA_CSR_TD);
done_transfer = done_transfer || (blitq->aborting
&& !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
cur->aborted = blitq->aborting;
blitq->completed_fence_seq = cur->fence_seq;
list_add_tail(&cur->head, &blitq->done_blits);
/*
* Clear transfer done flag.
*/
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine * 0x04, VIA_DMA_CSR_TD);
blitq->is_active = 0;
blitq->aborting = 0;
schedule_work(&blitq->wq);
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
/*
* Abort transfer after one second.
*/
via_abort_dmablit(dev, engine);
blitq->aborting = 1;
blitq->end = jiffies + DRM_HZ;
}
if (!blitq->is_active) {
if (!list_empty(&blitq->pending_blits)) {
cur = list_entry(blitq->pending_blits.next,
struct drm_via_sg_info, head);
list_del_init(&cur->head);
via_fire_dmablit(dev, cur, engine);
blitq->is_active = 1;
blitq->cur = cur;
blitq->end = jiffies + DRM_HZ;
if (!timer_pending(&blitq->poll_timer)) {
blitq->poll_timer.expires = jiffies + 1;
add_timer(&blitq->poll_timer);
}
} else {
if (timer_pending(&blitq->poll_timer)) {
del_timer(&blitq->poll_timer);
}
via_dmablit_engine_off(dev, engine);
}
}
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
/*
* A timer that regularly polls the blit engine in cases where we don't have interrupts:
* a) Broken hardware (typically those that don't have any video capture facility).
* b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
* The timer and hardware IRQs can and do work in parallel. If the hardware has
* IRQs, they will shorten the latency somewhat.
*/
static void via_dmablit_timer(unsigned long data)
{
struct drm_via_blitq *blitq = (struct drm_via_blitq *)data;
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((struct drm_via_private *)dev->dev_private)->blit_queues);
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
(unsigned long)jiffies);
via_dmablit_handler(dev, engine);
if (!timer_pending(&blitq->poll_timer)) {
blitq->poll_timer.expires = jiffies + 1;
add_timer(&blitq->poll_timer);
/*
* Rerun handler to delete timer if engines are off, and
* to shorten abort latency. This is a little nasty.
*/
via_dmablit_handler(dev, engine);
}
}
/*
* Workqueue task that frees data and mappings associated with a blit.
* Also wakes up waiting processes. Each of these tasks handles one
* blit engine only and may not be called on each interrupt.
*/
static void via_dmablit_workqueue(struct work_struct *work)
{
struct drm_via_blitq *blitq =
container_of(work, struct drm_via_blitq, wq);
struct drm_device *dev = blitq->dev;
unsigned long irqsave;
struct drm_via_sg_info *cur_sg;
DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
(blitq -
((struct drm_via_private *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
while (!list_empty(&blitq->done_blits)) {
cur_sg = list_entry(blitq->done_blits.next,
struct drm_via_sg_info, head);
list_del_init(&cur_sg->head);
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
DRM_WAKEUP(&blitq->busy_queue);
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
/*
* Init all blit engines. Currently we use two, but some hardware has four.
*/
void via_init_dmablit(struct drm_device *dev)
{
int i;
struct drm_via_private *dev_priv = via_priv(dev);
struct drm_via_blitq *blitq;
pci_set_master(dev->pdev);
for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
blitq->cur = NULL;
blitq->num_pending = 0;
blitq->is_active = 0;
blitq->aborting = 0;
blitq->num_pending = 0;
INIT_LIST_HEAD(&blitq->pending_blits);
INIT_LIST_HEAD(&blitq->done_blits);
spin_lock_init(&blitq->blit_lock);
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
init_timer(&blitq->poll_timer);
blitq->poll_timer.function = &via_dmablit_timer;
blitq->poll_timer.data = (unsigned long)blitq;
}
}
int
via_dmablit_bo(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem,
struct page **pages, int *fence_class)
{
struct ttm_bo_device *bdev = bo->bdev;
struct drm_via_private *dev_priv =
container_of(bdev, struct drm_via_private, bdev);
struct drm_device *dev = dev_priv->dev;
int draw = (new_mem->mem_type == TTM_PL_VRAM);
int ret = 0;
struct ttm_tt *ttm = bo->ttm;
struct via_dmablit xfer;
int engine;
struct drm_via_sg_info *vsg;
struct drm_via_blitq *blitq;
unsigned long irq_flags;
int i;
struct page *d;
BUG_ON(!draw && (bo->mem.mem_type != TTM_PL_VRAM));
BUG_ON(!ttm);
for (i = 0; i < ttm->num_pages; ++i) {
d = ttm_tt_get_page(ttm, i);
if (unlikely(d == NULL))
return -ENOMEM;
}
vsg = kmalloc(sizeof(*vsg), GFP_KERNEL);
if (unlikely(vsg == NULL))
return -ENOMEM;
if (draw) {
struct ttm_mem_type_manager *man =
&bdev->man[new_mem->mem_type];
vsg->direction = DMA_TO_DEVICE;
xfer.vram_offs = (new_mem->mm_node->start << PAGE_SHIFT) +
man->gpu_offset;
engine = 0;
} else {
vsg->direction = DMA_FROM_DEVICE;
xfer.vram_offs = bo->offset;
engine = 1;
}
blitq = dev_priv->blit_queues + engine;
vsg->bounce_buffer = NULL;
vsg->state = dr_via_sg_init;
xfer.mem_stride = PAGE_SIZE;
xfer.line_length = xfer.mem_stride;
xfer.vram_stride = xfer.mem_stride;
xfer.num_lines = bo->num_pages;
xfer.to_fb = draw;
xfer.mem_pg_offs = 0;
vsg->sys_type = VIA_DMABLIT_BO;
vsg->pages = (pages == NULL) ? ttm->pages : pages;
vsg->num_pages = bo->num_pages;
vsg->state = dr_via_pages_alloc;
vsg->state = dr_via_pages_locked;
via_map_blit_for_device(dev->pdev, &xfer, vsg, 0);
ret = via_alloc_desc_pages(vsg);
if (unlikely(ret != 0))
goto out_err;
via_map_blit_for_device(dev->pdev, &xfer, vsg, 1);
spin_lock_irqsave(&blitq->blit_lock, irq_flags);
list_add_tail(&vsg->head, &blitq->pending_blits);
++blitq->num_pending;
*fence_class = engine + VIA_ENGINE_DMA0;
vsg->fence_seq =
atomic_add_return(1, &dev_priv->fence_seq[*fence_class]);
spin_unlock_irqrestore(&blitq->blit_lock, irq_flags);
via_dmablit_handler(dev, engine);
return 0;
out_err:
via_free_sg_info(dev->pdev, vsg);
kfree(vsg);
return ret;
}
#if 0
/*
* Build all info and do all mappings required for a blit.
*/
static int
via_build_sg_info(struct drm_device *dev, struct drm_via_sg_info *vsg,
struct drm_via_dmablit *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
vsg->bounce_buffer = NULL;
vsg->state = dr_via_sg_init;
if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
DRM_ERROR("Zero size bitblt.\n");
return -EINVAL;
}
/*
* The check below is a driver limitation, not a hardware one. We
* don't want to lock unused pages, and we don't want to incorporate the
* extra logic needed to avoid them. Make sure there are none.
* (Not a big limitation anyway.)
*/
if ((xfer->mem_stride - xfer->line_length) > 2 * PAGE_SIZE) {
DRM_ERROR("Too large system memory stride. Stride: %d, "
"Length: %d\n", xfer->mem_stride, xfer->line_length);
return -EINVAL;
}
if ((xfer->mem_stride == xfer->line_length) &&
(xfer->fb_stride == xfer->line_length)) {
xfer->mem_stride *= xfer->num_lines;
xfer->line_length = xfer->mem_stride;
xfer->fb_stride = xfer->mem_stride;
xfer->num_lines = 1;
}
/*
* Don't lock an arbitrarily large number of pages, since that would open a
* denial-of-service hole.
*/
if (xfer->num_lines > 2048
|| (xfer->num_lines * xfer->mem_stride > (2048 * 2048 * 4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return -EINVAL;
}
/*
* We allow a negative fb stride to permit flipping of images during
* transfer.
*/
if (xfer->mem_stride < xfer->line_length ||
abs(xfer->fb_stride) < xfer->line_length) {
DRM_ERROR("Invalid frame-buffer / memory stride.\n");
return -EINVAL;
}
/*
* A hardware bug seems to be worked around if system memory addresses start on
* 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted
* about this. Meanwhile, the following restrictions are imposed:
*/
#ifdef VIA_BUGFREE
if ((((unsigned long)xfer->mem_addr & 3) !=
((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) &&
((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
}
#else
if ((((unsigned long)xfer->mem_addr & 15) ||
((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) &&
((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
}
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
DRM_ERROR("Could not lock DMA pages.\n");
via_free_sg_info(dev->pdev, vsg);
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
if (0 != (ret = via_alloc_desc_pages(vsg))) {
DRM_ERROR("Could not allocate DMA descriptor pages.\n");
via_free_sg_info(dev->pdev, vsg);
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
return 0;
}
/*
* Grab a free slot. Build blit info and queue a blit.
*/
static int via_dmablit(struct drm_device *dev, struct drm_via_dmablit *xfer,
int interruptible)
{
struct drm_via_private *dev_priv = via_priv(dev);
struct drm_via_sg_info *vsg;
struct drm_via_blitq *blitq;
int ret;
int engine;
unsigned long irqsave;
if (dev_priv == NULL) {
DRM_ERROR("Called without initialization.\n");
return -EINVAL;
}
engine = (xfer->to_fb) ? 0 : 1;
blitq = dev_priv->blit_queues + engine;
vsg = kmalloc(sizeof(*vsg), GFP_KERNEL);
if (unlikely(vsg == NULL))
return -ENOMEM;
ret = via_build_sg_info(dev, vsg, xfer);
if (unlikely(ret != 0))
goto out_err0;
spin_lock_irqsave(&blitq->blit_lock, irqsave);
list_add_tail(&vsg->head, &blitq->pending_blits);
++blitq->num_pending;
vsg->fence_seq =
atomic_add_return(1,
&dev_priv->fence_seq[engine + VIA_ENGINE_DMA0]);
xfer->sync.sync_handle = vsg->fence_seq;
xfer->sync.engine = engine + VIA_ENGINE_DMA0;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
via_dmablit_handler(dev, engine);
return 0;
out_err0:
kfree(vsg);
return ret;
}
#endif

View file

@@ -0,0 +1,116 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
*
* Copyright (c) 2005 Thomas Hellstrom.
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Thomas Hellstrom.
* Register info from Digeo Inc.
*/
#ifndef _VIA_DMABLIT_H
#define _VIA_DMABLIT_H
#include <linux/dma-mapping.h>
#define VIA_NUM_BLIT_ENGINES 2
#define VIA_NUM_BLIT_SLOTS 8
struct drm_via_sg_info;
struct drm_via_blitq {
uint32_t completed_fence_seq;
struct drm_device *dev;
unsigned long end;
int aborting;
int is_active;
unsigned int num_pending;
struct list_head pending_blits;
struct list_head done_blits;
struct drm_via_sg_info *cur;
spinlock_t blit_lock;
wait_queue_head_t busy_queue;
struct work_struct wq;
struct timer_list poll_timer;
};
/*
* PCI DMA Registers
* Channels 2 & 3 don't seem to be implemented in hardware.
*/
#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
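/*
 * The per-channel register blocks above repeat with a fixed stride, which is
 * what the "base + engine * offset" addressing in via_dmablit.c relies on:
 * MAR/DAR/BCR/DPR repeat every 0x10 bytes per channel, while MR and CSR
 * repeat every 0x04 bytes. For example,
 * VIA_PCI_DMA_CSR0 + 1 * 0x04 = 0xE90 + 0x04 = 0xE94 = VIA_PCI_DMA_CSR1.
 */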
/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
/* MR */
#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
/* CSR */
#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
extern void via_dmablit_handler(struct drm_device *dev, int engine);
extern void via_init_dmablit(struct drm_device *dev);
#endif

View file

@@ -0,0 +1,195 @@
/*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "ochr_drm.h"
#include "via_drv.h"
#include "drm_pciids.h"
int drm_via_disable_verifier;
MODULE_PARM_DESC(disable_verifier, "Disable GPU command security check. "
"DANGEROUS!!");
module_param_named(disable_verifier, drm_via_disable_verifier, int, 0600);
static int dri_library_name(struct drm_device *dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "unichrome\n");
}
static struct pci_device_id pciidlist[] = {
viadrv_PCI_IDS
};
#define DRM_IOCTL_VIA_DEC_FUTEX \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX,\
struct drm_via_futex)
#define DRM_IOCTL_VIA_TTM_EXECBUF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_TTM_EXECBUF,\
struct drm_via_ttm_execbuf_arg)
#define DRM_VIA_TTM_PL_CREATE (TTM_PL_CREATE + DRM_VIA_PLACEMENT_OFFSET)
#define DRM_VIA_TTM_PL_REFERENCE (TTM_PL_REFERENCE + DRM_VIA_PLACEMENT_OFFSET)
#define DRM_VIA_TTM_PL_UNREF (TTM_PL_UNREF + DRM_VIA_PLACEMENT_OFFSET)
#define DRM_VIA_TTM_PL_SYNCCPU (TTM_PL_SYNCCPU + DRM_VIA_PLACEMENT_OFFSET)
#define DRM_VIA_TTM_PL_WAITIDLE (TTM_PL_WAITIDLE + DRM_VIA_PLACEMENT_OFFSET)
#define DRM_VIA_TTM_PL_SETSTATUS (TTM_PL_SETSTATUS + DRM_VIA_PLACEMENT_OFFSET)
#define DRM_VIA_TTM_FENCE_SIGNALED (TTM_FENCE_SIGNALED + DRM_VIA_FENCE_OFFSET)
#define DRM_VIA_TTM_FENCE_FINISH (TTM_FENCE_FINISH + DRM_VIA_FENCE_OFFSET)
#define DRM_VIA_TTM_FENCE_UNREF (TTM_FENCE_UNREF + DRM_VIA_FENCE_OFFSET)
#define DRM_IOCTL_VIA_TTM_PL_CREATE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_TTM_PL_CREATE,\
union ttm_pl_create_arg)
#define DRM_IOCTL_VIA_TTM_PL_REFERENCE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_TTM_PL_REFERENCE,\
union ttm_pl_reference_arg)
#define DRM_IOCTL_VIA_TTM_PL_UNREF \
DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_TTM_PL_UNREF,\
struct ttm_pl_reference_req)
#define DRM_IOCTL_VIA_TTM_PL_SYNCCPU \
DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_TTM_PL_SYNCCPU,\
struct ttm_synccpu_arg)
#define DRM_IOCTL_VIA_TTM_PL_WAITIDLE \
DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_TTM_PL_WAITIDLE,\
struct ttm_waitidle_arg)
#define DRM_IOCTL_VIA_TTM_PL_SETSTATUS \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_TTM_PL_SETSTATUS,\
union ttm_pl_setstatus_arg)
#define DRM_IOCTL_VIA_TTM_FENCE_SIGNALED \
DRM_IOWR (DRM_COMMAND_BASE + DRM_VIA_TTM_FENCE_SIGNALED, \
union ttm_fence_signaled_arg)
#define DRM_IOCTL_VIA_TTM_FENCE_FINISH \
DRM_IOWR (DRM_COMMAND_BASE + DRM_VIA_TTM_FENCE_FINISH, \
union ttm_fence_finish_arg)
#define DRM_IOCTL_VIA_TTM_FENCE_UNREF \
DRM_IOW (DRM_COMMAND_BASE + DRM_VIA_TTM_FENCE_UNREF, \
struct ttm_fence_unref_arg)
static struct drm_ioctl_desc via_ioctls[] = {
DRM_IOCTL_DEF(DRM_VIA_VT, via_vt_ioctl, DRM_AUTH | DRM_MASTER),
DRM_IOCTL_DEF(DRM_VIA_GET_PARAM, via_getparam_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_EXTENSION, via_extension_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_EXECBUF, via_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_PL_CREATE, via_pl_create_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_PL_REFERENCE, via_pl_reference_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_PL_UNREF, via_pl_unref_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_PL_SYNCCPU, via_pl_synccpu_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_PL_WAITIDLE, via_pl_waitidle_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_PL_SETSTATUS, via_pl_setstatus_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_FENCE_SIGNALED,
via_fence_signaled_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_FENCE_FINISH, via_fence_finish_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_TTM_FENCE_UNREF, via_fence_unref_ioctl,
DRM_AUTH)
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
.context_ctor = via_context_ctor,
.context_dtor = via_context_dtor,
.get_vblank_counter = via_get_vblank_counter,
.enable_vblank = via_enable_vblank,
.disable_vblank = via_disable_vblank,
.irq_preinstall = via_driver_irq_preinstall,
.irq_postinstall = via_driver_irq_postinstall,
.irq_uninstall = via_driver_irq_uninstall,
.irq_handler = via_driver_irq_handler,
.dma_quiescent = NULL,
.dri_library_name = dri_library_name,
.reclaim_buffers = drm_core_reclaim_buffers,
.reclaim_buffers_locked = NULL,
.firstopen = via_firstopen,
.reclaim_buffers_idlelocked = NULL,
.lastclose = via_lastclose,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = via_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(via_ioctls),
.fops = {
.owner = THIS_MODULE,
.open = via_open,
.release = via_release,
.unlocked_ioctl = drm_unlocked_ioctl,
.mmap = via_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.read = via_ttm_read,
.write = via_ttm_write},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
.resume = via_resume,
.suspend = via_suspend,
},
.fence_driver = NULL,
.bo_driver = NULL,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRM_VIA_DRIVER_DATE,
.major = DRM_VIA_DRIVER_MAJOR,
.minor = DRM_VIA_DRIVER_MINOR,
.patchlevel = DRM_VIA_DRIVER_PATCHLEVEL
};
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_dev(pdev, ent, &driver);
}
static int __init via_init(void)
{
via_init_command_verifier();
return drm_init(&driver, pciidlist);
}
static void __exit via_exit(void)
{
drm_exit(&driver);
}
module_init(via_init);
module_exit(via_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

View file

@@ -0,0 +1,355 @@
/*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
#include "via_verifier.h"
#include "ochr_drm.h"
#include "via_dmablit.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_fence_driver.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_memory.h"
#define DRIVER_AUTHOR "Tungsten Graphics"
#define DRIVER_NAME "openchrome"
#define DRIVER_DESC "VIA Unichrome / Pro / II"
#define VIA_VQ_SIZE (512*1024)
#define VIA_AGPC_SIZE (2*1024*1024)
#define VIA_AGPC_MASK (VIA_AGPC_SIZE -1)
#define VIA_AGPBO_SIZE (16*1024*1024)
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
/*
* Registers go here.
*/
#define CMDBUF_ALIGNMENT_SIZE (0x100)
#define CMDBUF_ALIGNMENT_MASK (0x0ff)
/* defines for VIA 2D registers */
#define VIA_REG_GEMODE 0x004
#define VIA_GEM_8bpp 0x00000000
#define VIA_GEM_16bpp 0x00000100
#define VIA_GEM_32bpp 0x00000300
#define VIA_REG_SRCBASE 0x030
#define VIA_REG_DSTBASE 0x034
#define VIA_REG_PITCH 0x038
#define VIA_PITCH_ENABLE 0x80000000
#define VIA_REG_SRCCOLORKEY 0x01C
#define VIA_REG_KEYCONTROL 0x02C
#define VIA_REG_SRCPOS 0x008
#define VIA_REG_DSTPOS 0x00C
#define VIA_REG_GECMD 0x000
#define VIA_GEC_BLT 0x00000001
#define VIA_GEC_INCX 0x00000000
#define VIA_GEC_DECY 0x00004000
#define VIA_GEC_INCY 0x00000000
#define VIA_GEC_DECX 0x00008000
#define VIA_GEC_FIXCOLOR_PAT 0x00002000
#define VIA_REG_DIMENSION 0x010 /* width and height */
#define VIA_REG_FGCOLOR 0x018
#define VIA_ROP_CLEAR 0x00
#define VIA_ROP_SRC 0xCC
#define VIA_ROP_PAT 0xF0
#define VIA_ROP_SET 0xFF
/* defines for VIA 3D registers */
#define VIA_REG_STATUS 0x400
#define VIA_REG_TRANSET 0x43C
#define VIA_REG_TRANSPACE 0x440
/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000002 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000001 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
#define VIA_PCI_BUF_SIZE 60000
#define VIA_FIRE_BUF_SIZE 1024
#define VIA_FENCE_OFFSET_CMD 0x000
#define VIA_IDLE_TIMEOUT (3*HZ)
/*
* Extension offsets.
*/
#define DRM_VIA_DEC_FUTEX 0x03
#define DRM_VIA_TTM_EXECBUF 0x04
#define DRM_VIA_PLACEMENT_OFFSET 0x10
#define DRM_VIA_FENCE_OFFSET 0x18
enum via_barriers {
VIA_BARRIER_HQV0 = 0,
VIA_BARRIER_HQV1,
VIA_BARRIER_MPEG0,
VIA_BARRIER_MPEG1,
VIA_NUM_BARRIERS
};
struct via_fpriv {
struct ttm_object_file *tfile;
};
/*
* Context private stuff. Mainly buffers used
* for execbuf that we don't want to allocate
* for each call, and that may be used by
* a number of contexts at a time.
*/
struct via_sarea {
struct drm_via_sarea sa;
struct drm_via_sarea_xvmc sa_xvmc;
};
struct drm_via_private {
struct drm_global_reference mem_global_ref;
struct drm_device *dev;
struct ttm_object_device *tdev;
struct ttm_fence_device fdev;
struct ttm_bo_device bdev;
struct ttm_lock ttm_lock;
struct via_sarea *sarea_priv;
drm_local_map_t *sarea;
unsigned long agpAddr;
wait_queue_head_t decoder_queue[DRM_VIA_NR_XVMC_LOCKS];
char pci_buf[VIA_PCI_BUF_SIZE];
char *dma_ptr;
unsigned int dma_low;
unsigned int dma_high;
unsigned int dma_offset;
uint32_t dma_wrap;
uint32_t dma_tracker;
uint32_t dma_free;
struct list_head dma_trackers;
uint32_t __iomem *last_pause_ptr;
uint32_t __iomem *hw_addr_ptr;
atomic_t vbl_received;
drm_via_state_t hc_state;
const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
uint32_t num_fire_offsets;
int chipset;
uint32_t irq_enable_mask;
uint32_t irq_pending_mask;
uint32_t irq2_enable_mask;
uint32_t irq2_pending_mask;
spinlock_t irq_lock;
/* Memory manager stuff */
unsigned long vram_offset;
unsigned long agp_offset;
struct drm_via_blitq blit_queues[VIA_NUM_BLIT_ENGINES];
uint32_t dma_diff;
atomic_t fence_seq[VIA_NUM_ENGINES];
uint64_t vram_size; /* kiB */
uint64_t vram_start;
int vram_direct;
int vram_mtrr;
uint64_t tt_size; /* bytes */
uint64_t tt_start;
struct ttm_buffer_object *vq_bo;
struct ttm_buffer_object *fence_bo;
struct ttm_bo_kmap_obj fence_bmo;
uint32_t *fence_map;
struct ttm_buffer_object *agpc_bo;
struct ttm_bo_kmap_obj agpc_bmo;
volatile uint32_t *agpc_map;
/*
* Fixed memory region for transient buffer objects.
*/
struct ttm_buffer_object *agp_bo;
struct mutex init_mutex;
u8 __iomem *mmio_map;
atomic_t val_seq;
struct mutex cmdbuf_mutex;
int has_irq;
struct hrtimer fence_timer;
struct ttm_fence_object *barriers[VIA_NUM_BARRIERS];
uint32_t max_validate_buffers;
rwlock_t context_lock;
struct drm_open_hash context_hash;
};
struct via_cpriv {
struct drm_hash_item hash;
struct drm_via_private *dev_priv;
struct kref kref;
atomic_t in_execbuf;
void *reloc_buf;
struct via_validate_buffer *val_bufs;
};
enum via_family {
VIA_OTHER = 0, /* Baseline */
VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
};
/* VIA MMIO register access */
#define VIA_READ(reg) ioread32((u32 *)(dev_priv->mmio_map + (reg)))
#define VIA_WRITE(reg,val) iowrite32(val, (u32 *)(dev_priv->mmio_map + (reg)))
#define VIA_READ8(reg) ioread8(dev_priv->mmio_map + (reg))
#define VIA_WRITE8(reg,val) iowrite8(val, dev_priv->mmio_map + (reg))
extern int drm_via_disable_verifier;
extern struct drm_fence_driver via_fence_driver;
static inline struct drm_via_private *via_priv(struct drm_device *dev)
{
return (struct drm_via_private *)dev->dev_private;
}
extern int via_decoder_futex(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
extern int via_driver_unload(struct drm_device *dev);
extern int via_suspend(struct pci_dev *pdev, pm_message_t state);
extern int via_resume(struct pci_dev *pdev);
extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
extern int via_enable_vblank(struct drm_device *dev, int crtc);
extern void via_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_driver_irq_preinstall(struct drm_device *dev);
extern int via_driver_irq_postinstall(struct drm_device *dev);
extern void via_driver_irq_uninstall(struct drm_device *dev);
extern int via_dma_cleanup(struct drm_device *dev);
extern void via_init_command_verifier(void);
extern int via_driver_dma_quiescent(struct drm_device *dev);
extern void via_init_futex(struct drm_via_private *dev_priv);
extern void via_release_futex(struct drm_via_private *dev_priv, int context);
extern int via_firstopen(struct drm_device *dev);
extern void via_lastclose(struct drm_device *dev);
extern int via_dmablit_bo(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem,
struct page **pages,
int *fence_class);
extern struct ttm_backend *via_create_ttm_backend_entry(struct ttm_bo_device
*bdev);
extern int via_invalidate_caches(struct ttm_bo_device *bdev,
uint32_t buffer_flags);
extern int via_init_mem_type(struct ttm_bo_device *dev, uint32_t type,
struct ttm_mem_type_manager *man);
extern uint32_t via_evict_flags(struct ttm_buffer_object *bo);
extern int via_bo_move(struct ttm_buffer_object *bo, int evict,
int interruptible, int no_wait, struct ttm_mem_reg *new_mem);
extern void via_dma_initialize(struct drm_via_private *dev_priv);
extern void via_dma_takedown(struct drm_via_private *dev_priv);
extern void via_wait_idle(struct drm_via_private *dev_priv);
int via_copy_cmdbuf(struct drm_via_private *dev_priv,
uint64_t cmd_buffer,
uint32_t size,
uint32_t mechanism,
uint32_t ** cmdbuf_addr, int *is_iomem);
extern int via_vt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_dispatch_commands(struct drm_device *dev,
unsigned long size, uint32_t mechanism);
extern void via_ttm_signal_fences(struct drm_via_private *dev_priv);
extern void via_ttm_fence_cmd_handler(struct drm_via_private *dev_priv, uint32_t signal_types);
extern void via_ttm_fence_dmablit_handler(struct drm_via_private *dev_priv, int engine);
extern enum hrtimer_restart via_ttm_fence_timer_func(struct hrtimer *timer);
extern int via_ttm_fence_device_init(struct drm_via_private *dev_priv);
extern int via_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_extension_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_release(struct inode *inode, struct file *filp);
extern int via_open(struct inode *inode, struct file *filp);
extern int via_context_ctor(struct drm_device *dev, int context);
extern int via_context_dtor(struct drm_device *dev, int context);
struct via_cpriv *via_context_lookup(struct drm_via_private *dev_priv,
int context);
extern void via_context_unref(struct via_cpriv **cpriv);
static inline struct via_fpriv *via_fpriv(struct drm_file *file_priv)
{
return (struct via_fpriv *) file_priv->driver_priv;
}
extern int via_fence_signaled_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_fence_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_fence_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_pl_waitidle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_pl_setstatus_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_pl_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_pl_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_pl_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_pl_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int via_mmap(struct file *filp, struct vm_area_struct *vma);
extern int via_verify_access(struct ttm_buffer_object *bo,
struct file *filp);
extern ssize_t via_ttm_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos);
extern ssize_t via_ttm_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos);
extern int via_ttm_global_init(struct drm_via_private *dev_priv);
extern void via_ttm_global_release(struct drm_via_private *dev_priv);
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,246 @@
/* via_irq.c
*
* Copyright 2004 BEAM Ltd.
* Copyright (c) 2002-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Terry Barnaby <terry1@beam.ltd.uk>
* Keith Whitwell <keith@tungstengraphics.com>
* Thomas Hellstrom <unichrome@shipmail.org>
*
* This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
* interrupt, as well as an infrastructure to handle other interrupts of the chip.
* The refresh rate is also calculated for video playback sync purposes.
*/
#include "drmP.h"
#include "drm.h"
#include "ochr_drm.h"
#include "via_drv.h"
#define VIA_REG_INTERRUPT 0x200
/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL (1 << 31)
#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
#define VIA_IRQ_VBLANK_PENDING (1 << 3)
#define VIA_IRQ_HQV0_ENABLE (1 << 11)
#define VIA_IRQ_HQV1_ENABLE (1 << 25)
#define VIA_IRQ_HQV0_PENDING (1 << 9)
#define VIA_IRQ_HQV1_PENDING (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
u32 via_get_vblank_counter(struct drm_device * dev, int crtc)
{
struct drm_via_private *dev_priv = dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct drm_via_private *dev_priv = via_priv(dev);
u32 status;
u32 pending;
int handled = 0;
spin_lock(&dev_priv->irq_lock);
status = VIA_READ(VIA_REG_INTERRUPT);
pending = status & dev_priv->irq_pending_mask;
if (pending & VIA_IRQ_HQV0_PENDING) {
via_ttm_fence_cmd_handler(dev_priv, VIA_FENCE_TYPE_HQV0);
handled = 1;
}
if (pending & VIA_IRQ_HQV1_PENDING) {
via_ttm_fence_cmd_handler(dev_priv, VIA_FENCE_TYPE_HQV1);
handled = 1;
}
if (pending & VIA_IRQ_DMA0_TD_PENDING) {
via_ttm_fence_dmablit_handler(dev_priv, VIA_ENGINE_DMA0);
handled = 1;
}
if (pending & VIA_IRQ_DMA1_TD_PENDING) {
handled = 1;
via_ttm_fence_dmablit_handler(dev_priv, VIA_ENGINE_DMA1);
}
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
handled = 1;
}
/* Acknowledge interrupts */
VIA_WRITE(VIA_REG_INTERRUPT, status);
(void)VIA_READ(VIA_REG_INTERRUPT);
spin_unlock(&dev_priv->irq_lock);
if (handled)
return IRQ_HANDLED;
else
return IRQ_NONE;
}
static __inline__ void viadrv_acknowledge_irqs(struct drm_via_private *dev_priv)
{
u32 status;
if (dev_priv) {
/* Acknowledge interrupts */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status);
(void)VIA_READ(VIA_REG_INTERRUPT);
}
}
int via_enable_vblank(struct drm_device *dev, int crtc)
{
struct drm_via_private *dev_priv = via_priv(dev);
unsigned long irq_flags;
if (crtc != 0) {
DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
return -EINVAL;
}
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_enable_mask |= VIA_IRQ_VBLANK_ENABLE;
VIA_WRITE(VIA_REG_INTERRUPT, dev_priv->irq_enable_mask);
(void)VIA_READ(VIA_REG_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
return 0;
}
void via_disable_vblank(struct drm_device *dev, int crtc)
{
struct drm_via_private *dev_priv = via_priv(dev);
unsigned long irq_flags;
if (crtc != 0)
DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_enable_mask &= ~VIA_IRQ_VBLANK_ENABLE;
VIA_WRITE(VIA_REG_INTERRUPT, dev_priv->irq_enable_mask);
(void)VIA_READ(VIA_REG_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
/*
* drm_dma.h hooks
*/
void via_driver_irq_preinstall(struct drm_device *dev)
{
struct drm_via_private *dev_priv = via_priv(dev);
DRM_DEBUG("dev_priv: %p\n", dev_priv);
if (dev_priv) {
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->irq_enable_mask =
VIA_IRQ_GLOBAL |
VIA_IRQ_DMA0_TD_ENABLE | VIA_IRQ_DMA1_TD_ENABLE;
dev_priv->irq2_enable_mask = 0;
dev_priv->irq_pending_mask =
VIA_IRQ_VBLANK_PENDING |
VIA_IRQ_DMA0_TD_PENDING | VIA_IRQ_DMA1_TD_PENDING;
dev_priv->irq2_pending_mask = 0;
if (dev_priv->chipset == VIA_PRO_GROUP_A ||
dev_priv->chipset == VIA_DX9_0) {
dev_priv->irq_enable_mask |=
VIA_IRQ_HQV0_ENABLE | VIA_IRQ_HQV1_ENABLE;
dev_priv->irq_pending_mask |=
VIA_IRQ_HQV0_PENDING | VIA_IRQ_HQV1_PENDING;
}
/* Clear bits if they're already high */
viadrv_acknowledge_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
}
int via_driver_irq_postinstall(struct drm_device *dev)
{
struct drm_via_private *dev_priv = via_priv(dev);
DRM_DEBUG("via_driver_irq_postinstall\n");
if (!dev_priv)
return -EINVAL;
drm_vblank_init(dev, 1);
spin_lock_irq(&dev_priv->irq_lock);
VIA_WRITE(VIA_REG_INTERRUPT, dev_priv->irq_enable_mask);
wmb();
/* Some magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
wmb();
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
(void)VIA_READ8(0x83d5);
spin_unlock_irq(&dev_priv->irq_lock);
return 0;
}
void via_driver_irq_uninstall(struct drm_device *dev)
{
struct drm_via_private *dev_priv = via_priv(dev);
u32 status;
DRM_DEBUG("\n");
if (dev_priv) {
spin_lock_irq(&dev_priv->irq_lock);
/* Some more magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
wmb();
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT,
status & ~dev_priv->irq_enable_mask);
(void)VIA_READ(VIA_REG_INTERRUPT);
spin_unlock_irq(&dev_priv->irq_lock);
}
}

View file

@@ -0,0 +1,230 @@
/**************************************************************************
*
* Copyright (c) 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "ochr_drm.h"
#include "via_drv.h"
#include "ttm/ttm_fence_driver.h"
#define VIA_POLL_DELAY_NS 100000
static void via_ttm_fence_poll(struct ttm_fence_device *fdev,
uint32_t engine, uint32_t waiting_types)
{
struct drm_via_private *dev_priv =
container_of(fdev, struct drm_via_private, fdev);
uint32_t seq;
if (unlikely(!waiting_types))
return;
switch (engine) {
case VIA_ENGINE_CMD:
{
struct ttm_fence_class_manager *fc =
&fdev->fence_class[engine];
uint32_t offset = VIA_FENCE_OFFSET_CMD;
uint32_t type = TTM_FENCE_TYPE_EXE;
uint32_t error = 0;
offset >>= 2;
seq = ioread32(dev_priv->fence_map + offset);
if (unlikely(waiting_types & VIA_FENCE_TYPE_HQV0)) {
if (!(VIA_READ(0x3D0) & (1 << 4)))
type |= VIA_FENCE_TYPE_HQV0;
}
if (unlikely(waiting_types & VIA_FENCE_TYPE_HQV1)) {
if (!(VIA_READ(0x13D0) & (1 << 4)))
type |= VIA_FENCE_TYPE_HQV1;
}
if (unlikely(waiting_types & VIA_FENCE_TYPE_MPEG0)) {
uint32_t mpeg_status = VIA_READ(0xC54);
if ((mpeg_status & 0x207) == 0x204)
type |= VIA_FENCE_TYPE_MPEG0;
if (unlikely((mpeg_status & 0x70) != 0)) {
type |= VIA_FENCE_TYPE_MPEG0;
error = mpeg_status & 0x70;
}
}
/*
* FIXME: Info on MPEG1 engine?
*/
ttm_fence_handler(fdev, engine, seq, type, error);
if (fc->waiting_types) {
hrtimer_start(&dev_priv->fence_timer,
ktime_set(0, VIA_POLL_DELAY_NS),
HRTIMER_MODE_REL);
}
break;
}
case VIA_ENGINE_DMA0:
case VIA_ENGINE_DMA1:
case VIA_ENGINE_DMA2:
case VIA_ENGINE_DMA3:
{
uint32_t dma_engine = engine - VIA_ENGINE_DMA0;
struct drm_via_blitq *blitq =
&dev_priv->blit_queues[dma_engine];
unsigned long irq_flags;
via_dmablit_handler(dev_priv->dev, dma_engine);
spin_lock_irqsave(&blitq->blit_lock, irq_flags);
seq = blitq->completed_fence_seq;
spin_unlock_irqrestore(&blitq->blit_lock, irq_flags);
ttm_fence_handler(fdev, engine, seq, TTM_FENCE_TYPE_EXE,
0);
break;
}
default:
break;
}
}
/**
* Emit a fence sequence.
*/
static int via_ttm_fence_emit_sequence(struct ttm_fence_device *fdev,
uint32_t class,
uint32_t flags,
uint32_t * sequence,
unsigned long *timeout_jiffies)
{
struct drm_via_private *dev_priv =
container_of(fdev, struct drm_via_private, fdev);
*sequence = atomic_read(&dev_priv->fence_seq[class]);
*timeout_jiffies = jiffies + 3 * HZ;
return 0;
}
void via_ttm_fence_cmd_handler(struct drm_via_private *dev_priv,
uint32_t signal_types)
{
struct ttm_fence_device *fdev = &dev_priv->fdev;
struct ttm_fence_class_manager *fc = &fdev->fence_class[VIA_ENGINE_CMD];
write_lock(&fc->lock);
via_ttm_fence_poll(fdev, VIA_ENGINE_CMD,
fc->waiting_types | signal_types);
write_unlock(&fc->lock);
}
void via_ttm_fence_dmablit_handler(struct drm_via_private *dev_priv, int engine)
{
struct ttm_fence_device *fdev = &dev_priv->fdev;
struct ttm_fence_class_manager *fc = &fdev->fence_class[engine];
write_lock(&fc->lock);
via_ttm_fence_poll(fdev, engine, fc->waiting_types);
write_unlock(&fc->lock);
}
static int via_ttm_fence_has_irq(struct ttm_fence_device *fdev,
uint32_t engine, uint32_t flags)
{
struct drm_via_private *dev_priv =
container_of(fdev, struct drm_via_private, fdev);
if (engine >= VIA_ENGINE_DMA0)
return dev_priv->has_irq;
return hrtimer_is_hres_active(&dev_priv->fence_timer);
}
void via_ttm_signal_fences(struct drm_via_private *dev_priv)
{
struct ttm_fence_class_manager *fc;
int i;
unsigned long irq_flags;
if (via_driver_dma_quiescent(dev_priv->dev)) {
msleep(1000);
}
for (i = 0; i < dev_priv->fdev.num_classes; ++i) {
fc = &dev_priv->fdev.fence_class[i];
write_lock_irqsave(&fc->lock, irq_flags);
ttm_fence_handler(&dev_priv->fdev, i,
atomic_read(&dev_priv->fence_seq[i]),
0xFFFFFFFF, 0);
write_unlock_irqrestore(&fc->lock, irq_flags);
}
}
enum hrtimer_restart via_ttm_fence_timer_func(struct hrtimer *timer)
{
struct drm_via_private *dev_priv =
container_of(timer, struct drm_via_private, fence_timer);
struct ttm_fence_device *fdev = &dev_priv->fdev;
struct ttm_fence_class_manager *fc = &fdev->fence_class[VIA_ENGINE_CMD];
unsigned long irq_flags;
write_lock_irqsave(&fc->lock, irq_flags);
via_ttm_fence_poll(fdev, 0, fc->waiting_types);
write_unlock_irqrestore(&fc->lock, irq_flags);
return HRTIMER_NORESTART;
}
static struct ttm_fence_driver via_ttm_fence_driver = {
.has_irq = via_ttm_fence_has_irq,
.emit = via_ttm_fence_emit_sequence,
.flush = NULL,
.poll = via_ttm_fence_poll,
.needed_flush = NULL,
.wait = NULL,
.signaled = NULL,
};
int via_ttm_fence_device_init(struct drm_via_private *dev_priv)
{
struct ttm_fence_class_init fci = {.wrap_diff = (1 << 30),
.flush_diff = (1 << 29),
.sequence_mask = 0xFFFFFFFF
};
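/* Five fence classes: the command engine plus the four DMA engines (VIA_NUM_ENGINES). */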
return ttm_fence_device_init(5,
dev_priv->mem_global_ref.object,
&dev_priv->fdev,
&fci, 1, &via_ttm_fence_driver);
}

View file

@@ -0,0 +1,209 @@
/*
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "via_drv.h"
#include "ttm/ttm_userobj_api.h"
static struct vm_operations_struct via_ttm_vm_ops;
static struct vm_operations_struct *ttm_vm_ops = NULL;
int via_fence_signaled_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_fence_signaled_ioctl(via_fpriv(file_priv)->tfile, data);
}
int via_fence_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_fence_finish_ioctl(via_fpriv(file_priv)->tfile, data);
}
int via_fence_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_fence_unref_ioctl(via_fpriv(file_priv)->tfile, data);
}
int via_pl_waitidle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_pl_waitidle_ioctl(via_fpriv(file_priv)->tfile, data);
}
int via_pl_setstatus_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_pl_setstatus_ioctl(via_fpriv(file_priv)->tfile,
&via_priv(dev)->ttm_lock, data);
}
int via_pl_synccpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_pl_synccpu_ioctl(via_fpriv(file_priv)->tfile, data);
}
int via_pl_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_pl_unref_ioctl(via_fpriv(file_priv)->tfile, data);
}
int via_pl_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return ttm_pl_reference_ioctl(via_fpriv(file_priv)->tfile, data);
}
int via_pl_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_via_private *dev_priv = via_priv(dev);
return ttm_pl_create_ioctl(via_fpriv(file_priv)->tfile,
&dev_priv->bdev, &dev_priv->ttm_lock, data);
}
/**
* via_ttm_fault - Wrapper around the ttm fault method.
*
* @vma: The struct vm_area_struct as in the vm fault() method.
* @vmf: The struct vm_fault as in the vm fault() method.
*
* Since ttm_fault() will reserve buffers while faulting,
* we need to take the ttm read lock around it, as this driver
* relies on the ttm_lock in write mode to exclude all threads from
* reserving and thus validating buffers in aperture- and memory shortage
* situations.
*/
static int via_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct drm_via_private *dev_priv =
container_of(bo->bdev, struct drm_via_private, bdev);
int ret;
ret = ttm_read_lock(&dev_priv->ttm_lock, true);
if (unlikely(ret != 0))
return VM_FAULT_NOPAGE;
ret = ttm_vm_ops->fault(vma, vmf);
ttm_read_unlock(&dev_priv->ttm_lock);
return ret;
}
int via_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
struct drm_via_private *dev_priv;
int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
file_priv = (struct drm_file *)filp->private_data;
dev_priv = via_priv(file_priv->minor->dev);
ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
if (unlikely(ret != 0))
return ret;
if (unlikely(ttm_vm_ops == NULL)) {
ttm_vm_ops = vma->vm_ops;
via_ttm_vm_ops = *ttm_vm_ops;
via_ttm_vm_ops.fault = &via_ttm_fault;
}
vma->vm_ops = &via_ttm_vm_ops;
return 0;
}
ssize_t via_ttm_write(struct file * filp, const char __user * buf,
size_t count, loff_t * f_pos)
{
struct drm_file *file_priv = (struct drm_file *)filp->private_data;
struct drm_via_private *dev_priv = via_priv(file_priv->minor->dev);
return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
}
ssize_t via_ttm_read(struct file * filp, char __user * buf,
size_t count, loff_t * f_pos)
{
struct drm_file *file_priv = (struct drm_file *)filp->private_data;
struct drm_via_private *dev_priv = via_priv(file_priv->minor->dev);
return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
}
int via_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct drm_file *file_priv = (struct drm_file *)filp->private_data;
return ttm_pl_verify_access(bo, via_fpriv(file_priv)->tfile);
}
static int via_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
static void via_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
int via_ttm_global_init(struct drm_via_private *dev_priv)
{
struct drm_global_reference *global_ref;
int ret;
global_ref = &dev_priv->mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &via_ttm_mem_global_init;
global_ref->release = &via_ttm_mem_global_release;
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed referencing a global TTM memory object.\n");
return ret;
}
return 0;
}
void via_ttm_global_release(struct drm_via_private *dev_priv)
{
drm_global_item_unref(&dev_priv->mem_global_ref);
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,62 @@
/*
* Copyright 2004 The Unichrome Project. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Thomas Hellström 2004.
*/
#ifndef _VIA_VERIFIER_H_
#define _VIA_VERIFIER_H_
typedef enum {
no_sequence = 0,
z_address,
dest_address,
tex_address
} drm_via_sequence_t;
typedef struct {
unsigned texture;
uint32_t z_addr;
uint32_t d_addr;
uint32_t t_addr[2][12];
uint32_t pitch[2][12];
uint32_t height[2][12];
uint32_t tex_level_lo[2];
uint32_t tex_level_hi[2];
uint32_t tex_palette_size[2];
uint32_t tex_npot[2];
drm_via_sequence_t unfinished;
int agp_texture;
int multitex;
struct drm_device *dev;
drm_local_map_t *map_cache;
uint32_t vertex_count;
int agp;
const uint32_t *buf_start;
} drm_via_state_t;
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
struct drm_device *dev, int agp);
extern int via_parse_command_stream(struct drm_device *dev,
const uint32_t * buf, unsigned int size);
#endif

View file

@@ -0,0 +1,105 @@
/*
* Copyright 2005 Thomas Hellstrom. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Thomas Hellstrom 2005.
*
* Video and XvMC related functions.
*/
#include "drmP.h"
#include "ochr_drm.h"
#include "via_drv.h"
#include "drm_sarea.h"
void via_init_futex(struct drm_via_private *dev_priv)
{
unsigned int i;
volatile struct via_sarea *sa_priv = dev_priv->sarea_priv;
DRM_DEBUG("\n");
for (i = 0; i < DRM_VIA_NR_XVMC_LOCKS; ++i) {
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
DRM_VIA_XVMCLOCKPTR(&sa_priv->sa_xvmc, i)->lock = 0;
}
}
void via_release_futex(struct drm_via_private *dev_priv, int context)
{
unsigned int i;
volatile int *lock;
volatile struct via_sarea *sa_priv = dev_priv->sarea_priv;
if (!dev_priv->sarea_priv)
return;
for (i = 0; i < DRM_VIA_NR_XVMC_LOCKS; ++i) {
lock =
(volatile int *)DRM_VIA_XVMCLOCKPTR(&sa_priv->sa_xvmc, i);
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
if (_DRM_LOCK_IS_HELD(*lock)
&& (*lock & _DRM_LOCK_CONT)) {
DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
}
*lock = 0;
}
}
}
int via_decoder_futex(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_via_futex *fx = data;
volatile int *lock;
struct drm_via_private *dev_priv = via_priv(dev);
struct via_sarea *sa_priv = dev_priv->sarea_priv;
int ret = 0;
DRM_DEBUG("\n");
if (unlikely(sa_priv == NULL)) {
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("Could not find sarea.\n");
return -EINVAL;
}
dev_priv->sarea_priv =
(struct via_sarea *)((u8 *) dev_priv->sarea->handle +
sizeof(struct drm_sarea));
sa_priv = dev_priv->sarea_priv;
}
if (fx->lock >= DRM_VIA_NR_XVMC_LOCKS)
return -EFAULT;
lock = (volatile int *)DRM_VIA_XVMCLOCKPTR(&sa_priv->sa_xvmc, fx->lock);
switch (fx->op) {
case VIA_FUTEX_WAIT:
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
(fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
return ret;
case VIA_FUTEX_WAKE:
DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
return 0;
}
return 0;
}

View file

@@ -39,4 +39,6 @@ klibdrminclude_HEADERS = \
via_drm.h \
r300_reg.h \
via_3d_reg.h \
xgi_drm.h
xgi_drm.h \
ochr_drm.h

472
shared-core/ochr_drm.h Normal file
View file

@@ -0,0 +1,472 @@
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _OCHR_DRM_H_
#define _OCHR_DRM_H_
#include <ttm/ttm_placement_user.h>
#include <ttm/ttm_fence_user.h>
/*
* With the arrival of libdrm there is a need to version this file.
* As usual, bump MINOR for new features, MAJOR for changes that create
* backwards incompatibilities, (which should be avoided whenever possible).
*/
#define DRM_VIA_DRIVER_DATE "20090119"
#define DRM_VIA_DRIVER_MAJOR 0
#define DRM_VIA_DRIVER_MINOR 1
#define DRM_VIA_DRIVER_PATCHLEVEL 0
#define DRM_VIA_DRIVER_VERSION (((DRM_VIA_DRIVER_MAJOR) << 16) | (DRM_VIA_DRIVER_MINOR))
#define DRM_VIA_MAX_MIP 16
#define DRM_VIA_NR_SCANOUTS 4
#define DRM_VIA_NR_XVMC_PORTS 10
#define DRM_VIA_NR_XVMC_LOCKS 5
#define DRM_VIA_MAX_CACHELINE_SIZE 64
#define DRM_VIA_XVMCLOCKPTR(saPriv,lockNo) \
((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->xvmc_lock_area) + \
(DRM_VIA_MAX_CACHELINE_SIZE - 1)) & \
~(DRM_VIA_MAX_CACHELINE_SIZE - 1)) + \
DRM_VIA_MAX_CACHELINE_SIZE*(lockNo)))
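/*
* The macro above rounds the start of xvmc_lock_area up to the next
* DRM_VIA_MAX_CACHELINE_SIZE boundary and then steps lockNo whole cache
* lines into the area, so each lock integer sits alone on its own cache
* line; hence the extra "+ 1" cache line reserved in
* struct drm_via_sarea_xvmc below.
*/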
/* VIA specific ioctls */
#define DRM_VIA_VT 0x00
#define DRM_VIA_GET_PARAM 0x01
#define DRM_VIA_EXTENSION 0x02
#define DRM_IOCTL_VIA_VT DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_VT, struct drm_via_vt)
#define DRM_IOCTL_VIA_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_GET_PARAM, struct drm_via_getparam_arg)
#define DRM_IOCTL_VIA_EXTENSION DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_EXTENSION, struct drm_via_extension_arg)
/*
* TTM Ioctls.
*/
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
*/
struct drm_via_futex {
uint32_t ms;
uint32_t lock;
uint32_t val;
enum {
VIA_FUTEX_WAIT = 0x00,
VIA_FUTEX_WAKE = 0x01
} op;
};
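/*
* Illustrative fragment only ("fd" and "futex_cmd" are hypothetical: an
* open DRM file descriptor and the futex ioctl index obtained by user
* space, e.g. through the extension mechanism below). Waiting up to
* 10 ms for XvMC lock 0 to change away from "val" might look like:
*
* struct drm_via_futex fx = {
* .ms = 10, .lock = 0, .val = val, .op = VIA_FUTEX_WAIT,
* };
* drmCommandWriteRead(fd, futex_cmd, &fx, sizeof(fx));
*/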
/**
* struct drm_via_vt
*
* @enter: 1 for entervt, 0 for leavevt.
*
* Argument to the DRM_VIA_VT ioctl.
*/
struct drm_via_vt {
int32_t enter;
};
/**
* struct drm_via_scanout
*
* @stamp: sequence number identifying last change.
* @handle: buffer object handle
* @width: scanout buffer width
* @height: scanout buffer height
* @stride: scanout buffer stride in bytes
* @depth: scanout buffer depth. 15, 16 or 32.
*
* Part of the shared memory area.
*/
struct drm_via_scanout {
uint64_t stamp;
uint32_t handle;
uint32_t width;
uint32_t height;
uint32_t stride;
uint32_t depth;
uint32_t pad64;
};
/**
* struct drm_via_sarea_xvmc
*
* @xvmc_lock_area: Area for XvMC decoder locks.
* @xvmc_displaying: Currently displaying surface.
* @xvmc_sub_pic_on: 1: subpicture on, 0: subpicture off.
* @xvmc_ctx: Last context to upload Mpeg state
*
* The xvmc part of the driver-specific sarea. The offset of this structure
* into the driver specific sarea can be obtained using the extension
* ioctl.
*/
struct drm_via_sarea_xvmc {
char xvmc_lock_area[DRM_VIA_MAX_CACHELINE_SIZE *
(DRM_VIA_NR_XVMC_LOCKS + 1)];
uint32_t xvmc_displaying[DRM_VIA_NR_XVMC_PORTS];
uint32_t xvmc_sub_pic_on[DRM_VIA_NR_XVMC_PORTS];
uint32_t xvmc_ctx[DRM_VIA_NR_XVMC_LOCKS];
};
/**
* struct drm_via_sarea
*
* @scanouts: Scanout buffer info.
* @pfCurrentOffset: For page-flipping.
*
* via shared memory area.
*/
struct drm_via_sarea {
struct drm_via_scanout scanouts[DRM_VIA_NR_SCANOUTS];
/*
* The XvMC lock area (each lock integer alone on, and aligned to, a
* cache line) lives in the separate struct drm_via_sarea_xvmc above.
*/
/* Used by the 3d driver only at this point, for pageflipping:
*/
uint32_t pfCurrentOffset;
};
enum drm_via_params {
DRM_VIA_PARAM_VRAM_SIZE,
DRM_VIA_PARAM_TT_SIZE,
DRM_VIA_PARAM_AGP_SIZE,
DRM_VIA_PARAM_HAS_IRQ,
DRM_VIA_PARAM_SAREA_SIZE,
};
/**
* struct drm_via_getparam_arg
*
* @value: Returned value.
* @param: Requested parameter. (int) enum drm_via_params
*
* Argument to the DRM_VIA_GET_PARAM Ioctl.
*/
struct drm_via_getparam_arg {
uint64_t value;
uint32_t param;
uint32_t pad64;
};
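/*
* Illustrative fragment only ("fd" is an open DRM file descriptor):
* querying the amount of VRAM managed by the driver with libdrm:
*
* struct drm_via_getparam_arg arg = { .param = DRM_VIA_PARAM_VRAM_SIZE };
* drmCommandWriteRead(fd, DRM_VIA_GET_PARAM, &arg, sizeof(arg));
*
* On success, arg.value holds the requested parameter.
*/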
/**
* struct drm_via_extension_rep
*
* @exists: extension exists.
* @driver_ioctl_offset: Driver ioctl number of first ioctl in the extension.
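* @driver_sarea_offset: Offset into the driver-specific sarea of the
* extension's data, if any (see for example struct drm_via_sarea_xvmc).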
* @major: Major version of the extension.
* @minor: Minor version of the extension.
* @pl: Patchlevel version of the extension.
*
* Output from the DRM_VIA_EXTENSION ioctl.
*/
struct drm_via_extension_rep {
int32_t exists;
uint32_t driver_ioctl_offset;
uint32_t driver_sarea_offset;
uint32_t major;
uint32_t minor;
uint32_t pl;
uint32_t pad64;
};
#define DRM_VIA_EXT_NAME_LEN 128
/**
* union drm_via_extension_arg
*
* @extension: Input: Name of the extension.
* @rep: Output: Reply.
*/
union drm_via_extension_arg {
char extension[DRM_VIA_EXT_NAME_LEN];
struct drm_via_extension_rep rep;
};
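/*
* Illustrative fragment only (the extension name is hypothetical):
*
* union drm_via_extension_arg ext;
* memset(&ext, 0, sizeof(ext));
* strncpy(ext.extension, "some_extension", DRM_VIA_EXT_NAME_LEN - 1);
* drmCommandWriteRead(fd, DRM_VIA_EXTENSION, &ext, sizeof(ext));
* if (ext.rep.exists)
* first_ioctl = ext.rep.driver_ioctl_offset;
*/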
/*
* Below is execbuf stuff.
*/
#define VIA_RELOC_BUF_SIZE 8192
enum drm_via_reloc_type {
VIA_RELOC_ZBUF,
VIA_RELOC_DSTBUF,
VIA_RELOC_PF,
VIA_RELOC_2D,
VIA_RELOC_TEX,
VIA_RELOC_YUV,
VIA_RELOC_NUMTYPE
};
#define VIA_USE_PRESUMED (1 << 0)
#define VIA_PRESUMED_AGP (1 << 1)
/**
* struct drm_via_validate_req
*
* @set_flags: Validation flags to set.
* @clear_flags: Validation flags to clear.
* @next: User space pointer to the next struct drm_via_validate_req in
* a linked list, cast to an uint64_t.
* @presumed_gpu_offset: Presumed gpu offset, used in the command stream,
* of the buffer.
* @presumed_flags: Flags indicating how the presumed gpu offset should be
* interpreted (and if).
* @cmdbuf_first: dword offset of the first state packet referencing this
* buffer in the command stream. Used for command stream splitting.
* @cmdbuf_last: dword offset of the last + 1 state packet referencing this
* buffer in the command stream. Used for command stream splitting.
*
* Information record used for buffer object validation prior to
* command submission.
*/
struct drm_via_validate_req {
uint64_t set_flags;
uint64_t clear_flags;
uint64_t next;
uint64_t presumed_gpu_offset;
uint32_t buffer_handle;
uint32_t presumed_flags;
uint32_t cmdbuf_first;
uint32_t cmdbuf_last;
};
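/*
* Illustrative fragment only ("val" is a hypothetical array of these
* requests): requests are chained by storing the user-space address of
* the next request in "next", cast to a 64-bit integer, with 0
* terminating the list:
*
* val[0].next = (uint64_t)(unsigned long)&val[1];
* val[1].next = 0;
*/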
/**
* struct drm_via_validate_rep
*
* @gpu_offset: Last known gpu offset of the buffer.
* @placement: Last known TTM placement flag of the buffer.
* @fence_type_mask: Set of fence type flags used to determine buffer idle.
*/
struct drm_via_validate_rep {
uint64_t gpu_offset;
uint32_t placement;
uint32_t fence_type_mask;
};
/**
* struct drm_via_validate_arg
*/
struct drm_via_validate_arg {
int handled;
int ret;
union {
struct drm_via_validate_req req;
struct drm_via_validate_rep rep;
} d;
};
struct drm_via_reloc_header {
uint64_t next_header;
uint32_t used;
uint32_t num_relocs;
};
struct drm_via_reloc_bufaddr {
uint32_t index;
uint32_t delta;
};
/*
* Relocation types.
*/
struct drm_via_base_reloc {
enum drm_via_reloc_type type;
uint32_t offset;
};
struct drm_via_yuv_reloc {
struct drm_via_base_reloc base;
struct drm_via_reloc_bufaddr addr;
int32_t planes;
uint32_t shift;
uint32_t plane_offs[4];
};
struct drm_via_zbuf_reloc {
struct drm_via_base_reloc base;
struct drm_via_reloc_bufaddr addr;
};
struct drm_via_2d_reloc {
struct drm_via_base_reloc base;
struct drm_via_reloc_bufaddr addr;
uint32_t bpp;
uint32_t pos;
};
struct drm_via_texture_reloc {
struct drm_via_base_reloc base;
uint32_t low_mip;
uint32_t hi_mip;
uint32_t reg_tex_fm;
uint32_t pad64;
struct drm_via_reloc_bufaddr addr[DRM_VIA_MAX_MIP];
};
/*
* Execbuf arg.
*/
struct drm_via_ttm_fence_rep {
uint32_t handle;
uint32_t fence_class;
uint32_t fence_type;
uint32_t signaled_types;
uint32_t error;
};
struct drm_via_clip_rect {
int32_t x1, x2, y1, y2;
};
#define DRM_VIA_HAVE_CLIP (1 << 0)
#define DRM_VIA_FENCE_NO_USER (1 << 1)
#define DRM_VIA_WAIT_BARRIER (1 << 2)
struct drm_via_ttm_execbuf_control {
struct drm_via_ttm_fence_rep rep;
uint32_t first_clip;
uint32_t vram_avail;
uint32_t agp_avail;
uint32_t pad_64;
};
struct drm_via_ttm_execbuf_arg {
uint64_t buffer_list;
uint64_t reloc_list;
uint64_t cmd_buffer;
uint64_t ls_buffer_list;
uint64_t ls_reloc_list;
uint64_t ls_buffer;
uint64_t cliprect_addr;
uint64_t control;
uint32_t num_buffers;
uint32_t num_ls_buffers;
uint32_t cmd_buffer_size;
uint32_t lost_state_size;
uint32_t mechanism;
uint32_t exec_flags;
uint32_t cliprect_offset;
uint32_t num_cliprects;
uint32_t context;
uint32_t pad64;
};
/*
* Flag layout in the 64-bit VIA validate
* flag argument.
*/
#define VIA_PLACEMENT_MASK 0x000000000000FFFFULL
#define VIA_PLACEMENT_SHIFT 0
/*
* Additional placement domain. A pre-bound
* area of AGP memory for fast buffer object
* creation and destruction.
*/
#define VIA_PL_FLAG_AGP TTM_PL_FLAG_PRIV0
#define VIA_ACCESS_MASK 0x0000000000FF0000ULL
#define VIA_ACCESS_SHIFT 16
#define VIA_ACCESS_READ (TTM_ACCESS_READ << VIA_ACCESS_SHIFT)
#define VIA_ACCESS_WRITE (TTM_ACCESS_WRITE << VIA_ACCESS_SHIFT)
/*
* Validation access flags that indicate that the buffer
* will be accessed by the following engines:
*/
#define VIA_VALMODE_MASK 0x0000FFFF00000000ULL
#define VIA_VALMODE_SHIFT 32
#define VIA_VAL_FLAG_HQV0 (1ULL << (VIA_VALMODE_SHIFT + 0))
#define VIA_VAL_FLAG_HQV1 (1ULL << (VIA_VALMODE_SHIFT + 1))
#define VIA_VAL_FLAG_MPEG0 (1ULL << (VIA_VALMODE_SHIFT + 2))
#define VIA_VAL_FLAG_MPEG1 (1ULL << (VIA_VALMODE_SHIFT + 3))
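/*
* Illustrative fragment only, assuming the standard TTM placement flag
* names from ttm_placement_user.h: a set_flags value asking for VRAM or
* the pre-bound AGP area, read access, and use by the HQV0 engine could
* be composed as:
*
* uint64_t set_flags = ((TTM_PL_FLAG_VRAM | VIA_PL_FLAG_AGP) &
* VIA_PLACEMENT_MASK) |
* VIA_ACCESS_READ | VIA_VAL_FLAG_HQV0;
*/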
/*
* The command reader.
* It has two submission mechanisms that are serialized,
* and user-space can indicate when a specific mechanism
* needs to be used.
*/
#define VIA_ENGINE_CMD 0
#define _VIA_MECHANISM_AGP 0
#define _VIA_MECHANISM_PCI 1
#define VIA_ENGINE_DMA0 1
#define VIA_ENGINE_DMA1 2
#define VIA_ENGINE_DMA2 3
#define VIA_ENGINE_DMA3 4
#define VIA_NUM_ENGINES 5
/*
* These fence types are defined
* for engine 0 and 1. Indicates that the
* HQV or MPEG engine is done with the
* commands associated with the fence.
*/
#define VIA_FENCE_TYPE_HQV0 (1 << 1)
#define VIA_FENCE_TYPE_HQV1 (1 << 2)
#define VIA_FENCE_TYPE_MPEG0 (1 << 3)
#define VIA_FENCE_TYPE_MPEG1 (1 << 4)
/*
* This fence type is for the DMA engines.
* Indicates that the device mapping for
* the system memory is released, and it's
* OK for reuse by the CPU.
*/
#define VIA_FENCE_TYPE_SYSMEM (1 << 1)
#endif /* _OCHR_DRM_H_ */