Remove old TTM.

Thomas Hellstrom 2009-01-17 12:39:50 +01:00
parent e9298a02ac
commit bbfd0d2025
27 changed files with 18 additions and 7264 deletions


@@ -1,198 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#ifndef _XF86MM_H_
#define _XF86MM_H_
#include <stddef.h>
#include <stdint.h>
#include "drm.h"
/*
* Note on multithreaded applications using this interface.
* Libdrm is not thread-safe, so common buffer, TTM, and fence objects need to
* be protected using an external mutex.
*
* Note: Don't protect the following functions, as it may lead to deadlocks:
* drmBOUnmap().
* The kernel is synchronizing and refcounting buffer maps.
* User space only needs to refcount object usage within the same application.
*/
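/*
 * Illustration (not part of the original header): one way an application
 * can serialize these calls behind an external mutex, leaving drmBOUnmap()
 * unwrapped as the note above requires. The mutex name is a made-up
 * example; <pthread.h> is assumed.
 *
 *	static pthread_mutex_t app_drm_mutex = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&app_drm_mutex);
 *	ret = drmBOMap(fd, &bo, mapFlags, mapHint, &virtual);
 *	pthread_mutex_unlock(&app_drm_mutex);
 *
 *	drmBOUnmap(fd, &bo);	(unwrapped: the kernel refcounts buffer maps)
 */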
/*
* List macros heavily inspired by the Linux kernel
* list handling. No list looping yet.
*/
typedef struct _drmMMListHead
{
struct _drmMMListHead *prev;
struct _drmMMListHead *next;
} drmMMListHead;
#define DRMINITLISTHEAD(__item) \
do{ \
(__item)->prev = (__item); \
(__item)->next = (__item); \
} while (0)
#define DRMLISTADD(__item, __list) \
do { \
(__item)->prev = (__list); \
(__item)->next = (__list)->next; \
(__list)->next->prev = (__item); \
(__list)->next = (__item); \
} while (0)
#define DRMLISTADDTAIL(__item, __list) \
do { \
(__item)->next = (__list); \
(__item)->prev = (__list)->prev; \
(__list)->prev->next = (__item); \
(__list)->prev = (__item); \
} while(0)
#define DRMLISTDEL(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
} while(0)
#define DRMLISTDELINIT(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
(__item)->next = (__item); \
(__item)->prev = (__item); \
} while(0)
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
for ((__item) = (__list)->next, (__temp) = (__item)->next; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->next)
#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->prev)
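/*
 * Illustration (not part of the original header): embedding a drmMMListHead
 * in a struct and walking the list with the macros above; "struct item" is
 * a hypothetical example type.
 *
 *	struct item {
 *		int value;
 *		drmMMListHead head;
 *	};
 *
 *	drmMMListHead list, *entry, *temp;
 *	DRMINITLISTHEAD(&list);
 *	DRMLISTADDTAIL(&an_item.head, &list);
 *
 *	DRMLISTFOREACHSAFE(entry, temp, &list) {
 *		struct item *it = DRMLISTENTRY(struct item, entry, head);
 *		DRMLISTDEL(entry);	(safe: temp was saved up front)
 *	}
 */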
typedef struct _drmFence
{
unsigned handle;
int fence_class;
unsigned type;
unsigned flags;
unsigned signaled;
uint32_t sequence;
unsigned pad[4]; /* for future expansion */
} drmFence;
typedef struct _drmBO
{
unsigned handle;
uint64_t mapHandle;
uint64_t flags;
uint64_t proposedFlags;
unsigned mapFlags;
unsigned long size;
unsigned long offset;
unsigned long start;
unsigned replyFlags;
unsigned fenceFlags;
unsigned pageAlignment;
unsigned tileInfo;
unsigned hwTileStride;
unsigned desiredTileStride;
void *virtual;
void *mapVirtual;
int mapCount;
unsigned pad[8]; /* for future expansion */
} drmBO;
/*
* Fence functions.
*/
extern int drmFenceCreate(int fd, unsigned flags, int fence_class,
unsigned type, drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);
extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
extern int drmFenceSignaled(int fd, drmFence *fence,
unsigned fenceType, int *signaled);
extern int drmFenceWait(int fd, unsigned flags, drmFence *fence,
unsigned flush_type);
extern int drmFenceEmit(int fd, unsigned flags, drmFence *fence,
unsigned emit_type);
extern int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence);
/*
* Buffer object functions.
*/
extern int drmBOCreate(int fd, unsigned long size,
unsigned pageAlignment, void *user_buffer,
uint64_t mask, unsigned hint, drmBO *buf);
extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
extern int drmBOUnreference(int fd, drmBO *buf);
extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address);
extern int drmBOUnmap(int fd, drmBO *buf);
extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
extern int drmBOInfo(int fd, drmBO *buf);
extern int drmBOBusy(int fd, drmBO *buf, int *busy);
extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
/*
* Initialization functions.
*/
extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
unsigned memType);
extern int drmMMTakedown(int fd, unsigned memType);
extern int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict);
extern int drmMMUnlock(int fd, unsigned memType, int unlockBM);
extern int drmMMInfo(int fd, unsigned memType, uint64_t *size);
extern int drmBOSetStatus(int fd, drmBO *buf,
uint64_t flags, uint64_t mask,
unsigned int hint,
unsigned int desired_tile_stride,
unsigned int tile_info);
extern int drmBOVersion(int fd, unsigned int *major,
unsigned int *minor,
unsigned int *patchlevel);
#endif
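For reference, a minimal user-space sequence against this API could look like the sketch below; the size and DRM_BO_* flag values are illustrative assumptions (those flags live in drm.h, not shown here), and error handling is elided:

    drmBO bo;
    void *virtual;

    if (drmBOCreate(fd, 65536, 0, NULL,
                    DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, &bo) == 0) {
        if (drmBOMap(fd, &bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                     0, &virtual) == 0) {
            memset(virtual, 0, bo.size);
            drmBOUnmap(fd, &bo);
        }
        drmBOUnreference(fd, &bo);
    }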


@@ -11,11 +11,10 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_memory_debug.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \
drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
drm_regman.o drm_vm_nopage_compat.o drm_gem.o drm_uncached.o
drm_vm_nopage_compat.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
@@ -49,7 +48,7 @@ sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o via_fence.o via_buffer.o
via_video.o via_dmablit.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
@@ -69,16 +68,16 @@ endif
obj-m += drm.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
obj-$(CONFIG_DRM_R128) += r128.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
#obj-$(CONFIG_DRM_RADEON)+= radeon.o
obj-$(CONFIG_DRM_MGA) += mga.o
obj-$(CONFIG_DRM_I810) += i810.o
obj-$(CONFIG_DRM_I915) += i915.o
#obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_FFB) += ffb.o
obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
#obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
obj-$(CONFIG_DRM_XGI) += xgi.o
obj-$(CONFIG_DRM_RADEON_MS) += radeon_ms.o


@@ -660,7 +660,6 @@ struct drm_gem_object {
void *driver_private;
};
#include "drm_objects.h"
#include "drm_crtc.h"
/* per-master structure */
@@ -883,7 +882,6 @@ struct drm_device {
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
struct drm_mm offset_manager; /**< User token manager */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
@@ -994,9 +992,6 @@ struct drm_device {
struct drm_minor *control;
struct drm_minor *primary; /**< render type primary screen head */
struct drm_fence_manager fm;
struct drm_buffer_manager bm;
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
@@ -1022,27 +1017,6 @@ struct drm_device {
/*@} */
};
#if __OS_HAS_AGP
struct drm_agp_ttm_backend {
struct drm_ttm_backend backend;
DRM_AGP_MEM *mem;
struct agp_bridge_data *bridge;
int populated;
};
#endif
struct ati_pcigart_ttm_backend {
struct drm_ttm_backend backend;
int populated;
void (*gart_flush_fn)(struct drm_device *dev);
struct drm_ati_pcigart_info *gart_info;
unsigned long offset;
struct page **pages;
int num_pages;
int bound;
struct drm_device *dev;
};
extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct drm_ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
@@ -1537,39 +1511,6 @@ extern void *drm_alloc(size_t size, int area);
extern void drm_free(void *pt, size_t size, int area);
#endif
/*
* Accounting variants of standard calls.
*/
static inline void *drm_ctl_alloc(size_t size, int area)
{
void *ret;
if (drm_alloc_memctl(size))
return NULL;
ret = drm_alloc(size, area);
if (!ret)
drm_free_memctl(size);
return ret;
}
static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
{
void *ret;
if (drm_alloc_memctl(nmemb*size))
return NULL;
ret = drm_calloc(nmemb, size, area);
if (!ret)
drm_free_memctl(nmemb*size);
return ret;
}
static inline void drm_ctl_free(void *pt, size_t size, int area)
{
drm_free(pt, size, area);
drm_free_memctl(size);
}
/*@}*/
#endif /* __KERNEL__ */


@@ -529,181 +529,6 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
/*
* AGP ttm backend interface.
*/
#ifndef AGP_USER_TYPES
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
#endif
#define AGP_REQUIRED_MAJOR 0
#define AGP_REQUIRED_MINOR 102
static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
{
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}
static int drm_agp_populate(struct drm_ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page)
{
struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
int dummy_page_count = 0;
if (drm_alloc_memctl(num_pages * sizeof(void *)))
return -1;
DRM_DEBUG("drm_agp_populate_ttm\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
#else
mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
#endif
if (!mem) {
drm_free_memctl(num_pages * sizeof(void *));
return -1;
}
DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
mem->page_count = 0;
for (cur_page = pages; cur_page < last_page; ++cur_page) {
struct page *page = *cur_page;
if (!page) {
page = dummy_read_page;
++dummy_page_count;
}
mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
}
if (dummy_page_count)
DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
agp_be->mem = mem;
return 0;
}
static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
struct drm_bo_mem_reg *bo_mem)
{
struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_AGP_MEM *mem = agp_be->mem;
int ret;
int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
DRM_DEBUG("drm_agp_bind_ttm\n");
mem->is_flushed = true;
mem->type = AGP_USER_MEMORY;
/* CACHED MAPPED implies not snooped memory */
if (snooped)
mem->type = AGP_USER_CACHED_MEMORY;
ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
if (ret)
DRM_ERROR("AGP Bind memory failed\n");
DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
DRM_BE_FLAG_BOUND_CACHED : 0,
DRM_BE_FLAG_BOUND_CACHED);
return ret;
}
static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
{
struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_be->mem->is_bound)
return drm_agp_unbind_memory(agp_be->mem);
else
return 0;
}
static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
{
struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_AGP_MEM *mem = agp_be->mem;
DRM_DEBUG("drm_agp_clear_ttm\n");
if (mem) {
unsigned long num_pages = mem->page_count;
backend->func->unbind(backend);
agp_free_memory(mem);
drm_free_memctl(num_pages * sizeof(void *));
}
agp_be->mem = NULL;
}
static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
{
struct drm_agp_ttm_backend *agp_be;
if (backend) {
DRM_DEBUG("drm_agp_destroy_ttm\n");
agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
if (agp_be) {
if (agp_be->mem)
backend->func->clear(backend);
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
}
}
}
static struct drm_ttm_backend_func agp_ttm_backend = {
.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
.populate = drm_agp_populate,
.clear = drm_agp_clear_ttm,
.bind = drm_agp_bind_ttm,
.unbind = drm_agp_unbind_ttm,
.destroy = drm_agp_destroy_ttm,
};
struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
{
struct drm_agp_ttm_backend *agp_be;
struct agp_kern_info *info;
if (!dev->agp) {
DRM_ERROR("AGP is not initialized.\n");
return NULL;
}
info = &dev->agp->agp_info;
if (info->version.major != AGP_REQUIRED_MAJOR ||
info->version.minor < AGP_REQUIRED_MINOR) {
DRM_ERROR("Wrong agpgart version %d.%d\n"
"\tYou need at least version %d.%d.\n",
info->version.major,
info->version.minor,
AGP_REQUIRED_MAJOR,
AGP_REQUIRED_MINOR);
return NULL;
}
agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
if (!agp_be)
return NULL;
agp_be->mem = NULL;
agp_be->bridge = dev->agp->bridge;
agp_be->populated = false;
agp_be->backend.func = &agp_ttm_backend;
agp_be->backend.dev = dev;
return &agp_be->backend;
}
EXPORT_SYMBOL(drm_agp_init_ttm);
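/*
 * Illustration (not part of the original file): a driver obtains the
 * backend with drm_agp_init_ttm(dev), which returns NULL on failure
 * (no AGP, agpgart older than 0.102, or allocation failure). The TTM
 * core then drives it entirely through the agp_ttm_backend function
 * table above:
 *
 *	struct drm_ttm_backend *be = drm_agp_init_ttm(dev);
 *	if (be)
 *		be->func->destroy(be);	(when no longer needed)
 */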
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
void drm_agp_chipset_flush(struct drm_device *dev)
{

File diff suppressed because it is too large


@@ -1,706 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
/**
* Free the old memory node unless it's a pinned region and we
* have not been requested to free also pinned regions.
*/
static void drm_bo_free_old_node(struct drm_buffer_object *bo)
{
struct drm_bo_mem_reg *old_mem = &bo->mem;
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
drm_mm_put_block(old_mem->mm_node);
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;
}
int drm_bo_move_ttm(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_ttm *ttm = bo->ttm;
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
uint64_t save_proposed_flags = old_mem->proposed_flags;
int ret;
if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
if (evict)
drm_ttm_evict(ttm);
else
drm_ttm_unbind(ttm);
drm_bo_free_old_node(bo);
DRM_FLAG_MASKED(old_mem->flags,
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
old_mem->mem_type = DRM_BO_MEM_LOCAL;
save_flags = old_mem->flags;
}
if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
ret = drm_ttm_bind(ttm, new_mem);
if (ret)
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
EXPORT_SYMBOL(drm_bo_move_ttm);
/**
* \c Return a kernel virtual address to the buffer object PCI memory.
*
* \param bo The buffer object.
* \return Failure indication.
*
* Returns -EINVAL if the buffer object is currently not mappable.
* Returns -ENOMEM if the ioremap operation failed.
* Otherwise returns zero.
*
* After a successful call, bo->iomap contains the virtual address, or NULL
* if the buffer object content is not accessible through PCI space.
* Call with bo->mutex held.
*/
int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
void **virtual)
{
struct drm_buffer_manager *bm = &dev->bm;
struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
unsigned long bus_offset;
unsigned long bus_size;
unsigned long bus_base;
int ret;
void *addr;
*virtual = NULL;
ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
if (ret || bus_size == 0)
return ret;
if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
addr = (void *)(((u8 *) man->io_addr) + bus_offset);
else {
addr = ioremap_nocache(bus_base + bus_offset, bus_size);
if (!addr)
return -ENOMEM;
}
*virtual = addr;
return 0;
}
EXPORT_SYMBOL(drm_mem_reg_ioremap);
/**
* \c Unmap a mapping obtained using drm_mem_reg_ioremap
*
* \param bo The buffer object.
*
* Call with bo->mutex held.
*/
void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
void *virtual)
{
struct drm_buffer_manager *bm;
struct drm_mem_type_manager *man;
bm = &dev->bm;
man = &bm->man[mem->mem_type];
if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
iounmap(virtual);
}
EXPORT_SYMBOL(drm_mem_reg_iounmap);
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
uint32_t *dstP =
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
uint32_t *srcP =
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
int i;
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
iowrite32(ioread32(srcP++), dstP++);
return 0;
}
static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
unsigned long page)
{
struct page *d = drm_ttm_get_page(ttm, page);
void *dst;
if (!d)
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
dst = kmap(d);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
kunmap(d);
return 0;
}
static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
{
struct page *s = drm_ttm_get_page(ttm, page);
void *src;
if (!s)
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
src = kmap(s);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
kunmap(s);
return 0;
}
int drm_bo_move_memcpy(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
struct drm_ttm *ttm = bo->ttm;
struct drm_bo_mem_reg *old_mem = &bo->mem;
struct drm_bo_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
uint64_t save_flags = old_mem->flags;
uint64_t save_proposed_flags = old_mem->proposed_flags;
unsigned long i;
unsigned long page;
unsigned long add = 0;
int dir;
ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
if (ret)
return ret;
ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
if (ret)
goto out;
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
if (old_iomap == NULL && ttm == NULL)
goto out2;
add = 0;
dir = 1;
if ((old_mem->mem_type == new_mem->mem_type) &&
(new_mem->mm_node->start <
old_mem->mm_node->start + old_mem->mm_node->size)) {
dir = -1;
add = new_mem->num_pages - 1;
}
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
if (old_iomap == NULL)
ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
else if (new_iomap == NULL)
ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
else
ret = drm_copy_io_page(new_iomap, old_iomap, page);
if (ret)
goto out1;
}
mb();
out2:
drm_bo_free_old_node(bo);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
drm_ttm_unbind(ttm);
drm_ttm_destroy(ttm);
bo->ttm = NULL;
}
out1:
drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
return ret;
}
EXPORT_SYMBOL(drm_bo_move_memcpy);
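/*
 * Editorial note on the direction logic in drm_bo_move_memcpy() above:
 * like memmove(), the loop copies backwards when the source and
 * destination ranges may overlap within the same memory type. With
 * num_pages == 4, dir == -1 and add == 3, the expression
 * page = i * dir + add visits pages 3, 2, 1, 0: the highest pages are
 * copied first, which is the safe order when the destination range
 * overlaps the source from above.
 */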
static int drm_memset_io_page(void *dst, unsigned long page)
{
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
memset_io(dst, 0, PAGE_SIZE);
return 0;
}
static int drm_memset_ttm_page(struct drm_ttm *ttm, unsigned long page)
{
struct page *d = drm_ttm_get_page(ttm, page);
void *dst;
dst = kmap(d);
if (!dst)
return -ENOMEM;
memset_io(dst, 0, PAGE_SIZE);
kunmap(d);
return 0;
}
int drm_bo_move_zero(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
struct drm_ttm *ttm = bo->ttm;
void *new_iomap;
int ret;
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
uint64_t save_proposed_flags = old_mem->proposed_flags;
unsigned long i;
unsigned long page;
ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
if (ret)
goto out;
if (new_iomap == NULL && ttm == NULL)
goto out2;
for (i = 0; i < new_mem->num_pages; ++i) {
if (new_iomap == NULL)
ret = drm_memset_ttm_page(ttm, i);
else
ret = drm_memset_io_page(new_iomap, i);
if (ret)
goto out1;
}
mb();
out2:
drm_bo_free_old_node(bo);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
drm_ttm_unbind(ttm);
drm_ttm_destroy(ttm);
bo->ttm = NULL;
}
out1:
drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
return ret;
}
EXPORT_SYMBOL(drm_bo_move_zero);
/*
* Transfer a buffer object's memory and LRU status to a newly
* created object. User-space references remain with the old
* object. Call with bo->mutex held.
*/
int drm_buffer_object_transfer(struct drm_buffer_object *bo,
struct drm_buffer_object **new_obj)
{
struct drm_buffer_object *fbo;
struct drm_device *dev = bo->dev;
struct drm_buffer_manager *bm = &dev->bm;
fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
if (!fbo)
return -ENOMEM;
*fbo = *bo;
mutex_init(&fbo->mutex);
mutex_lock(&fbo->mutex);
mutex_lock(&dev->struct_mutex);
DRM_INIT_WAITQUEUE(&bo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
INIT_LIST_HEAD(&fbo->vma_list);
INIT_LIST_HEAD(&fbo->p_mm_list);
#endif
fbo->fence = drm_fence_reference_locked(bo->fence);
fbo->pinned_node = NULL;
fbo->mem.mm_node->private = (void *)fbo;
atomic_set(&fbo->usage, 1);
atomic_inc(&bm->count);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&fbo->mutex);
*new_obj = fbo;
return 0;
}
/*
* Since move is underway, we need to block signals in this function.
* We cannot restart until it has finished.
*/
int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
int evict, int no_wait, uint32_t fence_class,
uint32_t fence_type, uint32_t fence_flags,
struct drm_bo_mem_reg *new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
struct drm_bo_mem_reg *old_mem = &bo->mem;
int ret;
uint64_t save_flags = old_mem->flags;
uint64_t save_proposed_flags = old_mem->proposed_flags;
struct drm_buffer_object *old_obj;
if (bo->fence)
drm_fence_usage_deref_unlocked(&bo->fence);
ret = drm_fence_object_create(dev, fence_class, fence_type,
fence_flags | DRM_FENCE_FLAG_EMIT,
&bo->fence);
bo->fence_type = fence_type;
if (ret)
return ret;
#ifdef DRM_ODD_MM_COMPAT
/*
* In this mode, we don't allow pipelining a copy blit,
* since the buffer will be accessible from user space
* the moment we return and rebuild the page tables.
*
* With normal vm operation, page tables are rebuilt
* on demand using fault(), which waits for buffer idle.
*/
if (1)
#else
if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
bo->mem.mm_node != NULL))
#endif
{
if (bo->fence) {
(void) drm_fence_object_wait(bo->fence, 0, 1,
bo->fence_type);
drm_fence_usage_deref_unlocked(&bo->fence);
}
drm_bo_free_old_node(bo);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
drm_ttm_unbind(bo->ttm);
drm_ttm_destroy(bo->ttm);
bo->ttm = NULL;
}
} else {
/* This should help pipeline ordinary buffer moves.
*
* Hang old buffer memory on a new buffer object,
* and leave it to be released when the GPU
* operation has completed.
*/
ret = drm_buffer_object_transfer(bo, &old_obj);
if (ret)
return ret;
if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
old_obj->ttm = NULL;
else
bo->ttm = NULL;
mutex_lock(&dev->struct_mutex);
list_del_init(&old_obj->lru);
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
drm_bo_add_to_lru(old_obj);
drm_bo_usage_deref_locked(&old_obj);
mutex_unlock(&dev->struct_mutex);
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
int drm_bo_same_page(unsigned long offset,
unsigned long offset2)
{
return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
}
EXPORT_SYMBOL(drm_bo_same_page);
unsigned long drm_bo_offset_end(unsigned long offset,
unsigned long end)
{
offset = (offset + PAGE_SIZE) & PAGE_MASK;
return (end < offset) ? end : offset;
}
EXPORT_SYMBOL(drm_bo_offset_end);
static pgprot_t drm_kernel_io_prot(uint32_t map_type)
{
pgprot_t tmp = PAGE_KERNEL;
#if defined(__i386__) || defined(__x86_64__)
#ifdef USE_PAT_WC
#warning using pat
if (drm_use_pat() && map_type == _DRM_TTM) {
pgprot_val(tmp) |= _PAGE_PAT;
return tmp;
}
#endif
if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
pgprot_val(tmp) |= _PAGE_PCD;
pgprot_val(tmp) &= ~_PAGE_PWT;
}
#elif defined(__powerpc__)
pgprot_val(tmp) |= _PAGE_NO_CACHE;
if (map_type == _DRM_REGISTERS)
pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
if (map_type == _DRM_TTM)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
unsigned long bus_offset, unsigned long bus_size,
struct drm_bo_kmap_obj *map)
{
struct drm_device *dev = bo->dev;
struct drm_bo_mem_reg *mem = &bo->mem;
struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
map->bo_kmap_type = bo_map_premapped;
map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
} else {
map->bo_kmap_type = bo_map_iomap;
map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
}
return (!map->virtual) ? -ENOMEM : 0;
}
static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct drm_bo_kmap_obj *map)
{
struct drm_device *dev = bo->dev;
struct drm_bo_mem_reg *mem = &bo->mem;
struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
pgprot_t prot;
struct drm_ttm *ttm = bo->ttm;
struct page *d;
int i;
BUG_ON(!ttm);
if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
*/
map->bo_kmap_type = bo_map_kmap;
map->page = drm_ttm_get_page(ttm, start_page);
map->virtual = kmap(map->page);
} else {
/*
* Populate the part we're mapping;
*/
for (i = start_page; i < start_page + num_pages; ++i) {
d = drm_ttm_get_page(ttm, i);
if (!d)
return -ENOMEM;
}
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
PAGE_KERNEL :
drm_kernel_io_prot(man->drm_bus_maptype);
map->bo_kmap_type = bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page,
num_pages, 0, prot);
}
return (!map->virtual) ? -ENOMEM : 0;
}
/*
* This function is to be used for kernel mapping of buffer objects.
* It chooses the appropriate mapping method depending on the memory type
* and caching policy the buffer currently has.
* Mapping multiple pages or buffers that live in io memory is a bit slow and
* consumes vmalloc space. Be restrictive with such mappings.
* Mapping single pages usually returns the logical kernel address,
* (which is fast), but may use slower temporary mappings for high memory
* pages or uncached / write-combined pages.
*
* The function fills in a drm_bo_kmap_obj which can be used to return the
* kernel virtual address of the buffer.
*
* Code servicing a non-privileged user request is only allowed to map one
* page at a time. We might need to implement a better scheme to stop such
* processes from consuming all vmalloc space.
*/
int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct drm_bo_kmap_obj *map)
{
int ret;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
map->virtual = NULL;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
&bus_offset, &bus_size);
if (ret)
return ret;
/* clear the clean flags */
bo->mem.flags &= ~DRM_BO_FLAG_CLEAN;
bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN;
if (bus_size == 0) {
return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
bus_offset += start_page << PAGE_SHIFT;
bus_size = num_pages << PAGE_SHIFT;
return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
}
}
EXPORT_SYMBOL(drm_bo_kmap);
void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
{
if (!map->virtual)
return;
switch (map->bo_kmap_type) {
case bo_map_iomap:
iounmap(map->virtual);
break;
case bo_map_vmap:
vunmap(map->virtual);
break;
case bo_map_kmap:
kunmap(map->page);
break;
case bo_map_premapped:
break;
default:
BUG();
}
map->virtual = NULL;
map->page = NULL;
}
EXPORT_SYMBOL(drm_bo_kunmap);
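/*
 * Usage illustration (not part of the original file), assuming bo->mutex
 * is held as required above; the helper name is hypothetical. A real
 * caller would need memset_io() when bo_kmap_type ends up as
 * bo_map_iomap; this sketch assumes a system-memory mapping.
 */
static int example_bo_zero_first_page(struct drm_buffer_object *bo)
{
	struct drm_bo_kmap_obj map;
	int ret;

	ret = drm_bo_kmap(bo, 0, 1, &map);	/* single-page fast path */
	if (ret)
		return ret;
	memset(map.virtual, 0, PAGE_SIZE);
	drm_bo_kunmap(&map);
	return 0;
}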
int drm_bo_pfn_prot(struct drm_buffer_object *bo,
unsigned long dst_offset,
unsigned long *pfn,
pgprot_t *prot)
{
struct drm_bo_mem_reg *mem = &bo->mem;
struct drm_device *dev = bo->dev;
unsigned long bus_offset;
unsigned long bus_size;
unsigned long bus_base;
struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
int ret;
ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
&bus_size);
if (ret)
return -EINVAL;
if (bus_size != 0)
*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
else if (!bo->ttm)
return -EINVAL;
else
*pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));
*prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);
return 0;
}
EXPORT_SYMBOL(drm_bo_pfn_prot);


@@ -53,512 +53,6 @@ int drm_unmap_page_from_agp(struct page *page)
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
/*
* The protection map was exported in 2.6.19
*/
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
static pgprot_t drm_protection_map[16] = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
return drm_protection_map[vm_flags & 0x0F];
#else
extern pgprot_t protection_map[];
return protection_map[vm_flags & 0x0F];
#endif
};
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
* VM code for kernels below 2.6.15, in which a major VM rewrite
* occurred. This implements a simple, straightforward version similar
* to what's going to be in kernel 2.6.19+.
* Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
* nopfn.
*/
static struct {
spinlock_t lock;
struct page *dummy_page;
atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data);
struct page * get_nopage_retry(void)
{
if (atomic_read(&drm_np_retry.present) == 0) {
struct page *page = alloc_page(GFP_KERNEL);
if (!page)
return NOPAGE_OOM;
spin_lock(&drm_np_retry.lock);
drm_np_retry.dummy_page = page;
atomic_set(&drm_np_retry.present,1);
spin_unlock(&drm_np_retry.lock);
}
get_page(drm_np_retry.dummy_page);
return drm_np_retry.dummy_page;
}
void free_nopage_retry(void)
{
if (atomic_read(&drm_np_retry.present) == 1) {
spin_lock(&drm_np_retry.lock);
__free_page(drm_np_retry.dummy_page);
drm_np_retry.dummy_page = NULL;
atomic_set(&drm_np_retry.present, 0);
spin_unlock(&drm_np_retry.lock);
}
}
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
struct fault_data data;
if (type)
*type = VM_FAULT_MINOR;
data.address = address;
data.vma = vma;
drm_bo_vm_fault(vma, &data);
switch (data.type) {
case VM_FAULT_OOM:
return NOPAGE_OOM;
case VM_FAULT_SIGBUS:
return NOPAGE_SIGBUS;
default:
break;
}
return NOPAGE_REFAULT;
}
#endif
#if !defined(DRM_FULL_MM_COMPAT) && \
((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
static int drm_pte_is_clear(struct vm_area_struct *vma,
unsigned long addr)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 1;
pte_t *pte;
pmd_t *pmd;
pud_t *pud;
pgd_t *pgd;
spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
goto unlock;
pud = pud_offset(pgd, addr);
if (pud_none(*pud))
goto unlock;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
goto unlock;
pte = pte_offset_map(pmd, addr);
if (!pte)
goto unlock;
ret = pte_none(*pte);
pte_unmap(pte);
unlock:
spin_unlock(&mm->page_table_lock);
return ret;
}
static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
if (!drm_pte_is_clear(vma, addr))
return -EBUSY;
ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
return ret;
}
static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
struct drm_ttm *ttm;
struct drm_device *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
dev = bo->dev;
drm_bo_read_lock(&dev->bm.bm_lock, 0);
mutex_lock(&bo->mutex);
err = drm_bo_wait(bo, 0, 1, 0, 1);
if (err) {
data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;
goto out_unlock;
}
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable one.
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
unsigned long _end = jiffies + 3*DRM_HZ;
uint32_t new_mask = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
do {
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
} while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
if (err) {
DRM_ERROR("Timeout moving buffer to mappable location.\n");
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
}
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
dev = bo->dev;
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
data->type = VM_FAULT_SIGBUS;
goto out_unlock;
}
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
} else {
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
data->type = VM_FAULT_OOM;
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
drm_io_prot(_DRM_TTM, vma);
}
err = vm_insert_pfn(vma, address, pfn);
if (!err || err == -EBUSY)
data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out_unlock:
mutex_unlock(&bo->mutex);
drm_bo_read_unlock(&dev->bm.bm_lock);
return NULL;
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
!defined(DRM_FULL_MM_COMPAT)
/**
*/
unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
unsigned long address)
{
struct fault_data data;
data.address = address;
(void) drm_bo_vm_fault(vma, &data);
if (data.type == VM_FAULT_OOM)
return NOPFN_OOM;
else if (data.type == VM_FAULT_SIGBUS)
return NOPFN_SIGBUS;
/*
* pfn already set.
*/
return 0;
}
#endif
#ifdef DRM_ODD_MM_COMPAT
/*
* VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
* first take the dev->struct_mutex and then trylock all mmap_sems. If this
* fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
* release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
* phew.
*/
typedef struct p_mm_entry {
struct list_head head;
struct mm_struct *mm;
atomic_t refcount;
int locked;
} p_mm_entry_t;
typedef struct vma_entry {
struct list_head head;
struct vm_area_struct *vma;
} vma_entry_t;
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
struct drm_ttm *ttm;
struct drm_device *dev;
mutex_lock(&bo->mutex);
if (type)
*type = VM_FAULT_MINOR;
if (address > vma->vm_end) {
page = NOPAGE_SIGBUS;
goto out_unlock;
}
dev = bo->dev;
if (drm_mem_reg_is_pci(dev, &bo->mem)) {
DRM_ERROR("Invalid compat nopage.\n");
page = NOPAGE_SIGBUS;
goto out_unlock;
}
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
page = NOPAGE_OOM;
goto out_unlock;
}
get_page(page);
out_unlock:
mutex_unlock(&bo->mutex);
return page;
}
int drm_bo_map_bound(struct vm_area_struct *vma)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
int ret = 0;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
&bus_offset, &bus_size);
BUG_ON(ret);
if (bus_size) {
struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start,
pgprot);
}
return ret;
}
int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n_entry;
vma_entry_t *v_entry;
struct mm_struct *mm = vma->vm_mm;
v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
if (!v_entry) {
DRM_ERROR("Allocation of vma pointer entry failed\n");
return -ENOMEM;
}
v_entry->vma = vma;
list_add_tail(&v_entry->head, &bo->vma_list);
list_for_each_entry(entry, &bo->p_mm_list, head) {
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&n_entry->head);
n_entry->mm = mm;
n_entry->locked = 0;
atomic_set(&n_entry->refcount, 0);
list_add_tail(&n_entry->head, &entry->head);
return 0;
}
void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n;
vma_entry_t *v_entry, *v_n;
int found = 0;
struct mm_struct *mm = vma->vm_mm;
list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
if (v_entry->vma == vma) {
found = 1;
list_del(&v_entry->head);
drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
break;
}
}
BUG_ON(!found);
list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
if (mm == entry->mm) {
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
BUG_ON(entry->locked);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
}
return;
}
}
BUG_ON(1);
}
int drm_bo_lock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
lock_ok = 0;
break;
}
entry->locked = 1;
}
if (lock_ok)
return 0;
list_for_each_entry(entry, &bo->p_mm_list, head) {
if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
/*
* Possible deadlock. Try again. Our callers should handle this
* and restart.
*/
return -EAGAIN;
}
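/*
 * Illustration (not part of the original file): the restart pattern the
 * comment above asks of drm_bo_lock_kmm() callers; bo->mutex is the
 * "ttm mutex" referred to, and the helper name is hypothetical.
 */
static void example_lock_all_mms(struct drm_buffer_object *bo)
{
	while (drm_bo_lock_kmm(bo) == -EAGAIN) {
		mutex_unlock(&bo->mutex);
		schedule();
		mutex_lock(&bo->mutex);
	}
}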
void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
}
int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
vma_entry_t *v_entry;
int ret = 0;
if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
list_for_each_entry(v_entry, &bo->vma_list, head) {
ret = drm_bo_map_bound(v_entry->vma);
if (ret)
break;
}
}
return ret;
}
void drm_bo_finish_unmap(struct drm_buffer_object *bo)
{
vma_entry_t *v_entry;
list_for_each_entry(v_entry, &bo->vma_list, head) {
v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
}
#endif
#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
@@ -806,102 +300,3 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
EXPORT_SYMBOL(kmap_atomic_prot_pfn);
#endif
#ifdef DRM_FULL_MM_COMPAT
#ifdef DRM_NO_FAULT
unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
struct drm_ttm *ttm;
struct drm_device *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
unsigned long ret = NOPFN_REFAULT;
if (address > vma->vm_end)
return NOPFN_SIGBUS;
dev = bo->dev;
err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (err)
return NOPFN_REFAULT;
err = mutex_lock_interruptible(&bo->mutex);
if (err) {
drm_bo_read_unlock(&dev->bm.bm_lock);
return NOPFN_REFAULT;
}
err = drm_bo_wait(bo, 0, 1, 0, 1);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
goto out_unlock;
}
bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable one.
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
uint32_t new_flags = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
err = drm_bo_move_buffer(bo, new_flags, 0, 0);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
}
}
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
ret = NOPFN_SIGBUS;
goto out_unlock;
}
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
} else {
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
ret = NOPFN_OOM;
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
drm_io_prot(_DRM_TTM, vma);
}
err = vm_insert_pfn(vma, address, pfn);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
goto out_unlock;
}
out_unlock:
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
mutex_unlock(&bo->mutex);
drm_bo_read_unlock(&dev->bm.bm_lock);
return ret;
}
#endif
#endif


@@ -47,11 +47,6 @@
#define preempt_enable()
#endif
#ifndef pte_offset_map
#define pte_offset_map pte_offset
#define pte_unmap(pte)
#endif
#ifndef module_param
#define module_param(name, type, perm)
#endif
@@ -171,150 +166,6 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
#endif
#include <linux/mm.h>
#include <asm/page.h>
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
#define DRM_FULL_MM_COMPAT
#endif
/*
* Flush relevant caches and clear a VMA structure so that page references
* will cause a page fault. Don't flush tlbs.
*/
extern void drm_clear_vma(struct vm_area_struct *vma,
unsigned long addr, unsigned long end);
/*
* Return the PTE protection map entries for the VMA flags given by
* flags. This is a functional interface to the kernel's protection map.
*/
extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
#ifndef GFP_DMA32
#define GFP_DMA32 GFP_KERNEL
#endif
#ifndef __GFP_DMA32
#define __GFP_DMA32 GFP_KERNEL
#endif
#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
* These are too slow in earlier kernels.
*/
extern int drm_unmap_page_from_agp(struct page *page);
extern int drm_map_page_into_agp(struct page *page);
#define map_page_into_agp drm_map_page_into_agp
#define unmap_page_from_agp drm_unmap_page_from_agp
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
#ifndef DRM_FULL_MM_COMPAT
/*
* For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
*/
struct fault_data {
struct vm_area_struct *vma;
unsigned long address;
pgoff_t pgoff;
unsigned int flags;
int type;
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
!defined(DRM_FULL_MM_COMPAT)
extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
#endif /* ndef DRM_FULL_MM_COMPAT */
#ifdef DRM_ODD_MM_COMPAT
struct drm_buffer_object;
/*
* Add a vma to the ttm vma list, and the
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
* not releasing the ttm mutex. May return -EAGAIN to avoid
* deadlocks. In that case the caller shall release the ttm mutex,
* schedule() and try again.
*/
extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
/*
* Unlock all relevant mmap_sems for a ttm.
*/
extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
/*
* If the ttm was bound to the aperture, this function shall be called
* with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
* vmas mapping this ttm. This is needed just after unmapping the ptes of
* the vma, otherwise the do_nopage() function will bug :(. The function
* releases the mmap_sems for this ttm.
*/
extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
/*
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
* fault these pfns in, because the first one will set the vma VM_PFNMAP
* flag, which will make the next fault bug in do_nopage(). The function
* releases the mmap_sems for this ttm.
*/
extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
/*
* Remap a vma for a bound ttm. Call with the ttm mutex held and
* the relevant mmap_sem locked.
*/
extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
@@ -361,6 +212,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
* pgd_offset_k() is a macro that uses the symbol init_mm,
* check that it is available.
*/
#if 0
# if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) || \
defined(CONFIG_UNUSED_SYMBOLS))
#define DRM_KMAP_ATOMIC_PROT_PFN
@@ -376,9 +228,6 @@ static inline void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
}
# endif /* no init_mm */
#endif
#if !defined(flush_agp_mappings)
#define flush_agp_mappings() do {} while(0)
#endif
#ifndef DMA_BIT_MASK


@@ -179,9 +179,6 @@ int drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
if (!drm_core_check_feature(dev, DRIVER_MODESET))
drm_bo_driver_finish(dev);
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_uninstall(dev);
@@ -222,7 +219,7 @@ int drm_lastclose(struct drm_device * dev)
/* Clear vma list (only built for debugging) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
/*
@@ -368,7 +365,6 @@ static void drm_cleanup(struct drm_device * dev)
}
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
&& dev->agp->agp_mtrr >= 0) {
@@ -382,7 +378,6 @@ static void drm_cleanup(struct drm_device * dev)
if (dev->driver->unload)
dev->driver->unload(dev);
drm_ht_remove(&dev->map_hash);
if (drm_core_has_AGP(dev) && dev->agp) {
drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
dev->agp = NULL;
@@ -393,7 +388,6 @@ static void drm_cleanup(struct drm_device * dev)
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
drm_mm_takedown(&dev->offset_manager);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
@@ -452,31 +446,8 @@ static const struct file_operations drm_stub_fops = {
static int __init drm_core_init(void)
{
int ret;
struct sysinfo si;
unsigned long avail_memctl_mem;
unsigned long max_memctl_mem;
idr_init(&drm_minors_idr);
si_meminfo(&si);
/*
* AGP only allows low / DMA32 memory ATM.
*/
avail_memctl_mem = si.totalram - si.totalhigh;
/*
* Avoid overflows
*/
max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
if (avail_memctl_mem >= max_memctl_mem)
avail_memctl_mem = max_memctl_mem;
drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit);
ret = -ENOMEM;
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))


@@ -1,540 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
/*
* Convenience function to be called by fence::wait methods that
* need polling.
*/
int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
int interruptible, uint32_t mask,
unsigned long end_jiffies)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
uint32_t count = 0;
int ret;
DECLARE_WAITQUEUE(entry, current);
add_wait_queue(&fc->fence_queue, &entry);
ret = 0;
for (;;) {
__set_current_state((interruptible) ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
if (drm_fence_object_signaled(fence, mask))
break;
if (time_after_eq(jiffies, end_jiffies)) {
ret = -EBUSY;
break;
}
if (lazy)
schedule_timeout(1);
else if ((++count & 0x0F) == 0){
__set_current_state(TASK_RUNNING);
schedule();
__set_current_state((interruptible) ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
}
if (interruptible && signal_pending(current)) {
ret = -EAGAIN;
break;
}
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&fc->fence_queue, &entry);
return ret;
}
EXPORT_SYMBOL(drm_fence_wait_polling);
/*
* Typically called by the IRQ handler.
*/
void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence, uint32_t type, uint32_t error)
{
int wake = 0;
uint32_t diff;
uint32_t relevant_type;
uint32_t new_type;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
struct drm_fence_driver *driver = dev->driver->fence_driver;
struct list_head *head;
struct drm_fence_object *fence, *next;
int found = 0;
if (list_empty(&fc->ring))
return;
list_for_each_entry(fence, &fc->ring, ring) {
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff > driver->wrap_diff) {
found = 1;
break;
}
}
fc->waiting_types &= ~type;
head = (found) ? &fence->ring : &fc->ring;
list_for_each_entry_safe_reverse(fence, next, head, ring) {
if (&fence->ring == &fc->ring)
break;
if (error) {
fence->error = error;
fence->signaled_types = fence->type;
list_del_init(&fence->ring);
wake = 1;
break;
}
if (type & DRM_FENCE_TYPE_EXE)
type |= fence->native_types;
relevant_type = type & fence->type;
new_type = (fence->signaled_types | relevant_type) ^
fence->signaled_types;
if (new_type) {
fence->signaled_types |= new_type;
DRM_DEBUG("Fence %p signaled 0x%08x\n",
fence, fence->signaled_types);
if (driver->needed_flush)
fc->pending_flush |= driver->needed_flush(fence);
if (new_type & fence->waiting_types)
wake = 1;
}
fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
if (!(fence->type & ~fence->signaled_types)) {
DRM_DEBUG("Fence completely signaled %p\n",
fence);
list_del_init(&fence->ring);
}
}
/*
* Reinstate lost waiting types.
*/
if ((fc->waiting_types & type) != type) {
head = head->prev;
list_for_each_entry(fence, head, ring) {
if (&fence->ring == &fc->ring)
break;
diff = (fc->highest_waiting_sequence - fence->sequence) &
driver->sequence_mask;
if (diff > driver->wrap_diff)
break;
fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
}
}
if (wake)
wake_up_all(&fc->fence_queue);
}
EXPORT_SYMBOL(drm_fence_handler);
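/*
 * Illustration (not part of the original file): the wrap-safe sequence
 * test drm_fence_handler() applies above. A fence is considered passed
 * by hardware sequence "seq" when the masked distance does not exceed
 * the driver's wrap_diff. The 24-bit mask below is an assumption for
 * the example; real values come from dev->driver->fence_driver.
 */
static int example_seq_passed(uint32_t seq, uint32_t fence_seq)
{
	const uint32_t sequence_mask = 0x00ffffff;
	const uint32_t wrap_diff = sequence_mask >> 1;

	return ((seq - fence_seq) & sequence_mask) <= wrap_diff;
}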
static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
{
struct drm_fence_manager *fm = &dev->fm;
unsigned long flags;
write_lock_irqsave(&fm->lock, flags);
list_del_init(ring);
write_unlock_irqrestore(&fm->lock, flags);
}
void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
struct drm_fence_manager *fm = &dev->fm;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
*fence = NULL;
if (atomic_dec_and_test(&tmp_fence->usage)) {
drm_fence_unring(dev, &tmp_fence->ring);
DRM_DEBUG("Destroyed a fence object %p\n",
tmp_fence);
atomic_dec(&fm->count);
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
}
EXPORT_SYMBOL(drm_fence_usage_deref_locked);
void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
struct drm_fence_manager *fm = &dev->fm;
*fence = NULL;
if (atomic_dec_and_test(&tmp_fence->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&tmp_fence->usage) == 0) {
drm_fence_unring(dev, &tmp_fence->ring);
atomic_dec(&fm->count);
drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
mutex_unlock(&dev->struct_mutex);
}
}
EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
struct drm_fence_object
*drm_fence_reference_locked(struct drm_fence_object *src)
{
DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
atomic_inc(&src->usage);
return src;
}
void drm_fence_reference_unlocked(struct drm_fence_object **dst,
struct drm_fence_object *src)
{
mutex_lock(&src->dev->struct_mutex);
*dst = src;
atomic_inc(&src->usage);
mutex_unlock(&src->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);
int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
{
unsigned long flags;
int signaled;
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_driver *driver = dev->driver->fence_driver;
mask &= fence->type;
read_lock_irqsave(&fm->lock, flags);
signaled = (mask & fence->signaled_types) == mask;
read_unlock_irqrestore(&fm->lock, flags);
if (!signaled && driver->poll) {
write_lock_irqsave(&fm->lock, flags);
driver->poll(dev, fence->fence_class, mask);
signaled = (mask & fence->signaled_types) == mask;
write_unlock_irqrestore(&fm->lock, flags);
}
return signaled;
}
EXPORT_SYMBOL(drm_fence_object_signaled);
int drm_fence_object_flush(struct drm_fence_object *fence,
uint32_t type)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
struct drm_fence_driver *driver = dev->driver->fence_driver;
unsigned long irq_flags;
uint32_t saved_pending_flush;
uint32_t diff;
int call_flush;
if (type & ~fence->type) {
DRM_ERROR("Flush trying to extend fence type, "
"0x%x, 0x%x\n", type, fence->type);
return -EINVAL;
}
write_lock_irqsave(&fm->lock, irq_flags);
fence->waiting_types |= type;
fc->waiting_types |= fence->waiting_types;
diff = (fence->sequence - fc->highest_waiting_sequence) &
driver->sequence_mask;
if (diff < driver->wrap_diff)
fc->highest_waiting_sequence = fence->sequence;
/*
* fence->waiting_types has changed. Determine whether
* we need to initiate some kind of flush as a result of this.
*/
saved_pending_flush = fc->pending_flush;
if (driver->needed_flush)
fc->pending_flush |= driver->needed_flush(fence);
if (driver->poll)
driver->poll(dev, fence->fence_class, fence->waiting_types);
call_flush = fc->pending_flush;
write_unlock_irqrestore(&fm->lock, irq_flags);
if (call_flush && driver->flush)
driver->flush(dev, fence->fence_class);
return 0;
}
EXPORT_SYMBOL(drm_fence_object_flush);
/*
* Make sure old fence objects are signaled before their fence sequences are
* wrapped around and reused.
*/
void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence)
{
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
struct drm_fence_object *fence;
unsigned long irq_flags;
struct drm_fence_driver *driver = dev->driver->fence_driver;
int call_flush;
uint32_t diff;
write_lock_irqsave(&fm->lock, irq_flags);
list_for_each_entry_reverse(fence, &fc->ring, ring) {
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff <= driver->flush_diff)
break;
fence->waiting_types = fence->type;
fc->waiting_types |= fence->type;
if (driver->needed_flush)
fc->pending_flush |= driver->needed_flush(fence);
}
if (driver->poll)
driver->poll(dev, fence_class, fc->waiting_types);
call_flush = fc->pending_flush;
write_unlock_irqrestore(&fm->lock, irq_flags);
if (call_flush && driver->flush)
driver->flush(dev, fence_class);
/*
* FIXME: Should we implement a wait here for really old fences?
*/
}
EXPORT_SYMBOL(drm_fence_flush_old);
int drm_fence_object_wait(struct drm_fence_object *fence,
int lazy, int ignore_signals, uint32_t mask)
{
struct drm_device *dev = fence->dev;
struct drm_fence_driver *driver = dev->driver->fence_driver;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
int ret = 0;
unsigned long _end = jiffies + 3 * DRM_HZ;
if (mask & ~fence->type) {
DRM_ERROR("Wait trying to extend fence type"
" 0x%08x 0x%08x\n", mask, fence->type);
BUG();
return -EINVAL;
}
if (driver->wait)
return driver->wait(fence, lazy, !ignore_signals, mask);
drm_fence_object_flush(fence, mask);
if (driver->has_irq(dev, fence->fence_class, mask)) {
if (!ignore_signals)
ret = wait_event_interruptible_timeout
(fc->fence_queue,
drm_fence_object_signaled(fence, mask),
3 * DRM_HZ);
else
ret = wait_event_timeout
(fc->fence_queue,
drm_fence_object_signaled(fence, mask),
3 * DRM_HZ);
if (unlikely(ret == -ERESTARTSYS))
return -EAGAIN;
if (unlikely(ret == 0))
return -EBUSY;
return 0;
}
return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
_end);
}
EXPORT_SYMBOL(drm_fence_object_wait);
int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
uint32_t fence_class, uint32_t type)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_driver *driver = dev->driver->fence_driver;
struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
unsigned long flags;
uint32_t sequence;
uint32_t native_types;
int ret;
drm_fence_unring(dev, &fence->ring);
ret = driver->emit(dev, fence_class, fence_flags, &sequence,
&native_types);
if (ret)
return ret;
write_lock_irqsave(&fm->lock, flags);
fence->fence_class = fence_class;
fence->type = type;
fence->waiting_types = 0;
fence->signaled_types = 0;
fence->error = 0;
fence->sequence = sequence;
fence->native_types = native_types;
if (list_empty(&fc->ring))
fc->highest_waiting_sequence = sequence - 1;
list_add_tail(&fence->ring, &fc->ring);
fc->latest_queued_sequence = sequence;
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
EXPORT_SYMBOL(drm_fence_object_emit);
static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
uint32_t type,
uint32_t fence_flags,
struct drm_fence_object *fence)
{
int ret = 0;
unsigned long flags;
struct drm_fence_manager *fm = &dev->fm;
mutex_lock(&dev->struct_mutex);
atomic_set(&fence->usage, 1);
mutex_unlock(&dev->struct_mutex);
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
/*
* Avoid hitting BUG() for kernel-only fence objects.
*/
fence->fence_class = fence_class;
fence->type = type;
fence->signaled_types = 0;
fence->waiting_types = 0;
fence->sequence = 0;
fence->error = 0;
fence->dev = dev;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
ret = drm_fence_object_emit(fence, fence_flags,
fence->fence_class, type);
}
return ret;
}
int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
uint32_t type, unsigned flags,
struct drm_fence_object **c_fence)
{
struct drm_fence_object *fence;
int ret;
struct drm_fence_manager *fm = &dev->fm;
fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
if (!fence) {
DRM_ERROR("Out of memory creating fence object\n");
return -ENOMEM;
}
ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
*c_fence = fence;
atomic_inc(&fm->count);
return 0;
}
EXPORT_SYMBOL(drm_fence_object_create);
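/*
 * Illustrative usage sketch: create a fence that is emitted immediately,
 * wait for the EXE type to signal, then drop the reference. Fence class 0
 * is an assumption; drivers may use several fence classes.
 */
static int example_fence_sync(struct drm_device *dev)
{
	struct drm_fence_object *fence;
	int ret;

	ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
				      DRM_FENCE_FLAG_EMIT, &fence);
	if (ret)
		return ret;

	ret = drm_fence_object_wait(fence, 1 /* lazy */,
				    0 /* honor signals */,
				    DRM_FENCE_TYPE_EXE);
	drm_fence_usage_deref_unlocked(&fence);
	return ret;
}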
void drm_fence_manager_init(struct drm_device *dev)
{
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fence_class;
struct drm_fence_driver *fed = dev->driver->fence_driver;
int i;
unsigned long flags;
rwlock_init(&fm->lock);
write_lock_irqsave(&fm->lock, flags);
fm->initialized = 0;
if (!fed)
goto out_unlock;
fm->initialized = 1;
fm->num_classes = fed->num_classes;
BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
for (i = 0; i < fm->num_classes; ++i) {
fence_class = &fm->fence_class[i];
memset(fence_class, 0, sizeof(*fence_class));
INIT_LIST_HEAD(&fence_class->ring);
DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
}
atomic_set(&fm->count, 0);
out_unlock:
write_unlock_irqrestore(&fm->lock, flags);
}
void drm_fence_manager_takedown(struct drm_device *dev)
{
}


@@ -33,115 +33,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/highmem.h>
#include "drmP.h"
static struct {
spinlock_t lock;
uint64_t cur_used;
uint64_t emer_used;
uint64_t low_threshold;
uint64_t high_threshold;
uint64_t emer_threshold;
} drm_memctl = {
.lock = SPIN_LOCK_UNLOCKED
};
static inline size_t drm_size_align(size_t size)
{
size_t tmpSize = 4;
if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
while (tmpSize < size)
tmpSize <<= 1;
return (size_t) tmpSize;
}
int drm_alloc_memctl(size_t size)
{
int ret = 0;
unsigned long a_size = drm_size_align(size);
unsigned long new_used;
spin_lock(&drm_memctl.lock);
new_used = drm_memctl.cur_used + a_size;
if (likely(new_used < drm_memctl.high_threshold)) {
drm_memctl.cur_used = new_used;
goto out;
}
/*
* Allow small allocations from root-only processes to
* succeed until the emergency threshold is reached.
*/
new_used += drm_memctl.emer_used;
if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
(a_size > 16*PAGE_SIZE) ||
(new_used > drm_memctl.emer_threshold))) {
ret = -ENOMEM;
goto out;
}
drm_memctl.cur_used = drm_memctl.high_threshold;
drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
out:
spin_unlock(&drm_memctl.lock);
return ret;
}
EXPORT_SYMBOL(drm_alloc_memctl);
void drm_free_memctl(size_t size)
{
unsigned long a_size = drm_size_align(size);
spin_lock(&drm_memctl.lock);
if (likely(a_size >= drm_memctl.emer_used)) {
a_size -= drm_memctl.emer_used;
drm_memctl.emer_used = 0;
} else {
drm_memctl.emer_used -= a_size;
a_size = 0;
}
drm_memctl.cur_used -= a_size;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_free_memctl);
void drm_query_memctl(uint64_t *cur_used,
uint64_t *emer_used,
uint64_t *low_threshold,
uint64_t *high_threshold,
uint64_t *emer_threshold)
{
spin_lock(&drm_memctl.lock);
*cur_used = drm_memctl.cur_used;
*emer_used = drm_memctl.emer_used;
*low_threshold = drm_memctl.low_threshold;
*high_threshold = drm_memctl.high_threshold;
*emer_threshold = drm_memctl.emer_threshold;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_query_memctl);
void drm_init_memctl(size_t p_low_threshold,
size_t p_high_threshold,
size_t unit_size)
{
spin_lock(&drm_memctl.lock);
drm_memctl.emer_used = 0;
drm_memctl.cur_used = 0;
drm_memctl.low_threshold = p_low_threshold * unit_size;
drm_memctl.high_threshold = p_high_threshold * unit_size;
drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
drm_memctl.high_threshold;
spin_unlock(&drm_memctl.lock);
}
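/*
 * Illustrative sketch of how the accounting above is meant to be used:
 * reserve the size before allocating, and release the reservation if the
 * allocation fails (and again when the memory is eventually freed).
 */
static void *example_managed_alloc(size_t size)
{
	void *mem;

	if (drm_alloc_memctl(size))
		return NULL;

	mem = kmalloc(size, GFP_KERNEL);
	if (!mem)
		drm_free_memctl(size);

	return mem;
}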
#ifndef DEBUG_MEMORY
/** No-op. */


@@ -82,7 +82,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
struct drm_mm_node *child;
child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
drm_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -118,7 +118,7 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
struct drm_mm_node *child;
child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
drm_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -199,8 +199,8 @@ void drm_mm_put_block(struct drm_mm_node * cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
drm_ctl_free(next_node, sizeof(*next_node),
DRM_MEM_MM);
drm_free(next_node, sizeof(*next_node),
DRM_MEM_MM);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@@ -213,7 +213,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
drm_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
EXPORT_SYMBOL(drm_mm_put_block);
@@ -292,6 +292,6 @@ void drm_mm_takedown(struct drm_mm * mm)
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
}
EXPORT_SYMBOL(drm_mm_takedown);


@@ -1,917 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H
struct drm_device;
struct drm_bo_mem_reg;
#define DRM_FENCE_FLAG_EMIT 0x00000001
#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
/**
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
#define DRM_FENCE_FLAG_NO_USER 0x00000010
/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER 0xFF000000
#define DRM_FENCE_TYPE_EXE 0x00000001
struct drm_fence_arg {
unsigned int handle;
unsigned int fence_class;
unsigned int type;
unsigned int flags;
unsigned int signaled;
unsigned int error;
unsigned int sequence;
unsigned int pad64;
uint64_t expand_pad[2]; /*Future expansion */
};
/* Buffer permissions, referring to how the GPU uses the buffers.
* These translate to fence types used for the buffers.
* Typically a texture buffer is read, a destination buffer is write and
* a command (batch-) buffer is exe. They can be or-ed together.
*/
#define DRM_BO_FLAG_READ (1ULL << 0)
#define DRM_BO_FLAG_WRITE (1ULL << 1)
#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
* All of the bits related to access mode
*/
#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
* Status flags. Can be read to determine the actual state of a buffer.
* Can also be set in the buffer mask before validation.
*/
/*
* Mask: Never evict this buffer. Not even with force. This type of buffer is only
* available to root and must be manually removed before buffer manager shutdown
* or lock.
* Flags: Acknowledge
*/
#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache coherent memory if validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
#define DRM_BO_FLAG_CACHED (1ULL << 7)
/* Mask: Make sure that every time this buffer is validated,
* it ends up on the same location provided that the memory mask is the same.
* The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer but it may be thrown out as
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
/*
* Mask: if set, the buffer contents are discardable
* Flags: if set, the buffer contents are discardable on migration
*/
#define DRM_BO_FLAG_DISCARDABLE (1ULL << 9)
/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
* with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
* with unsnooped PTEs instead of snooped, by using chipset-specific cache
* flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
* as the eviction to local memory (TTM unbind) on map is just a side effect
* to prevent aggressive cache prefetch from the GPU disturbing the cache
* management that the DRM is doing.
*
* Flags: Acknowledge.
* Buffers allocated with this flag should not be used for suballocators
* This type may have issues on CPUs with over-aggressive caching
* http://marc.info/?l=linux-kernel&m=102376926732464&w=2
*/
#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
/*
* Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE (1ULL << 15)
/*
* Buffer has been mapped or touched since creation
* for VRAM we don't need to migrate, just fill with 0s for non-dirty
*/
#define DRM_BO_FLAG_CLEAN (1ULL << 16)
/*
* Memory type flags that can be or'ed together in the mask, but only
* one appears in flags.
*/
/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */
/*
* This is a mask covering all of the memory type flags; easier to just
* use a single constant than a bunch of | values. It covers
* DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
*/
#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
/*
* This adds all of the CPU-mapping options in with the memory
* type to label all bits which change how the page gets mapped
*/
#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
DRM_BO_FLAG_CACHED_MAPPED | \
DRM_BO_FLAG_CACHED | \
DRM_BO_FLAG_MAPPABLE)
/* Driver-private flags */
#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
/*
* Don't block on validate and map. Instead, return EBUSY.
*/
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
/*
* Don't place this buffer on the unfenced list. This means
* that the buffer will not end up having a fence associated
* with it as a result of this operation
*/
#define DRM_BO_HINT_DONT_FENCE 0x00000004
/**
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
/*
* The client has computed relocations referring to this buffer using the
* offset in the presumed_offset field. If that offset ends up matching
* where this buffer lands, the kernel is free to skip executing those
* relocations.
*/
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7
#define DRM_BO_MEM_TYPES 8 /* For now. */
#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
/***************************************************
* Fence objects. (drm_fence.c)
*/
struct drm_fence_object {
struct drm_device *dev;
atomic_t usage;
/*
* The below three fields are protected by the fence manager spinlock.
*/
struct list_head ring;
int fence_class;
uint32_t native_types;
uint32_t type;
uint32_t signaled_types;
uint32_t sequence;
uint32_t waiting_types;
uint32_t error;
};
#define _DRM_FENCE_CLASSES 8
struct drm_fence_class_manager {
struct list_head ring;
uint32_t pending_flush;
uint32_t waiting_types;
wait_queue_head_t fence_queue;
uint32_t highest_waiting_sequence;
uint32_t latest_queued_sequence;
};
struct drm_fence_manager {
int initialized;
rwlock_t lock;
struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
uint32_t num_classes;
atomic_t count;
};
struct drm_fence_driver {
unsigned long *waiting_jiffies;
uint32_t num_classes;
uint32_t wrap_diff;
uint32_t flush_diff;
uint32_t sequence_mask;
/*
* Driver implemented functions:
* has_irq() : 1 if the hardware can update the indicated type_flags using an
* irq handler. 0 if polling is required.
*
* emit() : Emit a sequence number to the command stream.
* Return the sequence number.
*
* flush() : Make sure the flags indicated in fc->pending_flush will eventually
* signal for fc->highest_received_sequence and all preceding sequences.
* Acknowledge by clearing the flags fc->pending_flush.
*
* poll() : Call drm_fence_handler with any new information.
*
* needed_flush() : Given the current state of the fence->type flags and previously
* executed or queued flushes, return the type_flags that need flushing.
*
* wait(): Wait for the "mask" flags to signal on a given fence, performing
* whatever's necessary to make this happen.
*/
int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
uint32_t flags);
int (*emit) (struct drm_device *dev, uint32_t fence_class,
uint32_t flags, uint32_t *breadcrumb,
uint32_t *native_type);
void (*flush) (struct drm_device *dev, uint32_t fence_class);
void (*poll) (struct drm_device *dev, uint32_t fence_class,
uint32_t types);
uint32_t (*needed_flush) (struct drm_fence_object *fence);
int (*wait) (struct drm_fence_object *fence, int lazy,
int interruptible, uint32_t mask);
};
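/*
 * Illustrative sketch: a minimal fence_driver instance for hardware with
 * a single fence class, a full 32-bit breadcrumb and no flush machinery.
 * The foo_* functions are hypothetical; NULL hooks fall back to the
 * generic fence code.
 */
static struct drm_fence_driver foo_fence_driver = {
	.num_classes = 1,
	.wrap_diff = (1U << 30),
	.flush_diff = (1U << 29),
	.sequence_mask = 0xFFFFFFFFU,
	.has_irq = foo_fence_has_irq,
	.emit = foo_fence_emit,
	.flush = NULL,
	.poll = foo_fence_poll,
	.needed_flush = NULL,
	.wait = NULL,
};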
extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
int interruptible, uint32_t mask,
unsigned long end_jiffies);
extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence, uint32_t type,
uint32_t error);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence);
extern int drm_fence_object_flush(struct drm_fence_object *fence,
uint32_t type);
extern int drm_fence_object_signaled(struct drm_fence_object *fence,
uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
struct drm_fence_object *src);
extern int drm_fence_object_wait(struct drm_fence_object *fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t fence_class,
struct drm_fence_object **c_fence);
extern int drm_fence_object_emit(struct drm_fence_object *fence,
uint32_t fence_flags, uint32_t class,
uint32_t type);
extern void drm_fence_fill_arg(struct drm_fence_object *fence,
struct drm_fence_arg *arg);
extern int drm_fence_add_user_object(struct drm_file *priv,
struct drm_fence_object *fence,
int shareable);
extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/**************************************************
*TTMs
*/
/*
* The ttm backend GTT interface. (In our case AGP).
* Any similar type of device (PCIE?)
* needs only to implement these functions to be usable with the TTM interface.
* The AGP backend implementation lives in drm_agpsupport.c and
* basically maps these calls to available functions in agpgart.
* Each drm device driver gets an
* additional function pointer that creates these types,
* so that the device can choose the correct aperture.
* (Multiple AGP apertures, etc.)
* Most device drivers will let this point to the standard AGP implementation.
*/
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
struct drm_ttm_backend;
struct drm_ttm_backend_func {
int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
int (*populate) (struct drm_ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page);
void (*clear) (struct drm_ttm_backend *backend);
int (*bind) (struct drm_ttm_backend *backend,
struct drm_bo_mem_reg *bo_mem);
int (*unbind) (struct drm_ttm_backend *backend);
void (*destroy) (struct drm_ttm_backend *backend);
};
/**
* This structure associates a set of flags and methods with a drm_ttm
* object, and will also be subclassed by the particular backend.
*
* \sa #drm_agp_ttm_backend
*/
struct drm_ttm_backend {
struct drm_device *dev;
uint32_t flags;
struct drm_ttm_backend_func *func;
};
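/*
 * Illustrative sketch (an assumption, not a requirement of the interface):
 * a driver backend can embed struct drm_ttm_backend first, so the core is
 * handed the base pointer while the driver recovers its private state with
 * a cast or container_of().
 */
struct foo_ttm_backend {
	struct drm_ttm_backend base;	/* kept first so the cast is valid */
	/* driver-private bind state (aperture offsets etc.) goes here */
};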
struct drm_ttm {
struct page *dummy_read_page;
struct page **pages;
long first_himem_page;
long last_lomem_page;
uint32_t page_flags;
unsigned long num_pages;
atomic_t vma_count;
struct drm_device *dev;
int destroy;
uint32_t mapping_offset;
struct drm_ttm_backend *be;
unsigned long highest_lomem_entry;
unsigned long lowest_himem_entry;
enum {
ttm_bound,
ttm_evicted,
ttm_unbound,
ttm_unpopulated,
} state;
};
extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
uint32_t page_flags,
struct page *dummy_read_page);
extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm *ttm);
extern void drm_ttm_evict(struct drm_ttm *ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
extern int drm_ttm_populate(struct drm_ttm *ttm);
extern int drm_ttm_set_user(struct drm_ttm *ttm,
struct task_struct *tsk,
unsigned long start,
unsigned long num_pages);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
* this, which calls this function iff there are no vmas referencing it anymore.
* Otherwise it is called when the last vma exits.
*/
extern int drm_ttm_destroy(struct drm_ttm *ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
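/*
 * Example: replace only the memory-type bits of a flag word, leaving the
 * caching and access bits untouched:
 *
 *	uint64_t flags = bo->mem.flags;
 *	DRM_FLAG_MASKED(flags, DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM);
 */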
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
* Page flags.
*/
/*
* This ttm should not be cached by the CPU
*/
#define DRM_TTM_PAGE_UNCACHED (1 << 0)
/*
* This flag is not used at this time; I don't know what the
* intent was
*/
#define DRM_TTM_PAGE_USED (1 << 1)
/*
* This flag is not used at this time; I don't know what the
* intent was
*/
#define DRM_TTM_PAGE_BOUND (1 << 2)
/*
* This flag is not used at this time; I don't know what the
* intent was
*/
#define DRM_TTM_PAGE_PRESENT (1 << 3)
/*
* The array of page pointers was allocated with vmalloc
* instead of drm_calloc.
*/
#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4)
/*
* This ttm is mapped from user space
*/
#define DRM_TTM_PAGE_USER (1 << 5)
/*
* This ttm will be written to by the GPU
*/
#define DRM_TTM_PAGE_WRITE (1 << 6)
/*
* This ttm was mapped to the GPU, and so the contents may have
* been modified
*/
#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
/*
* This flag is not used at this time; I don't know what the
* intent was.
*/
#define DRM_TTM_PAGE_USER_DMA (1 << 8)
/***************************************************
* Buffer objects. (drm_bo.c, drm_bo_move.c)
*/
struct drm_bo_mem_reg {
struct drm_mm_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
/*
* Current buffer status flags, indicating
* where the buffer is located and which
* access modes are in effect
*/
uint64_t flags;
/**
* These are the flags proposed for
* a validate operation. If the
* validate succeeds, they'll get moved
* into the flags field
*/
uint64_t proposed_flags;
uint32_t desired_tile_stride;
uint32_t hw_tile_stride;
};
enum drm_bo_type {
/*
* drm_bo_type_device are 'normal' drm allocations,
* pages are allocated from within the kernel automatically
* and the objects can be mmap'd from the drm device. Each
* drm_bo_type_device object has a unique name which can be
* used by other processes to share access to the underlying
* buffer.
*/
drm_bo_type_device,
/*
* drm_bo_type_user are buffers of pages that already exist
* in the process address space. They are more limited than
* drm_bo_type_device buffers in that they must always
* remain cached (as we assume the user pages are mapped cached),
* and they are not sharable to other processes through DRM
* (although, regular shared memory should still work fine).
*/
drm_bo_type_user,
/*
* drm_bo_type_kernel are buffers that exist solely for use
* within the kernel. The pages cannot be mapped into the
* process. One obvious use would be for the ring
* buffer where user access would not (ideally) be required.
*/
drm_bo_type_kernel,
};
struct drm_buffer_object {
struct drm_device *dev;
/*
* If there is a possibility that the usage variable is zero,
* then dev->struct_mutex should be locked before incrementing it.
*/
atomic_t usage;
unsigned long buffer_start;
enum drm_bo_type type;
unsigned long offset;
atomic_t mapped;
struct drm_bo_mem_reg mem;
struct list_head lru;
struct list_head ddestroy;
uint32_t fence_type;
uint32_t fence_class;
uint32_t new_fence_type;
uint32_t new_fence_class;
struct drm_fence_object *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
struct mutex mutex;
unsigned long num_pages;
/* For pinned buffers */
struct drm_mm_node *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
/* For vm */
struct drm_ttm *ttm;
struct drm_map_list map_list;
uint32_t memory_type;
unsigned long bus_offset;
uint32_t vm_flags;
void *iomap;
#ifdef DRM_ODD_MM_COMPAT
/* dev->struct_mutex only protected. */
struct list_head vma_list;
struct list_head p_mm_list;
#endif
};
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
/*
* This flag indicates that a function called with bo->mutex held has
* temporarily released the buffer object mutex (usually to wait for something),
* and thus any post-lock validation needs to be rerun.
*/
#define _DRM_BO_FLAG_UNLOCKED 0x00000004
struct drm_mem_type_manager {
int has_type;
int use_type;
int kern_init_type;
struct drm_mm manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;
uint32_t drm_bus_maptype;
unsigned long gpu_offset;
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
uint64_t size; /* size of managed area for reporting to userspace */
};
struct drm_bo_lock {
// struct drm_user_object base;
wait_queue_head_t queue;
atomic_t write_lock_pending;
atomic_t readers;
};
#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
#define _DRM_BM_ALLOCATOR_CACHED 0x0
#define _DRM_BM_ALLOCATOR_UNCACHED 0x1
struct drm_buffer_manager {
struct drm_bo_lock bm_lock;
struct mutex evict_mutex;
int nice_mode;
int initialized;
struct drm_file *last_to_validate;
struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
struct work_struct wq;
#else
struct delayed_work wq;
#endif
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
struct page *dummy_read_page;
int allocator_type;
};
struct drm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
struct drm_ttm_backend *(*create_ttm_backend_entry)
(struct drm_device *dev);
int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
uint32_t *type);
int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
int (*init_mem_type) (struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
/*
* evict_flags:
*
* @bo: the buffer object to be evicted
*
* Return the bo flags for a buffer which is not mapped to the hardware.
* These will be placed in proposed_flags so that when the move is
* finished, they'll end up in bo->mem.flags
*/
uint64_t(*evict_flags) (struct drm_buffer_object *bo);
/*
* move:
*
* @bo: the buffer to move
*
* @evict: whether this motion is evicting the buffer from
* the graphics address space
*
* @no_wait: whether this should give up and return -EBUSY
* if this move would require sleeping
*
* @new_mem: the new memory region receiving the buffer
*
* Move a buffer between two memory regions.
*/
int (*move) (struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
/*
* ttm_cache_flush
*/
void (*ttm_cache_flush)(struct drm_ttm *ttm);
/*
* command_stream_barrier
*
* @dev: The drm device.
*
* @bo: The buffer object to validate.
*
* @new_fence_class: The new fence class for the buffer object.
*
* @new_fence_type: The new fence type for the buffer object.
*
* @no_wait: whether this should give up and return -EBUSY
* if this operation would require sleeping
*
* Insert a command stream barrier that makes sure that the
* buffer is idle once the commands associated with the
* current validation are starting to execute. If an error
* condition is returned, or the function pointer is NULL,
* the drm core will force buffer idle
* during validation.
*/
int (*command_stream_barrier) (struct drm_buffer_object *bo,
uint32_t new_fence_class,
uint32_t new_fence_type,
int no_wait);
};
/*
* buffer objects (drm_bo.c)
*/
int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class);
extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
struct drm_bo_mem_reg *mem,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
extern int drm_bo_add_user_object(struct drm_file *file_priv,
struct drm_buffer_object *bo, int shareable);
extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
extern void drm_putback_buffer_objects(struct drm_device *dev);
extern int drm_fence_buffer_objects(struct drm_device *dev,
struct list_head *list,
uint32_t fence_flags,
struct drm_fence_object *fence,
struct drm_fence_object **used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
enum drm_bo_type type, uint64_t flags,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
int no_wait, int check_unfenced);
extern int drm_bo_mem_space(struct drm_buffer_object *bo,
struct drm_bo_mem_reg *mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
uint64_t new_mem_flags,
int no_wait, int move_unfenced);
extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean);
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size,
int kern_init);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
uint32_t handle,
int check_owner);
extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
extern void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait);
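/*
 * Illustrative usage sketch: create a kernel-internal buffer object and
 * validate it into TT memory. The flag and hint choices here are an
 * assumption, not the only valid combination.
 */
static int example_create_tt_bo(struct drm_device *dev, unsigned long size,
				struct drm_buffer_object **bo)
{
	int ret;

	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
				       DRM_BO_FLAG_MEM_TT,
				       DRM_BO_HINT_DONT_FENCE, 0, 0, bo);
	if (ret)
		return ret;

	ret = drm_bo_do_validate(*bo, DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM,
				 DRM_BO_HINT_DONT_FENCE, 0);
	if (ret)
		drm_bo_usage_deref_unlocked(bo);

	return ret;
}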
/*
* Buffer object memory move- and map helpers.
* drm_bo_move.c
*/
extern int drm_bo_add_ttm(struct drm_buffer_object *bo);
extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
int evict, int no_wait,
struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
int evict,
int no_wait, struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_zero(struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
int evict, int no_wait,
uint32_t fence_class, uint32_t fence_type,
uint32_t fence_flags,
struct drm_bo_mem_reg *new_mem);
extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
extern unsigned long drm_bo_offset_end(unsigned long offset,
unsigned long end);
struct drm_bo_kmap_obj {
void *virtual;
struct page *page;
enum {
bo_map_iomap,
bo_map_vmap,
bo_map_kmap,
bo_map_premapped,
} bo_kmap_type;
};
static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
{
*is_iomem = (map->bo_kmap_type == bo_map_iomap ||
map->bo_kmap_type == bo_map_premapped);
return map->virtual;
}
extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct drm_bo_kmap_obj *map);
extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
unsigned long dst_offset,
unsigned long *pfn,
pgprot_t *prot);
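/*
 * Illustrative usage sketch: map the first page of a buffer object and
 * clear it, honouring the iomem distinction reported by drm_bmo_virtual().
 */
static int example_clear_first_page(struct drm_buffer_object *bo)
{
	struct drm_bo_kmap_obj map;
	void *virtual;
	int is_iomem;
	int ret;

	ret = drm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	virtual = drm_bmo_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
	else
		memset(virtual, 0, PAGE_SIZE);

	drm_bo_kunmap(&map);
	return 0;
}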
/*
* drm_regman.c
*/
struct drm_reg {
struct list_head head;
struct drm_fence_object *fence;
uint32_t fence_type;
uint32_t new_fence_type;
};
struct drm_reg_manager {
struct list_head free;
struct list_head lru;
struct list_head unfenced;
int (*reg_reusable)(const struct drm_reg *reg, const void *data);
void (*reg_destroy)(struct drm_reg *reg);
};
extern int drm_regs_alloc(struct drm_reg_manager *manager,
const void *data,
uint32_t fence_class,
uint32_t fence_type,
int interruptible,
int no_wait,
struct drm_reg **reg);
extern void drm_regs_fence(struct drm_reg_manager *regs,
struct drm_fence_object *fence);
extern void drm_regs_free(struct drm_reg_manager *manager);
extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
extern void drm_regs_init(struct drm_reg_manager *manager,
int (*reg_reusable)(const struct drm_reg *,
const void *),
void (*reg_destroy)(struct drm_reg *));
extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
void **virtual);
extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
void *virtual);
/*
* drm_uncached.c
*/
extern int drm_uncached_init(void);
extern void drm_uncached_fini(void);
extern struct page *drm_get_uncached_page(void);
extern void drm_put_uncached_page(struct page *page);
#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex) \
BUG_ON(!mutex_is_locked(_mutex) || \
((_mutex)->owner != current_thread_info()))
#else
#define DRM_ASSERT_LOCKED(_mutex)
#endif
#endif


@@ -49,8 +49,6 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_objects_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_object_info(char *buf, char **start, off_t offset,
@@ -73,7 +71,6 @@ static struct drm_proc_list {
{"clients", drm_clients_info},
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"objects", drm_objects_info},
{"gem_names", drm_gem_name_info},
{"gem_objects", drm_gem_object_info},
#if DRM_DEBUG_CODE
@@ -435,105 +432,6 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
return ret;
}
/**
* Called when "/proc/dri/.../objects" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*/
static int drm__objects_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_fence_manager *fm = &dev->fm;
uint64_t used_mem;
uint64_t used_emer;
uint64_t low_mem;
uint64_t high_mem;
uint64_t emer_mem;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Object accounting:\n\n");
if (fm->initialized) {
DRM_PROC_PRINT("Number of active fence objects: %d.\n",
atomic_read(&fm->count));
} else {
DRM_PROC_PRINT("Fence objects are not supported by this driver\n");
}
if (bm->initialized) {
DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
atomic_read(&bm->count));
}
DRM_PROC_PRINT("Memory accounting:\n\n");
if (bm->initialized) {
DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
} else {
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
}
drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem);
if (used_mem > 16*PAGE_SIZE) {
DRM_PROC_PRINT("Used object memory is %lu pages.\n",
(unsigned long) (used_mem >> PAGE_SHIFT));
} else {
DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
(unsigned long) used_mem);
}
if (used_emer > 16*PAGE_SIZE) {
DRM_PROC_PRINT("Used emergency memory is %lu pages.\n",
(unsigned long) (used_emer >> PAGE_SHIFT));
} else {
DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n",
(unsigned long) used_emer);
}
DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
(unsigned long) (low_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
(unsigned long) (high_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n",
(unsigned long) (emer_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("\n");
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
/**
* Simply calls drm__objects_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_objects_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__objects_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* Called when "/proc/dri/.../clients" is read.
*


@@ -1,200 +0,0 @@
/**************************************************************************
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* An allocate-fence manager implementation intended for sets of base-registers
* or tiling-registers.
*/
#include "drmP.h"
/*
* Allocate a compatible register and put it on the unfenced list.
*/
int drm_regs_alloc(struct drm_reg_manager *manager,
const void *data,
uint32_t fence_class,
uint32_t fence_type,
int interruptible, int no_wait, struct drm_reg **reg)
{
struct drm_reg *entry, *next_entry;
int ret;
*reg = NULL;
/*
* Search the unfenced list.
*/
list_for_each_entry(entry, &manager->unfenced, head) {
if (manager->reg_reusable(entry, data)) {
entry->new_fence_type |= fence_type;
goto out;
}
}
/*
* Search the lru list.
*/
list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
struct drm_fence_object *fence = entry->fence;
if (fence->fence_class == fence_class &&
(entry->fence_type & fence_type) == entry->fence_type &&
manager->reg_reusable(entry, data)) {
list_del(&entry->head);
entry->new_fence_type = fence_type;
list_add_tail(&entry->head, &manager->unfenced);
goto out;
}
}
/*
* Search the free list.
*/
list_for_each_entry(entry, &manager->free, head) {
list_del(&entry->head);
entry->new_fence_type = fence_type;
list_add_tail(&entry->head, &manager->unfenced);
goto out;
}
if (no_wait)
return -EBUSY;
/*
* Go back to the lru list and try to expire fences.
*/
list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
BUG_ON(!entry->fence);
ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
entry->fence_type);
if (ret)
return ret;
drm_fence_usage_deref_unlocked(&entry->fence);
list_del(&entry->head);
entry->new_fence_type = fence_type;
list_add_tail(&entry->head, &manager->unfenced);
goto out;
}
/*
* Oops. All registers are used up :(.
*/
return -EBUSY;
out:
*reg = entry;
return 0;
}
EXPORT_SYMBOL(drm_regs_alloc);
void drm_regs_fence(struct drm_reg_manager *manager,
struct drm_fence_object *fence)
{
struct drm_reg *entry;
struct drm_reg *next_entry;
if (!fence) {
/*
* Old fence (if any) is still valid.
* Put back on free and lru lists.
*/
list_for_each_entry_safe_reverse(entry, next_entry,
&manager->unfenced, head) {
list_del(&entry->head);
list_add(&entry->head, (entry->fence) ?
&manager->lru : &manager->free);
}
} else {
/*
* Fence with a new fence and put on lru list.
*/
list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
head) {
list_del(&entry->head);
if (entry->fence)
drm_fence_usage_deref_unlocked(&entry->fence);
drm_fence_reference_unlocked(&entry->fence, fence);
entry->fence_type = entry->new_fence_type;
BUG_ON((entry->fence_type & fence->type) !=
entry->fence_type);
list_add_tail(&entry->head, &manager->lru);
}
}
}
EXPORT_SYMBOL(drm_regs_fence);
void drm_regs_free(struct drm_reg_manager *manager)
{
struct drm_reg *entry;
struct drm_reg *next_entry;
drm_regs_fence(manager, NULL);
list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
list_del(&entry->head);
manager->reg_destroy(entry);
}
list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
(void)drm_fence_object_wait(entry->fence, 1, 1,
entry->fence_type);
list_del(&entry->head);
drm_fence_usage_deref_unlocked(&entry->fence);
manager->reg_destroy(entry);
}
}
EXPORT_SYMBOL(drm_regs_free);
void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
{
reg->fence = NULL;
list_add_tail(&reg->head, &manager->free);
}
EXPORT_SYMBOL(drm_regs_add);
void drm_regs_init(struct drm_reg_manager *manager,
int (*reg_reusable) (const struct drm_reg *, const void *),
void (*reg_destroy) (struct drm_reg *))
{
INIT_LIST_HEAD(&manager->free);
INIT_LIST_HEAD(&manager->lru);
INIT_LIST_HEAD(&manager->unfenced);
manager->reg_reusable = reg_reusable;
manager->reg_destroy = reg_destroy;
}
EXPORT_SYMBOL(drm_regs_init);
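/*
 * Illustrative usage sketch: allocate a compatible register for a
 * submission, program it, then fence the unfenced list. Fence class 0
 * and the EXE type are assumptions.
 */
static int example_use_reg(struct drm_reg_manager *manager,
			   struct drm_fence_object *fence, const void *key)
{
	struct drm_reg *reg;
	int ret;

	ret = drm_regs_alloc(manager, key, 0, DRM_FENCE_TYPE_EXE,
			     1 /* interruptible */, 0 /* no_wait */, &reg);
	if (ret)
		return ret;

	/* ... emit commands that program the register described by reg ... */

	drm_regs_fence(manager, fence);
	return 0;
}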


@@ -203,7 +203,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->bm.evict_mutex);
idr_init(&dev->drw_idr);
@@ -219,11 +218,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
return -ENOMEM;
if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
/* the DRM has 6 counters */
dev->counters = 6;
@@ -269,8 +263,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
}
}
drm_fence_manager_init(dev);
return 0;
error_out_unreg:


@@ -1,529 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
static void drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
if (unlikely(page == NULL))
return;
page_virtual = kmap_atomic(page, KM_USER0);
for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(page_virtual + i);
kunmap_atomic(page_virtual, KM_USER0);
}
static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
{
unsigned long i;
mb();
for (i=0; i < num_pages; ++i)
drm_clflush_page(*pages++);
mb();
}
#endif
static void drm_ttm_ipi_handler(void *null)
{
flush_agp_cache();
}
void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
{
#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
if (cpu_has_clflush) {
drm_ttm_cache_flush_clflush(pages, num_pages);
return;
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1))
#else
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
#endif
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
EXPORT_SYMBOL(drm_ttm_cache_flush);
/**
* Allocates storage for pointers to the pages that back the ttm.
*
* Uses kmalloc if possible. Otherwise falls back to vmalloc.
*/
static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
if (drm_alloc_memctl(size))
return;
if (size <= PAGE_SIZE)
ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
if (!ttm->pages) {
ttm->pages = vmalloc_user(size);
if (ttm->pages)
ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC;
}
if (!ttm->pages)
drm_free_memctl(size);
}
static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) {
vfree(ttm->pages);
ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC;
} else {
drm_free(ttm->pages, size, DRM_MEM_TTM);
}
drm_free_memctl(size);
ttm->pages = NULL;
}
static struct page *drm_ttm_alloc_page(struct drm_ttm *ttm)
{
struct page *page;
if (drm_alloc_memctl(PAGE_SIZE))
return NULL;
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
page = drm_get_uncached_page();
else
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return NULL;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
SetPageReserved(page);
#endif
return page;
}
/*
* Change caching policy for the linear kernel map
* for range of pages in a ttm.
*/
static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
{
int i;
struct page **cur_page;
int do_tlbflush = 0;
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
return 0;
if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
return 0;
if (noncached)
drm_ttm_cache_flush(ttm->pages, ttm->num_pages);
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (*cur_page) {
if (!PageHighMem(*cur_page)) {
if (noncached) {
map_page_into_agp(*cur_page);
} else {
unmap_page_from_agp(*cur_page);
}
do_tlbflush = 1;
}
}
}
if (do_tlbflush)
flush_agp_mappings();
DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
return 0;
}
static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
int write;
int dirty;
struct page *page;
int i;
BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);
dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (page == NULL)
continue;
if (page == ttm->dummy_read_page) {
BUG_ON(write);
continue;
}
if (write && dirty && !PageReserved(page))
set_page_dirty_lock(page);
ttm->pages[i] = NULL;
put_page(page);
}
}
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
int i;
struct drm_buffer_manager *bm = &ttm->dev->bm;
struct page **cur_page;
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (*cur_page) {
if (ttm->dev->bm.allocator_type == _DRM_BM_ALLOCATOR_UNCACHED)
drm_put_uncached_page(*cur_page);
else {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
ClearPageReserved(*cur_page);
#endif
if (page_count(*cur_page) != 1)
DRM_ERROR("Erroneous page count. Leaking pages.\n");
if (page_mapped(*cur_page))
DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
__free_page(*cur_page);
}
drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
}
}
}
/*
* Free all resources associated with a ttm.
*/
int drm_ttm_destroy(struct drm_ttm *ttm)
{
struct drm_ttm_backend *be;
if (!ttm)
return 0;
be = ttm->be;
if (be) {
be->func->destroy(be);
ttm->be = NULL;
}
if (ttm->pages) {
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
drm_ttm_set_caching(ttm, 0);
if (ttm->page_flags & DRM_TTM_PAGE_USER)
drm_ttm_free_user_pages(ttm);
else
drm_ttm_free_alloced_pages(ttm);
drm_ttm_free_page_directory(ttm);
}
drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
return 0;
}
struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
{
struct page *p;
struct drm_buffer_manager *bm = &ttm->dev->bm;
/*
 * Highmem pages fill the page directory from the back,
 * lowmem pages from the front; keep allocating until the
 * requested slot has been filled.
 */
while (NULL == (p = ttm->pages[index])) {
p = drm_ttm_alloc_page(ttm);
if (!p)
return NULL;
if (PageHighMem(p))
ttm->pages[--ttm->first_himem_page] = p;
else
ttm->pages[++ttm->last_lomem_page] = p;
++bm->cur_pages;
}
return p;
}
EXPORT_SYMBOL(drm_ttm_get_page);
/**
* drm_ttm_set_user:
*
* @ttm: the ttm to map pages to. This must always be
* a freshly created ttm.
*
 * @tsk: the task whose address space the pages are
 * mapped from.
 *
 * @start: the first user virtual address to map.
 *
 * @num_pages: the number of pages to map; must match
 * ttm->num_pages.
 *
 * Map a range of user addresses to a new ttm object, giving
 * the graphics device access to user memory. Write access is
 * governed by the DRM_TTM_PAGE_WRITE bit in the page_flags
 * passed to drm_ttm_create().
*/
int drm_ttm_set_user(struct drm_ttm *ttm,
struct task_struct *tsk,
unsigned long start,
unsigned long num_pages)
{
struct mm_struct *mm = tsk->mm;
int ret;
int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;
BUG_ON(num_pages != ttm->num_pages);
BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);
down_read(&mm->mmap_sem);
ret = get_user_pages(tsk, mm, start, num_pages,
write, 0, ttm->pages, NULL);
up_read(&mm->mmap_sem);
if (ret != num_pages && write) {
drm_ttm_free_user_pages(ttm);
return -ENOMEM;
}
return 0;
}
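/*
 * Illustrative sketch, not from this tree: how a driver might wrap
 * drm_ttm_create() and drm_ttm_set_user() to give the device write
 * access to a range of user memory.  The `dev` and `dummy_page`
 * arguments are assumed to be supplied by the caller.
 */
static struct drm_ttm *example_map_user_range(struct drm_device *dev,
					      struct page *dummy_page,
					      unsigned long start,
					      unsigned long num_pages)
{
	struct drm_ttm *ttm;

	/* The USER and WRITE flags must be set before drm_ttm_set_user(). */
	ttm = drm_ttm_create(dev, num_pages << PAGE_SHIFT,
			     DRM_TTM_PAGE_USER | DRM_TTM_PAGE_WRITE,
			     dummy_page);
	if (!ttm)
		return NULL;

	if (drm_ttm_set_user(ttm, current, start, num_pages)) {
		drm_ttm_destroy(ttm);
		return NULL;
	}
	return ttm;
}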
/**
* drm_ttm_populate:
*
* @ttm: the object to allocate pages for
*
* Allocate pages for all unset page entries, then
* call the backend to create the hardware mappings
*/
int drm_ttm_populate(struct drm_ttm *ttm)
{
struct page *page;
unsigned long i;
struct drm_ttm_backend *be;
int ret;
if (ttm->state != ttm_unpopulated)
return 0;
be = ttm->be;
for (i = 0; i < ttm->num_pages; ++i) {
page = drm_ttm_get_page(ttm, i);
if (!page)
return -ENOMEM;
}
ret = be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
if (ret)
return ret;
ttm->state = ttm_unbound;
return 0;
}
/**
* drm_ttm_create:
*
* @dev: the drm_device
*
* @size: The size (in bytes) of the desired object
*
* @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h.
*
* Allocate and initialize a ttm, leaving it unpopulated at this time
*/
struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
uint32_t page_flags, struct page *dummy_read_page)
{
struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
struct drm_ttm *ttm;
if (!bo_driver)
return NULL;
ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
if (!ttm)
return NULL;
ttm->dev = dev;
atomic_set(&ttm->vma_count, 0);
ttm->destroy = 0;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
ttm->page_flags = page_flags;
ttm->dummy_read_page = dummy_read_page;
/*
* Account also for AGP module memory usage.
*/
drm_ttm_alloc_page_directory(ttm);
if (!ttm->pages) {
drm_ttm_destroy(ttm);
DRM_ERROR("Failed allocating page table\n");
return NULL;
}
ttm->be = bo_driver->create_ttm_backend_entry(dev);
if (!ttm->be) {
drm_ttm_destroy(ttm);
DRM_ERROR("Failed creating ttm backend entry\n");
return NULL;
}
ttm->state = ttm_unpopulated;
return ttm;
}
/**
* drm_ttm_evict:
*
* @ttm: the object to be unbound from the aperture.
*
* Transition a ttm from bound to evicted, where it
* isn't present in the aperture, but various caches may
* not be consistent.
*/
void drm_ttm_evict(struct drm_ttm *ttm)
{
struct drm_ttm_backend *be = ttm->be;
int ret;
if (ttm->state == ttm_bound) {
ret = be->func->unbind(be);
BUG_ON(ret);
}
ttm->state = ttm_evicted;
}
/**
* drm_ttm_fixup_caching:
*
* @ttm: the object to set unbound
*
* XXX this function is misnamed. Transition a ttm from evicted to
* unbound, flushing caches as appropriate.
*/
void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{
if (ttm->state == ttm_evicted) {
struct drm_ttm_backend *be = ttm->be;
if (be->func->needs_ub_cache_adjust(be))
drm_ttm_set_caching(ttm, 0);
ttm->state = ttm_unbound;
}
}
/**
* drm_ttm_unbind:
*
* @ttm: the object to unbind from the graphics device
*
* Unbind an object from the aperture. This removes the mappings
* from the graphics device and flushes caches if necessary.
*/
void drm_ttm_unbind(struct drm_ttm *ttm)
{
if (ttm->state == ttm_bound)
drm_ttm_evict(ttm);
drm_ttm_fixup_caching(ttm);
}
/**
* drm_ttm_bind:
*
* @ttm: the ttm object to bind to the graphics device
*
* @bo_mem: the aperture memory region which will hold the object
*
 * Bind a ttm object to the aperture. This ensures that the necessary
 * pages are allocated, flushes CPU caches as needed and, for a
 * user-backed ttm, sets DRM_TTM_PAGE_USER_DIRTY to indicate that the
 * pages may have been modified by the GPU.
*/
int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
struct drm_bo_driver *bo_driver;
int ret = 0;
struct drm_ttm_backend *be;
/* Check ttm before it is dereferenced through ttm->dev. */
if (!ttm)
return -EINVAL;
bo_driver = ttm->dev->driver->bo_driver;
if (ttm->state == ttm_bound)
return 0;
be = ttm->be;
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
bo_driver->ttm_cache_flush)
bo_driver->ttm_cache_flush(ttm);
ret = be->func->bind(be, bo_mem);
if (ret) {
ttm->state = ttm_evicted;
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
ttm->state = ttm_bound;
if (ttm->page_flags & DRM_TTM_PAGE_USER)
ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
return 0;
}
EXPORT_SYMBOL(drm_ttm_bind);
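/*
 * Illustrative lifecycle sketch, not from this tree, following the
 * state machine above: create -> bind (which populates) -> unbind ->
 * destroy.  `dev`, `dummy_page`, `bo_mem` and the 16-page size are
 * assumed for the example.
 */
static int example_ttm_lifecycle(struct drm_device *dev,
				 struct page *dummy_page,
				 struct drm_bo_mem_reg *bo_mem)
{
	struct drm_ttm *ttm;
	int ret;

	ttm = drm_ttm_create(dev, 16 * PAGE_SIZE, 0, dummy_page);
	if (!ttm)
		return -ENOMEM;

	/* drm_ttm_bind() populates an unpopulated ttm before binding. */
	ret = drm_ttm_bind(ttm, bo_mem);
	if (ret)
		goto out;

	/* Unbind evicts and then fixes up caching (ttm_unbound state). */
	drm_ttm_unbind(ttm);
out:
	drm_ttm_destroy(ttm);
	return ret;
}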

View file

@ -1,138 +0,0 @@
/*
* Copyright (c) Red Hat Inc.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie <airlied@redhat.com>
*/
/* Simple list-based uncached page allocator:
 * - adds chunks of 1MB to the allocator at a time
 * - uses page->lru to keep a free list
 * - doesn't track pages currently in use
*
* TODO: Add shrinker support
*/
#include "drmP.h"
#include <asm/agp.h>
static struct list_head uncached_free_list;
static struct mutex uncached_mutex;
static int uncached_inited;
static int total_uncached_pages;
/* add 1MB at a time */
#define NUM_PAGES_TO_ADD 256
static void drm_uncached_page_put(struct page *page)
{
unmap_page_from_agp(page);
put_page(page);
__free_page(page);
}
int drm_uncached_add_pages_locked(int num_pages)
{
struct page *page;
int i;
DRM_DEBUG("adding uncached memory %ld\n", num_pages * PAGE_SIZE);
for (i = 0; i < num_pages; i++) {
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
DRM_ERROR("unable to get page %d\n", i);
return i;
}
get_page(page);
#ifdef CONFIG_X86
set_memory_wc((unsigned long)page_address(page), 1);
#else
map_page_into_agp(page);
#endif
list_add(&page->lru, &uncached_free_list);
total_uncached_pages++;
}
return i;
}
struct page *drm_get_uncached_page(void)
{
struct page *page = NULL;
int ret;
mutex_lock(&uncached_mutex);
if (list_empty(&uncached_free_list)) {
ret = drm_uncached_add_pages_locked(NUM_PAGES_TO_ADD);
if (ret == 0) {
/* Don't return with uncached_mutex held. */
mutex_unlock(&uncached_mutex);
return NULL;
}
}
page = list_first_entry(&uncached_free_list, struct page, lru);
list_del(&page->lru);
mutex_unlock(&uncached_mutex);
return page;
}
void drm_put_uncached_page(struct page *page)
{
mutex_lock(&uncached_mutex);
list_add(&page->lru, &uncached_free_list);
mutex_unlock(&uncached_mutex);
}
void drm_uncached_release_all_pages(void)
{
struct page *page, *tmp;
list_for_each_entry_safe(page, tmp, &uncached_free_list, lru) {
list_del(&page->lru);
drm_uncached_page_put(page);
}
}
int drm_uncached_init(void)
{
if (uncached_inited)
return 0;
INIT_LIST_HEAD(&uncached_free_list);
mutex_init(&uncached_mutex);
uncached_inited = 1;
return 0;
}
void drm_uncached_fini(void)
{
if (!uncached_inited)
return;
uncached_inited = 0;
drm_uncached_release_all_pages();
}
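/*
 * Usage sketch, not from this tree, for the allocator above.  Pages
 * handed out by drm_get_uncached_page() go back through
 * drm_put_uncached_page(); init/fini would normally be driven from
 * module load and unload.
 */
static int example_uncached_usage(void)
{
	struct page *page;

	drm_uncached_init();

	page = drm_get_uncached_page();
	if (!page) {
		drm_uncached_fini();
		return -ENOMEM;
	}

	/* ... point the device at page_address(page) ... */

	drm_put_uncached_page(page);
	drm_uncached_fini();
	return 0;
}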

View file

@ -41,10 +41,6 @@
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
@ -232,7 +228,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
found_maps++;
if (pt->vma == vma) {
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
}
}
/* We were the only map that was found */
@ -416,7 +411,7 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
vma_entry->vma = vma;
vma_entry->pid = current->pid;
@ -456,7 +451,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
}
@ -651,8 +646,6 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
break;
case _DRM_TTM:
return drm_bo_mmap_locked(vma, filp, map);
default:
return -EINVAL; /* This should never happen. */
}
@ -677,213 +670,3 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL(drm_mmap);
/**
* buffer object vm functions.
*/
/**
* \c Pagefault method for buffer objects.
*
* \param vma Virtual memory area.
* \param vmf vm fault data
 * \return Error or VM_FAULT_NOPAGE. The pfn is inserted manually.
 *
 * It's important that pfns are inserted while holding the bo->mutex lock;
 * otherwise we might race with unmap_mapping_range(), which is always
 * called with the bo->mutex lock held.
 *
 * We're modifying the page attribute bits of the vma->vm_page_prot field
 * while holding the mmap_sem in read mode only, never in write mode.
* These bits are not used by the mm subsystem code, and we consider them
* protected by the bo->mutex lock.
*/
#if defined(DRM_FULL_MM_COMPAT) && !defined(DRM_NO_FAULT)
static int drm_bo_vm_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
struct drm_ttm *ttm;
struct drm_device *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
unsigned long ret = VM_FAULT_NOPAGE;
dev = bo->dev;
err = mutex_lock_interruptible(&bo->mutex);
if (err) {
return VM_FAULT_NOPAGE;
}
err = drm_bo_wait(bo, 0, 1, 0, 1);
if (err) {
ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
goto out_unlock;
}
bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable.
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
uint32_t new_flags = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
err = drm_bo_move_buffer(bo, new_flags, 0, 0);
if (err) {
ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
}
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
} else {
ttm = bo->ttm;
drm_ttm_fixup_caching(ttm);
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
ret = VM_FAULT_OOM;
goto out_unlock;
}
pfn = page_to_pfn(page);
vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
drm_io_prot(_DRM_TTM, vma);
}
err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
if (err) {
ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
goto out_unlock;
}
out_unlock:
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
mutex_unlock(&bo->mutex);
return ret;
}
#endif
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
/* clear the clean flags */
bo->mem.flags &= ~DRM_BO_FLAG_CLEAN;
bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN;
drm_vm_open_locked(vma);
atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_add_vma(bo, vma);
#endif
}
/**
* \c vma open method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_open(struct vm_area_struct *vma)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
struct drm_device *dev = bo->dev;
mutex_lock(&dev->struct_mutex);
drm_bo_vm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
}
/**
* \c vma close method for buffer objects.
*
* \param vma virtual memory area.
*/
static void drm_bo_vm_close(struct vm_area_struct *vma)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
struct drm_device *dev = bo->dev;
drm_vm_close(vma);
if (bo) {
mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_delete_vma(bo, vma);
#endif
drm_bo_usage_deref_locked((struct drm_buffer_object **)
&vma->vm_private_data);
mutex_unlock(&dev->struct_mutex);
}
}
static struct vm_operations_struct drm_bo_vm_ops = {
#ifdef DRM_FULL_MM_COMPAT
#ifdef DRM_NO_FAULT
.nopfn = drm_bo_vm_nopfn,
#else
.fault = drm_bo_vm_fault,
#endif
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
.nopfn = drm_bo_vm_nopfn,
#else
.nopage = drm_bo_vm_nopage,
#endif
#endif
.open = drm_bo_vm_open,
.close = drm_bo_vm_close,
};
/**
* mmap buffer object memory.
*
* \param vma virtual memory area.
* \param file_priv DRM file private.
* \param map The buffer object drm map.
* \return zero on success or a negative number on failure.
*/
int drm_bo_mmap_locked(struct vm_area_struct *vma,
struct file *filp,
drm_local_map_t *map)
{
vma->vm_ops = &drm_bo_vm_ops;
vma->vm_private_data = map->handle;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
vma->vm_flags |= VM_PFNMAP;
#endif
drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
drm_bo_map_bound(vma);
#endif
return 0;
}

View file

@ -1,163 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
{
return drm_agp_init_ttm(dev);
}
int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass,
uint32_t * type)
{
*type = 3;
return 0;
}
int via_invalidate_caches(struct drm_device * dev, uint64_t flags)
{
/*
* FIXME: Invalidate texture caches here.
*/
return 0;
}
static int via_vram_info(struct drm_device *dev,
unsigned long *offset,
unsigned long *size)
{
struct pci_dev *pdev = dev->pdev;
unsigned long flags;
int ret = -EINVAL;
int i;
for (i = 0; i < 6; ++i) {
flags = pci_resource_flags(pdev, i);
if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
(IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
ret = 0;
break;
}
}
if (ret) {
DRM_ERROR("Could not find VRAM PCI resource\n");
return ret;
}
*offset = pci_resource_start(pdev, i);
*size = pci_resource_end(pdev, i) - *offset + 1;
return 0;
}
int via_init_mem_type(struct drm_device * dev, uint32_t type,
struct drm_mem_type_manager * man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
/* System memory */
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_CACHED;
man->drm_bus_maptype = 0;
break;
case DRM_BO_MEM_TT:
/* Dynamic agpgart memory */
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
/* Only to get pte protection right. */
man->drm_bus_maptype = _DRM_AGP;
break;
case DRM_BO_MEM_VRAM:
/* "On-card" video ram */
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_FRAME_BUFFER;
man->io_addr = NULL;
return via_vram_info(dev, &man->io_offset, &man->io_size);
case DRM_BO_MEM_PRIV0:
/* Pre-bound agpgart memory */
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
man->io_offset = dev->agp->agp_info.aper_base;
man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
man->io_addr = NULL;
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
_DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_AGP;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
uint64_t via_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return DRM_BO_FLAG_MEM_LOCAL; /* Evict TT to local */
case DRM_BO_MEM_PRIV0: /* Evict pre-bound AGP to TT */
return DRM_BO_FLAG_MEM_TT;
case DRM_BO_MEM_VRAM:
if (bo->mem.num_pages > 128)
return DRM_BO_FLAG_MEM_TT;
else
return DRM_BO_FLAG_MEM_LOCAL;
default:
return DRM_BO_FLAG_MEM_LOCAL;
}
}

View file

@ -1,169 +0,0 @@
/**************************************************************************
*
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
/*
* DRM_FENCE_TYPE_EXE guarantees that all command buffers can be evicted.
* DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
*/
static void via_fence_poll(struct drm_device *dev, uint32_t class,
uint32_t waiting_types)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
uint32_t signaled_flush_types = 0;
uint32_t status;
if (class != 0)
return;
if (unlikely(!dev_priv))
return;
spin_lock(&dev_priv->fence_lock);
if (waiting_types) {
/*
 * Take the idlelock. This guarantees that the next time a client tries
 * to grab the lock, it will stall until the idlelock is released, which
 * in turn guarantees that the GPU engines will eventually go idle, but
 * nothing else. It cannot be used to protect the hardware.
*/
if (!dev_priv->have_idlelock) {
drm_idlelock_take(&dev->primary->master->lock);
dev_priv->have_idlelock = 1;
}
/*
* Check if AGP command reader is idle.
*/
if (waiting_types & DRM_FENCE_TYPE_EXE)
if (VIA_READ(0x41C) & 0x80000000)
signaled_flush_types |= DRM_FENCE_TYPE_EXE;
/*
* Check VRAM command queue empty and 2D + 3D engines idle.
*/
if (waiting_types & DRM_VIA_FENCE_TYPE_ACCEL) {
status = VIA_READ(VIA_REG_STATUS);
if ((status & VIA_VR_QUEUE_BUSY) &&
!(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
signaled_flush_types |= DRM_VIA_FENCE_TYPE_ACCEL;
}
if (signaled_flush_types) {
waiting_types &= ~signaled_flush_types;
if (!waiting_types && dev_priv->have_idlelock) {
drm_idlelock_release(&dev->primary->master->lock);
dev_priv->have_idlelock = 0;
}
drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
signaled_flush_types, 0);
}
}
spin_unlock(&dev_priv->fence_lock);
}
/**
* Emit a fence sequence.
*/
static int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
int ret = 0;
if (!dev_priv)
return -EINVAL;
switch(class) {
case 0: /* AGP command stream */
/*
* The sequence number isn't really used by the hardware yet.
*/
spin_lock(&dev_priv->fence_lock);
*sequence = ++dev_priv->emit_0_sequence;
spin_unlock(&dev_priv->fence_lock);
/*
 * When drm_fence_handler() is called with flush type 0x01 and a
 * sequence number, that means that the EXE flag has expired.
 * Nothing else: no implicit flushing, and no other engines idle.
*/
*native_type = DRM_FENCE_TYPE_EXE;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/**
 * No irq fence expirations are implemented yet.
 * Both the HQV engines and the PCI dmablit engines can signal idle
 * with an IRQ, but we don't make use of that here.
 * This means that the drm fence manager will always poll for engine idle,
 * unless the caller waiting for a fence object has indicated a lazy wait.
*/
static int via_fence_has_irq(struct drm_device * dev, uint32_t class,
uint32_t flags)
{
return 0;
}
struct drm_fence_driver via_fence_driver = {
.num_classes = 1,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 20),
.sequence_mask = 0xffffffffU,
.has_irq = via_fence_has_irq,
.emit = via_fence_emit_sequence,
.poll = via_fence_poll,
.needed_flush = NULL,
.wait = NULL
};

View file

@ -1028,9 +1028,6 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
#ifdef I915_HAVE_GEM
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),

View file

@ -74,11 +74,6 @@ enum pipe {
PIPE_B,
};
#ifdef I915_HAVE_BUFFER
#define I915_MAX_VALIDATE_BUFFERS 4096
struct drm_i915_validate_buffer;
#endif
#define WATCH_COHERENCY 0
#define WATCH_BUF 0
#define WATCH_EXEC 0
@ -177,11 +172,7 @@ struct drm_i915_private {
struct drm_mm vram;
#ifdef I915_HAVE_BUFFER
void *agp_iomap;
unsigned int max_validate_buffers;
struct mutex cmdbuf_mutex;
u32 stolen_base;
struct drm_i915_validate_buffer *val_bufs;
// void *agp_iomap;
#endif
DRM_SPINTYPE swaps_lock;
@ -197,7 +188,7 @@ struct drm_i915_private {
#if defined(I915_HAVE_BUFFER) && defined(DRI2)
/* DRI2 sarea */
struct drm_gem_object *sarea_object;
struct drm_bo_kmap_obj sarea_kmap;
// struct drm_bo_kmap_obj sarea_kmap;
#endif
/* Feature bits from the VBIOS */

View file

@ -513,13 +513,6 @@ void i915_driver_lastclose(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
#ifdef I915_HAVE_BUFFER
if (dev_priv->val_bufs) {
vfree(dev_priv->val_bufs);
dev_priv->val_bufs = NULL;
}
#endif
i915_gem_lastclose(dev);
if (dev_priv->agp_heap)

View file

@ -39,40 +39,6 @@ static struct pci_device_id pciidlist[] = {
};
#ifdef VIA_HAVE_FENCE
extern struct drm_fence_driver via_fence_driver;
#endif
#ifdef VIA_HAVE_BUFFER
/**
 * If there's no thrashing, this is the preferred memory type order.
*/
static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
/**
* If we have thrashing, most memory will be evicted to TT anyway, so we might as well
* just move the new buffer into TT from the start.
*/
static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
static struct drm_bo_driver via_bo_driver = {
.mem_type_prio = via_mem_prios,
.mem_busy_prio = via_busy_prios,
.num_mem_type_prio = ARRAY_SIZE(via_mem_prios),
.num_mem_busy_prio = ARRAY_SIZE(via_busy_prios),
.create_ttm_backend_entry = via_create_ttm_backend_entry,
.fence_type = via_fence_types,
.invalidate_caches = via_invalidate_caches,
.init_mem_type = via_init_mem_type,
.evict_flags = via_evict_flags,
.move = NULL,
.ttm_cache_flush = NULL,
.command_stream_barrier = NULL
};
#endif
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
@ -117,12 +83,6 @@ static struct drm_driver driver = {
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
},
#ifdef VIA_HAVE_FENCE
.fence_driver = &via_fence_driver,
#endif
#ifdef VIA_HAVE_BUFFER
.bo_driver = &via_bo_driver,
#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = VIA_DRM_DRIVER_DATE,

View file

@ -62,8 +62,6 @@
*/
#define VIA_HAVE_DMABLIT 1
#define VIA_HAVE_CORE_MM 1
#define VIA_HAVE_FENCE 1
#define VIA_HAVE_BUFFER 1
#endif
#define VIA_PCI_BUF_SIZE 60000
@ -127,12 +125,6 @@ typedef struct drm_via_private {
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
#endif
uint32_t dma_diff;
#ifdef VIA_HAVE_FENCE
spinlock_t fence_lock;
uint32_t emit_0_sequence;
int have_idlelock;
struct timer_list fence_timer;
#endif
} drm_via_private_t;
enum via_family {
@ -196,16 +188,4 @@ extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq
extern void via_init_dmablit(struct drm_device *dev);
#endif
#ifdef VIA_HAVE_BUFFER
extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
uint32_t *type);
extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
extern uint64_t via_evict_flags(struct drm_buffer_object *bo);
extern int via_move(struct drm_buffer_object *bo, int evict,
int no_wait, struct drm_bo_mem_reg *new_mem);
#endif
#endif

View file

@ -65,17 +65,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
#ifdef VIA_HAVE_DMABLIT
via_init_dmablit( dev );
#endif
#ifdef VIA_HAVE_FENCE
dev_priv->emit_0_sequence = 0;
dev_priv->have_idlelock = 0;
spin_lock_init(&dev_priv->fence_lock);
#endif /* VIA_HAVE_FENCE */
dev->dev_private = (void *)dev_priv;
#ifdef VIA_HAVE_BUFFER
ret = drm_bo_driver_init(dev);
if (ret)
DRM_ERROR("Could not initialize buffer object driver.\n");
#endif
return ret;
}