Simplify the AGP backend interface somewhat.

Fix buffer bound caching policy changing: allow
on-the-fly changing of the caching policy on bound buffers if the hardware
supports it.

Allow drivers to use driver-specific AGP memory types for TTM AGP pages.
This will make AGP drivers much easier to migrate.
This commit is contained in:
Thomas Hellstrom 2006-10-12 12:09:16 +02:00
parent 3070389367
commit 10150df02b
7 changed files with 96 additions and 100 deletions

View file

@ -664,7 +664,7 @@ typedef struct drm_bo_driver{
int cached_vram;
drm_local_map_t *vram_map;
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device *dev, int cached);
(struct drm_device *dev);
int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
} drm_bo_driver_t;
@ -977,7 +977,9 @@ typedef struct drm_device {
typedef struct drm_agp_ttm_priv {
DRM_AGP_MEM *mem;
struct agp_bridge_data *bridge;
unsigned mem_type;
unsigned alloc_type;
unsigned cached_type;
unsigned uncached_type;
int populated;
} drm_agp_ttm_priv;
#endif
@ -1289,11 +1291,11 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern drm_ttm_backend_t *drm_agp_init_ttm_cached(struct drm_device *dev,
drm_ttm_backend_t *backend);
extern drm_ttm_backend_t *drm_agp_init_ttm_uncached(struct drm_device *dev,
drm_ttm_backend_t *backend);
extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend,
unsigned alloc_type,
unsigned cached_type,
unsigned uncached_type);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);

View file

@ -552,20 +552,16 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
/*
* AGP ttm backend interface.
*/
static int drm_agp_needs_cache_adjust_true(drm_ttm_backend_t *backend) {
return TRUE;
static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 1 : 0);
}
static int drm_agp_needs_cache_adjust_false(drm_ttm_backend_t *backend) {
return FALSE;
}
#define AGP_MEM_USER (1 << 16)
#define AGP_MEM_UCACHED (2 << 16)
static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
struct page **pages) {
@ -576,9 +572,9 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
DRM_DEBUG("drm_agp_populate_ttm\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
mem = drm_agp_allocate_memory(num_pages, agp_priv->mem_type);
mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
#else
mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->mem_type);
mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
#endif
if (!mem)
return -1;
@ -592,14 +588,19 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
return 0;
}
static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, unsigned long offset) {
static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
unsigned long offset,
int cached)
{
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
DRM_AGP_MEM *mem = agp_priv->mem;
int ret;
DRM_DEBUG("drm_agp_bind_ttm\n");
DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
(cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
mem->is_flushed = FALSE;
mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
ret = drm_agp_bind_memory(mem, offset);
if (ret) {
DRM_ERROR("AGP Bind memory failed\n");
@ -645,14 +646,17 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
}
drm_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
}
if (backend->needs_free)
if (backend->flags & DRM_BE_FLAG_NEEDS_FREE)
drm_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
}
}
drm_ttm_backend_t *drm_agp_init_ttm_uncached(struct drm_device *dev,
drm_ttm_backend_t *backend) {
drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
drm_ttm_backend_t *backend,
unsigned alloc_type,
unsigned cached_type,
unsigned uncached_type) {
drm_ttm_backend_t *agp_be;
drm_agp_ttm_priv *agp_priv;
@ -671,59 +675,26 @@ drm_ttm_backend_t *drm_agp_init_ttm_uncached(struct drm_device *dev,
}
agp_priv->mem = NULL;
agp_priv->mem_type = AGP_MEM_USER;
agp_priv->alloc_type = alloc_type;
agp_priv->cached_type = cached_type;
agp_priv->uncached_type = uncached_type;
agp_priv->bridge = dev->agp->bridge;
agp_priv->populated = FALSE;
agp_be->aperture_base = dev->agp->agp_info.aper_base;
agp_be->private = (void *) agp_priv;
agp_be->needs_cache_adjust = drm_agp_needs_cache_adjust_true;
agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
agp_be->populate = drm_agp_populate;
agp_be->clear = drm_agp_clear_ttm;
agp_be->bind = drm_agp_bind_ttm;
agp_be->unbind = drm_agp_unbind_ttm;
agp_be->destroy = drm_agp_destroy_ttm;
agp_be->needs_free = (backend == NULL);
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
(backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
(dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
agp_be->drm_map_type = _DRM_AGP;
return agp_be;
}
EXPORT_SYMBOL(drm_agp_init_ttm_uncached);
drm_ttm_backend_t *drm_agp_init_ttm_cached(struct drm_device *dev,
drm_ttm_backend_t *backend) {
drm_ttm_backend_t *agp_be;
drm_agp_ttm_priv *agp_priv;
agp_be = (backend != NULL) ? backend:
drm_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
if (!agp_be)
return NULL;
agp_priv = drm_calloc(1, sizeof(agp_priv), DRM_MEM_MAPPINGS);
if (!agp_priv) {
drm_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
return NULL;
}
agp_priv->mem = NULL;
agp_priv->mem_type = AGP_MEM_UCACHED;
agp_priv->bridge = dev->agp->bridge;
agp_priv->populated = FALSE;
agp_be->aperture_base = dev->agp->agp_info.aper_base;
agp_be->private = (void *) agp_priv;
agp_be->needs_cache_adjust = drm_agp_needs_cache_adjust_false;
agp_be->populate = drm_agp_populate;
agp_be->clear = drm_agp_clear_ttm;
agp_be->bind = drm_agp_bind_ttm;
agp_be->unbind = drm_agp_unbind_ttm;
agp_be->destroy = drm_agp_destroy_ttm;
agp_be->needs_free = (backend == NULL);
agp_be->drm_map_type = _DRM_AGP;
return agp_be;
}
EXPORT_SYMBOL(drm_agp_init_ttm_cached);
EXPORT_SYMBOL(drm_agp_init_ttm);
#endif /* __OS_HAS_AGP */

View file

@ -553,7 +553,8 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->tt->start);
mutex_lock(&dev->struct_mutex);
ret = drm_bind_ttm(bo->ttm, bo->tt->start);
ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
bo->tt->start);
if (ret) {
drm_mm_put_block(&bm->tt_manager, bo->tt);
bo->tt = NULL;
@ -565,7 +566,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
}
be = bo->ttm->be;
if (be->needs_cache_adjust(be))
if (be->needs_ub_cache_adjust(be))
bo->flags &= ~DRM_BO_FLAG_CACHED;
bo->flags &= ~DRM_BO_MASK_MEM;
bo->flags |= DRM_BO_FLAG_MEM_TT;
@ -1089,16 +1090,35 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
DRM_ERROR("Driver did not support given buffer permissions\n");
return ret;
}
/*
* Move out if we need to change caching policy.
* FIXME: Failing is strictly not needed for NO_MOVE buffers.
* We just have to implement NO_MOVE buffers.
*/
if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
!(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
DRM_ERROR("Cannot change caching policy of "
"pinned buffer.\n");
return -EINVAL;
}
ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait);
if (ret) {
if (ret != -EAGAIN)
DRM_ERROR("Failed moving buffer.\n");
return ret;
}
}
DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
flag_diff = (new_flags ^ bo->flags);
/*
* Check whether we need to move buffer.
*/
if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
if (bo->type == drm_bo_type_user) {
DRM_ERROR("User buffers are not implemented yet.\n");
return -EINVAL;
}
ret = drm_bo_move_buffer(bo, new_flags, no_wait);
if (ret) {
if (ret != -EAGAIN)
@ -1251,7 +1271,6 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
case drm_bo_type_dc:
mutex_lock(&dev->struct_mutex);
ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
bo->mask & DRM_BO_FLAG_BIND_CACHED,
ttm_flags, &to);
mutex_unlock(&dev->struct_mutex);
break;
@ -1674,7 +1693,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
if (arg.req.tt_p_size) {
ret = drm_mm_init(&bm->tt_manager,
arg.req.tt_p_offset,
3000 /* arg.req.tt_p_size */);
arg.req.tt_p_size);
bm->has_tt = 1;
bm->use_tt = 1;

View file

@ -230,8 +230,7 @@ static int drm_ttm_populate(drm_ttm_t *ttm)
* Initialize a ttm.
*/
static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size,
int cached)
static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
{
drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
drm_ttm_t *ttm;
@ -263,7 +262,7 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size,
return NULL;
}
memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
ttm->be = bo_driver->create_ttm_backend_entry(dev, cached);
ttm->be = bo_driver->create_ttm_backend_entry(dev);
if (!ttm->be) {
drm_destroy_ttm(ttm);
DRM_ERROR("Failed creating ttm backend entry\n");
@ -284,7 +283,7 @@ int drm_evict_ttm(drm_ttm_t * ttm)
switch (ttm->state) {
case ttm_bound:
if (be->needs_cache_adjust(be)) {
if (be->needs_ub_cache_adjust(be)) {
ret = unmap_vma_pages(ttm);
if (ret) {
return ret;
@ -304,7 +303,7 @@ void drm_fixup_ttm_caching(drm_ttm_t * ttm)
if (ttm->state == ttm_evicted) {
drm_ttm_backend_t *be = ttm->be;
if (be->needs_cache_adjust(be)) {
if (be->needs_ub_cache_adjust(be)) {
drm_set_caching(ttm, 0);
}
ttm->state = ttm_unbound;
@ -326,7 +325,7 @@ int drm_unbind_ttm(drm_ttm_t * ttm)
return 0;
}
int drm_bind_ttm(drm_ttm_t * ttm,
int drm_bind_ttm(drm_ttm_t * ttm, int cached,
unsigned long aper_offset)
{
@ -343,7 +342,7 @@ int drm_bind_ttm(drm_ttm_t * ttm,
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
if (ttm->state == ttm_unbound && be->needs_cache_adjust(be)) {
if (ttm->state == ttm_unbound && !cached) {
ret = unmap_vma_pages(ttm);
if (ret)
return ret;
@ -351,16 +350,16 @@ int drm_bind_ttm(drm_ttm_t * ttm,
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
#ifdef DRM_ODD_MM_COMPAT
else if (ttm->state == ttm_evicted && be->needs_cache_adjust(be)) {
else if (ttm->state == ttm_evicted && !cached) {
ret = drm_ttm_lock_mm(ttm);
if (ret)
return ret;
}
#endif
if ((ret = be->bind(be, aper_offset))) {
if ((ret = be->bind(be, aper_offset, cached))) {
ttm->state = ttm_evicted;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_cache_adjust(be))
if (be->needs_ub_cache_adjust(be))
drm_ttm_unlock_mm(ttm);
#endif
DRM_ERROR("Couldn't bind backend.\n");
@ -372,7 +371,7 @@ int drm_bind_ttm(drm_ttm_t * ttm,
ttm->state = ttm_bound;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_cache_adjust(be)) {
if (be->needs_ub_cache_adjust(be)) {
ret = drm_ttm_remap_bound(ttm);
if (ret)
return ret;
@ -437,7 +436,7 @@ void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
*/
int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
uint32_t flags, int cached,
uint32_t flags,
drm_ttm_object_t ** ttm_object)
{
drm_ttm_object_t *object;
@ -458,7 +457,7 @@ int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
}
map = list->map;
ttm = drm_init_ttm(dev, size, cached);
ttm = drm_init_ttm(dev, size);
if (!ttm) {
DRM_ERROR("Could not create ttm\n");
drm_ttm_object_remove(dev, object);

View file

@ -44,16 +44,21 @@
* Most device drivers will let this point to the standard AGP implementation.
*/
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
#define DRM_BE_FLAG_CBA 0x00000004
typedef struct drm_ttm_backend {
unsigned long aperture_base;
void *private;
int needs_free;
uint32_t flags;
uint32_t drm_map_type;
int (*needs_cache_adjust) (struct drm_ttm_backend * backend);
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
int (*bind) (struct drm_ttm_backend * backend, unsigned long offset);
int (*bind) (struct drm_ttm_backend * backend,
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
} drm_ttm_backend_t;
@ -88,7 +93,7 @@ typedef struct drm_ttm_object {
} drm_ttm_object_t;
extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
uint32_t flags, int cached,
uint32_t flags,
drm_ttm_object_t ** ttm_object);
extern void drm_ttm_object_deref_locked(struct drm_device *dev,
drm_ttm_object_t * to);
@ -97,7 +102,7 @@ extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
uint32_t handle,
int check_owner);
extern int drm_bind_ttm(drm_ttm_t * ttm,
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached,
unsigned long aper_offset);
extern int drm_unbind_ttm(drm_ttm_t * ttm);

View file

@ -33,12 +33,13 @@
#include "i915_drm.h"
#include "i915_drv.h"
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev, int cached)
#define INTEL_AGP_MEM_USER (1 << 16)
#define INTEL_AGP_MEM_UCACHED (2 << 16)
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev)
{
if (cached)
return drm_agp_init_ttm_cached(dev, NULL);
else
return drm_agp_init_ttm_uncached(dev, NULL);
return drm_agp_init_ttm(dev, NULL, INTEL_AGP_MEM_USER, INTEL_AGP_MEM_UCACHED,
INTEL_AGP_MEM_USER);
}
int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type)
@ -53,9 +54,9 @@ int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type)
int i915_invalidate_caches(drm_device_t *dev, uint32_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
*/
/*
* FIXME: Only emit once per batchbuffer submission.
*/
uint32_t flush_cmd = MI_NO_WRITE_FLUSH;

View file

@ -183,8 +183,7 @@ extern void i915_poke_flush(drm_device_t *dev);
#ifdef I915_HAVE_BUFFER
/* i915_buffer.c */
extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev,
int cached);
extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type);
extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
#endif