mirror of https://gitlab.freedesktop.org/mesa/drm.git (synced 2025-12-24 17:10:13 +01:00)
Merge branch 'master' into modesetting-gem

Conflicts:
	libdrm/Makefile.am
	libdrm/intel/intel_bufmgr.h
	libdrm/intel/intel_bufmgr_fake.c
	libdrm/intel/intel_bufmgr_gem.c
	shared-core/drm.h
	shared-core/i915_dma.c
	shared-core/i915_irq.c
	shared-core/radeon_cp.c
	shared-core/radeon_drv.h

commit 965b4d662a
46 changed files with 10483 additions and 8745 deletions
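The FreeBSD portion of this merge retires the single M_DRM malloc type in favor of one malloc type per DRM subsystem (DRM_MEM_DMA, DRM_MEM_MAPS, DRM_MEM_BUFS, and so on), declared in drmP.h and defined in drm_memory.c, so allocations are accounted per bucket in vmstat -m. As a minimal standalone sketch of the kernel pattern involved (using a hypothetical M_EXAMPLE type, not anything from this tree):

/* Sketch only: hypothetical M_EXAMPLE type, not part of the DRM code. */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

/* In a header shared by callers: MALLOC_DECLARE(M_EXAMPLE); */
MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");

static int
example_alloc(void **pp, size_t size)
{
	/* M_NOWAIT: return NULL rather than sleep; M_ZERO: zero the memory. */
	*pp = malloc(size, M_EXAMPLE, M_NOWAIT | M_ZERO);
	if (*pp == NULL)
		return (ENOMEM);
	return (0);
}

static void
example_free(void *p)
{
	/* The type tag is what lets vmstat -m attribute this memory. */
	free(p, M_EXAMPLE);
}

Each free(9) must name the same type as the matching malloc(9), which is why the diff below touches every allocation and free site rather than just the type definitions.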
@@ -61,7 +61,8 @@ drm_ati_alloc_pcigart_table(struct drm_device *dev,
 	struct drm_dma_handle *dmah;
 	int flags, ret;
 
-	dmah = malloc(sizeof(struct drm_dma_handle), M_DRM, M_ZERO | M_NOWAIT);
+	dmah = malloc(sizeof(struct drm_dma_handle), DRM_MEM_DMA,
+	    M_ZERO | M_NOWAIT);
 	if (dmah == NULL)
 		return ENOMEM;
 
@@ -74,7 +75,7 @@ drm_ati_alloc_pcigart_table(struct drm_device *dev,
 	    BUS_DMA_ALLOCNOW, NULL, NULL, /* flags, lockfunc, lockfuncargs */
 	    &dmah->tag);
 	if (ret != 0) {
-		free(dmah, M_DRM);
+		free(dmah, DRM_MEM_DMA);
 		return ENOMEM;
 	}
 
@@ -85,7 +86,7 @@ drm_ati_alloc_pcigart_table(struct drm_device *dev,
 	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr, flags, &dmah->map);
 	if (ret != 0) {
 		bus_dma_tag_destroy(dmah->tag);
-		free(dmah, M_DRM);
+		free(dmah, DRM_MEM_DMA);
 		return ENOMEM;
 	}
 	DRM_LOCK();
@@ -95,7 +96,7 @@ drm_ati_alloc_pcigart_table(struct drm_device *dev,
 	if (ret != 0) {
 		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
 		bus_dma_tag_destroy(dmah->tag);
-		free(dmah, M_DRM);
+		free(dmah, DRM_MEM_DMA);
 		return ENOMEM;
 	}
 
@@ -112,7 +113,7 @@ drm_ati_free_pcigart_table(struct drm_device *dev,
 
 	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
 	bus_dma_tag_destroy(dmah->tag);
-	free(dmah, M_DRM);
+	free(dmah, DRM_MEM_DMA);
 	dev->sg->dmah = NULL;
 }
@@ -126,27 +126,24 @@ struct drm_file;
 #define DRM_KERNEL_CONTEXT	0	/* Change drm_resctx if changed */
 #define DRM_RESERVED_CONTEXTS	1	/* Change drm_resctx if changed */
 
-#define DRM_MEM_DMA		0
-#define DRM_MEM_SAREA		1
-#define DRM_MEM_DRIVER		2
-#define DRM_MEM_MAGIC		3
-#define DRM_MEM_IOCTLS		4
-#define DRM_MEM_MAPS		5
-#define DRM_MEM_BUFS		6
-#define DRM_MEM_SEGS		7
-#define DRM_MEM_PAGES		8
-#define DRM_MEM_FILES		9
-#define DRM_MEM_QUEUES		10
-#define DRM_MEM_CMDS		11
-#define DRM_MEM_MAPPINGS	12
-#define DRM_MEM_BUFLISTS	13
-#define DRM_MEM_AGPLISTS	14
-#define DRM_MEM_TOTALAGP	15
-#define DRM_MEM_BOUNDAGP	16
-#define DRM_MEM_CTXBITMAP	17
-#define DRM_MEM_STUB		18
-#define DRM_MEM_SGLISTS		19
-#define DRM_MEM_DRAWABLE	20
+MALLOC_DECLARE(DRM_MEM_DMA);
+MALLOC_DECLARE(DRM_MEM_SAREA);
+MALLOC_DECLARE(DRM_MEM_DRIVER);
+MALLOC_DECLARE(DRM_MEM_MAGIC);
+MALLOC_DECLARE(DRM_MEM_IOCTLS);
+MALLOC_DECLARE(DRM_MEM_MAPS);
+MALLOC_DECLARE(DRM_MEM_BUFS);
+MALLOC_DECLARE(DRM_MEM_SEGS);
+MALLOC_DECLARE(DRM_MEM_PAGES);
+MALLOC_DECLARE(DRM_MEM_FILES);
+MALLOC_DECLARE(DRM_MEM_QUEUES);
+MALLOC_DECLARE(DRM_MEM_CMDS);
+MALLOC_DECLARE(DRM_MEM_MAPPINGS);
+MALLOC_DECLARE(DRM_MEM_BUFLISTS);
+MALLOC_DECLARE(DRM_MEM_AGPLISTS);
+MALLOC_DECLARE(DRM_MEM_CTXBITMAP);
+MALLOC_DECLARE(DRM_MEM_SGLISTS);
+MALLOC_DECLARE(DRM_MEM_DRAWABLE);
 
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
@@ -157,8 +154,6 @@ struct drm_file;
 
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
 
-MALLOC_DECLARE(M_DRM);
-
 #define __OS_HAS_AGP	1
 
 #define DRM_DEV_MODE	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
@@ -745,11 +740,6 @@ extern int drm_open_helper(struct cdev *kdev, int flags, int fmt,
 /* Memory management support (drm_memory.c) */
 void	drm_mem_init(void);
 void	drm_mem_uninit(void);
-void	*drm_alloc(size_t size, int area);
-void	*drm_calloc(size_t nmemb, size_t size, int area);
-void	*drm_realloc(void *oldpt, size_t oldsize, size_t size,
-	    int area);
-void	drm_free(void *pt, size_t size, int area);
 void	*drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map);
 void	*drm_ioremap(struct drm_device *dev, drm_local_map_t *map);
 void	drm_ioremapfree(drm_local_map_t *map);
@@ -963,6 +953,32 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
 			size_t align, dma_addr_t maxaddr);
 void	drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
 
+/* Inline replacements for drm_alloc and friends */
+static __inline__ void *
+drm_alloc(size_t size, struct malloc_type *area)
+{
+	return malloc(size, area, M_NOWAIT);
+}
+
+static __inline__ void *
+drm_calloc(size_t nmemb, size_t size, struct malloc_type *area)
+{
+	return malloc(size * nmemb, area, M_NOWAIT | M_ZERO);
+}
+
+static __inline__ void *
+drm_realloc(void *oldpt, size_t oldsize, size_t size,
+    struct malloc_type *area)
+{
+	return reallocf(oldpt, size, area, M_NOWAIT);
+}
+
+static __inline__ void
+drm_free(void *pt, size_t size, struct malloc_type *area)
+{
+	free(pt, area);
+}
+
+/* Inline replacements for DRM_IOREMAP macros */
+static __inline__ void
+drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
@@ -209,7 +209,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 	if (!dev->agp || !dev->agp->acquired)
 		return EINVAL;
 
-	entry = malloc(sizeof(*entry), M_DRM, M_NOWAIT | M_ZERO);
+	entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT | M_ZERO);
 	if (entry == NULL)
 		return ENOMEM;
 
@@ -220,7 +220,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 	handle = drm_agp_allocate_memory(pages, type);
 	DRM_LOCK();
 	if (handle == NULL) {
-		free(entry, M_DRM);
+		free(entry, DRM_MEM_AGPLISTS);
 		return ENOMEM;
 	}
 
@@ -371,7 +371,7 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
 	drm_agp_free_memory(entry->handle);
 	DRM_LOCK();
 
-	free(entry, M_DRM);
+	free(entry, DRM_MEM_AGPLISTS);
 
 	return 0;
 
@@ -405,7 +405,8 @@ drm_agp_head_t *drm_agp_init(void)
 	DRM_DEBUG("agp_available = %d\n", agp_available);
 
 	if (agp_available) {
-		head = malloc(sizeof(*head), M_DRM, M_NOWAIT | M_ZERO);
+		head = malloc(sizeof(*head), DRM_MEM_AGPLISTS,
+		    M_NOWAIT | M_ZERO);
 		if (head == NULL)
 			return NULL;
 		head->agpdev = agpdev;
@@ -74,7 +74,7 @@ static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
 	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
 
 	hash = drm_hash_magic(magic);
-	entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT);
+	entry = malloc(sizeof(*entry), DRM_MEM_MAGIC, M_ZERO | M_NOWAIT);
 	if (!entry)
 		return ENOMEM;
 	entry->magic = magic;
@@ -118,7 +118,7 @@ static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
 			if (prev) {
 				prev->next = pt->next;
 			}
-			free(pt, M_DRM);
+			free(pt, DRM_MEM_MAGIC);
 			return 0;
 		}
 	}
@@ -135,7 +135,7 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
 	/* Allocate a new map structure, fill it in, and do any type-specific
 	 * initialization necessary.
 	 */
-	map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
+	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
 	if (!map) {
 		DRM_LOCK();
 		return ENOMEM;
@@ -157,11 +157,11 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
 		map->mtrr = 1;
 		break;
 	case _DRM_SHM:
-		map->handle = malloc(map->size, M_DRM, M_NOWAIT);
+		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
 		DRM_DEBUG("%lu %d %p\n",
 		    map->size, drm_order(map->size), map->handle);
 		if (!map->handle) {
-			free(map, M_DRM);
+			free(map, DRM_MEM_MAPS);
 			DRM_LOCK();
 			return ENOMEM;
 		}
@@ -171,8 +171,8 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
 			DRM_LOCK();
 			if (dev->lock.hw_lock != NULL) {
 				DRM_UNLOCK();
-				free(map->handle, M_DRM);
-				free(map, M_DRM);
+				free(map->handle, DRM_MEM_MAPS);
+				free(map, DRM_MEM_MAPS);
 				return EBUSY;
 			}
 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
@@ -202,14 +202,14 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
 			}
 		}
 		if (!valid) {
-			free(map, M_DRM);
+			free(map, DRM_MEM_MAPS);
 			DRM_LOCK();
 			return EACCES;
 		}*/
 		break;
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
-			free(map, M_DRM);
+			free(map, DRM_MEM_MAPS);
 			DRM_LOCK();
 			return EINVAL;
 		}
@@ -227,7 +227,7 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
 		align = PAGE_SIZE;
 		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
 		if (map->dmah == NULL) {
-			free(map, M_DRM);
+			free(map, DRM_MEM_MAPS);
 			DRM_LOCK();
 			return ENOMEM;
 		}
@@ -236,7 +236,7 @@ int drm_addmap(struct drm_device * dev, unsigned long offset,
 		break;
 	default:
 		DRM_ERROR("Bad map type %d\n", map->type);
-		free(map, M_DRM);
+		free(map, DRM_MEM_MAPS);
 		DRM_LOCK();
 		return EINVAL;
 	}
@@ -310,7 +310,7 @@ void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
 		}
 		break;
 	case _DRM_SHM:
-		free(map->handle, M_DRM);
+		free(map->handle, DRM_MEM_MAPS);
 		break;
 	case _DRM_AGP:
 	case _DRM_SCATTER_GATHER:
@@ -328,7 +328,7 @@ void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
 		    map->bsr);
 	}
 
-	free(map, M_DRM);
+	free(map, DRM_MEM_MAPS);
 }
 
 /* Remove a map private from list and deallocate resources if the mapping
@@ -371,16 +371,16 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
 		for (i = 0; i < entry->seg_count; i++) {
 			drm_pci_free(dev, entry->seglist[i]);
 		}
-		free(entry->seglist, M_DRM);
+		free(entry->seglist, DRM_MEM_SEGS);
 
 		entry->seg_count = 0;
 	}
 
 	if (entry->buf_count) {
 		for (i = 0; i < entry->buf_count; i++) {
-			free(entry->buflist[i].dev_private, M_DRM);
+			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
 		}
-		free(entry->buflist, M_DRM);
+		free(entry->buflist, DRM_MEM_BUFS);
 
 		entry->buf_count = 0;
 	}
@@ -447,7 +447,7 @@ static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *reque
 
 	entry = &dma->bufs[order];
 
-	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
+	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 	    M_NOWAIT | M_ZERO);
 	if (!entry->buflist) {
 		return ENOMEM;
@@ -473,7 +473,7 @@ static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *reque
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->buf_priv_size;
-		buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
+		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
 		    M_NOWAIT | M_ZERO);
 		if (buf->dev_private == NULL) {
 			/* Set count correctly so we free the proper amount. */
@@ -490,8 +490,8 @@ static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *reque
 	DRM_DEBUG("byte_count: %d\n", byte_count);
 
 	temp_buflist = realloc(dma->buflist,
-	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
-	    M_NOWAIT);
+	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
+	    DRM_MEM_BUFS, M_NOWAIT);
 	if (temp_buflist == NULL) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
@@ -549,22 +549,22 @@ static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *reque
 
 	entry = &dma->bufs[order];
 
-	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
+	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 	    M_NOWAIT | M_ZERO);
-	entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
+	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
 	    M_NOWAIT | M_ZERO);
 
 	/* Keep the original pagelist until we know all the allocations
 	 * have succeeded
 	 */
 	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
-	    sizeof(*dma->pagelist), M_DRM, M_NOWAIT);
+	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);
 
 	if (entry->buflist == NULL || entry->seglist == NULL ||
 	    temp_pagelist == NULL) {
-		free(temp_pagelist, M_DRM);
-		free(entry->seglist, M_DRM);
-		free(entry->buflist, M_DRM);
+		free(temp_pagelist, DRM_MEM_PAGES);
+		free(entry->seglist, DRM_MEM_SEGS);
+		free(entry->buflist, DRM_MEM_BUFS);
 		return ENOMEM;
 	}
 
@@ -589,7 +589,7 @@ static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *reque
 			entry->buf_count = count;
 			entry->seg_count = count;
 			drm_cleanup_buf_error(dev, entry);
-			free(temp_pagelist, M_DRM);
+			free(temp_pagelist, DRM_MEM_PAGES);
 			return ENOMEM;
 		}
 
@@ -617,14 +617,14 @@ static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *reque
 			buf->file_priv = NULL;
 
 			buf->dev_priv_size = dev->driver->buf_priv_size;
-			buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
-			    M_NOWAIT | M_ZERO);
+			buf->dev_private = malloc(buf->dev_priv_size,
+			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
 			if (buf->dev_private == NULL) {
 				/* Set count correctly so we free the proper amount. */
 				entry->buf_count = count;
 				entry->seg_count = count;
 				drm_cleanup_buf_error(dev, entry);
-				free(temp_pagelist, M_DRM);
+				free(temp_pagelist, DRM_MEM_PAGES);
 				return ENOMEM;
 			}
 
@@ -635,12 +635,12 @@ static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *reque
 	}
 
 	temp_buflist = realloc(dma->buflist,
-	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
-	    M_NOWAIT);
+	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
+	    DRM_MEM_BUFS, M_NOWAIT);
 	if (temp_buflist == NULL) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
-		free(temp_pagelist, M_DRM);
+		free(temp_pagelist, DRM_MEM_PAGES);
 		return ENOMEM;
 	}
 	dma->buflist = temp_buflist;
@@ -652,7 +652,7 @@ static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *reque
 	/* No allocations failed, so now we can replace the original pagelist
 	 * with the new one.
 	 */
-	free(dma->pagelist, M_DRM);
+	free(dma->pagelist, DRM_MEM_PAGES);
 	dma->pagelist = temp_pagelist;
 
 	dma->buf_count += entry->buf_count;
@@ -706,7 +706,7 @@ static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *reques
 
 	entry = &dma->bufs[order];
 
-	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
+	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
 	    M_NOWAIT | M_ZERO);
 	if (entry->buflist == NULL)
 		return ENOMEM;
@@ -731,7 +731,7 @@ static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *reques
 		buf->file_priv = NULL;
 
 		buf->dev_priv_size = dev->driver->buf_priv_size;
-		buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
+		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
 		    M_NOWAIT | M_ZERO);
 		if (buf->dev_private == NULL) {
 			/* Set count correctly so we free the proper amount. */
@@ -751,8 +751,8 @@ static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *reques
 	DRM_DEBUG("byte_count: %d\n", byte_count);
 
 	temp_buflist = realloc(dma->buflist,
-	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
-	    M_NOWAIT);
+	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
+	    DRM_MEM_BUFS, M_NOWAIT);
 	if (temp_buflist == NULL) {
 		/* Free the entry because it isn't valid */
 		drm_cleanup_buf_error(dev, entry);
@@ -1099,11 +1099,12 @@ int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
 int drm_order(unsigned long size)
 {
 	int order;
-	unsigned long tmp;
 
-	for (order = 0, tmp = size; tmp >>= 1; ++order);
+	if (size == 0)
+		return 0;
 
-	if (size & ~(1 << order))
+	order = flsl(size) - 1;
+	if (size & ~(1ul << order))
 		++order;
 
 	return order;
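The drm_order() hunk above replaces the bit-shifting loop with a find-last-set computation; the function returns ceil(log2(size)). A quick userspace check of that behavior, using a portable stand-in for the BSD flsl() so it compiles anywhere:

/* Sketch only: my_flsl stands in for the BSD libc/libkern flsl(). */
#include <assert.h>
#include <stdio.h>

static int my_flsl(unsigned long v)	/* index of highest set bit, 1-based */
{
	int i = 0;
	while (v) { i++; v >>= 1; }
	return i;
}

static int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;
	order = my_flsl(size) - 1;
	if (size & ~(1ul << order))	/* any lower bit set? round up */
		++order;
	return order;
}

int main(void)
{
	assert(drm_order(1) == 0);
	assert(drm_order(8) == 3);	/* exact power of two */
	assert(drm_order(12) == 4);	/* rounds up: 2^4 = 16 >= 12 */
	assert(drm_order(4096) == 12);
	printf("drm_order checks pass\n");
	return 0;
}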
@@ -77,7 +77,7 @@ int drm_ctxbitmap_next(struct drm_device *dev)
 
 			ctx_sareas = realloc(dev->context_sareas,
 			    dev->max_context * sizeof(*dev->context_sareas),
-			    M_DRM, M_NOWAIT);
+			    DRM_MEM_SAREA, M_NOWAIT);
 			if (ctx_sareas == NULL) {
 				clear_bit(bit, dev->ctx_bitmap);
 				DRM_UNLOCK();
@@ -88,7 +88,8 @@ int drm_ctxbitmap_next(struct drm_device *dev)
 		} else {
 			/* max_context == 1 at this point */
 			dev->context_sareas = malloc(dev->max_context *
-			    sizeof(*dev->context_sareas), M_DRM, M_NOWAIT);
+			    sizeof(*dev->context_sareas), DRM_MEM_SAREA,
+			    M_NOWAIT);
 			if (dev->context_sareas == NULL) {
 				clear_bit(bit, dev->ctx_bitmap);
 				DRM_UNLOCK();
@@ -107,7 +108,8 @@ int drm_ctxbitmap_init(struct drm_device *dev)
 	int temp;
 
 	DRM_LOCK();
-	dev->ctx_bitmap = malloc(PAGE_SIZE, M_DRM, M_NOWAIT | M_ZERO);
+	dev->ctx_bitmap = malloc(PAGE_SIZE, DRM_MEM_CTXBITMAP,
+	    M_NOWAIT | M_ZERO);
 	if (dev->ctx_bitmap == NULL) {
 		DRM_UNLOCK();
 		return ENOMEM;
@@ -128,8 +130,8 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev)
 {
 	DRM_LOCK();
 	if (dev->context_sareas != NULL)
-		free(dev->context_sareas, M_DRM);
-	free(dev->ctx_bitmap, M_DRM);
+		free(dev->context_sareas, DRM_MEM_SAREA);
+	free(dev->ctx_bitmap, DRM_MEM_CTXBITMAP);
 	DRM_UNLOCK();
 }
@@ -41,7 +41,7 @@
 int drm_dma_setup(struct drm_device *dev)
 {
 
-	dev->dma = malloc(sizeof(*dev->dma), M_DRM, M_NOWAIT | M_ZERO);
+	dev->dma = malloc(sizeof(*dev->dma), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
 	if (dev->dma == NULL)
 		return ENOMEM;
 
@@ -67,21 +67,21 @@ void drm_dma_takedown(struct drm_device *dev)
 			for (j = 0; j < dma->bufs[i].seg_count; j++) {
 				drm_pci_free(dev, dma->bufs[i].seglist[j]);
 			}
-			free(dma->bufs[i].seglist, M_DRM);
+			free(dma->bufs[i].seglist, DRM_MEM_SEGS);
 		}
 
 		if (dma->bufs[i].buf_count) {
 			for (j = 0; j < dma->bufs[i].buf_count; j++) {
 				free(dma->bufs[i].buflist[j].dev_private,
-				    M_DRM);
+				    DRM_MEM_BUFS);
 			}
-			free(dma->bufs[i].buflist, M_DRM);
+			free(dma->bufs[i].buflist, DRM_MEM_BUFS);
 		}
 	}
 
-	free(dma->buflist, M_DRM);
-	free(dma->pagelist, M_DRM);
-	free(dev->dma, M_DRM);
+	free(dma->buflist, DRM_MEM_BUFS);
+	free(dma->pagelist, DRM_MEM_PAGES);
+	free(dev->dma, DRM_MEM_DRIVER);
 	dev->dma = NULL;
 	DRM_SPINUNINIT(&dev->dma_lock);
 }
@@ -71,8 +71,8 @@ int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	struct drm_draw *draw = data;
 	struct bsd_drm_drawable_info *info;
 
-	info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info),
-	    DRM_MEM_DRAWABLE);
+	info = malloc(sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE,
+	    M_NOWAIT | M_ZERO);
 	if (info == NULL)
 		return ENOMEM;
 
@@ -99,8 +99,8 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		    (struct bsd_drm_drawable_info *)info);
 		DRM_SPINUNLOCK(&dev->drw_lock);
 		free_unr(dev->drw_unrhdr, draw->handle);
-		drm_free(info, sizeof(struct bsd_drm_drawable_info),
-		    DRM_MEM_DRAWABLE);
+		free(info->rects, DRM_MEM_DRAWABLE);
+		free(info, DRM_MEM_DRAWABLE);
 		return 0;
 	} else {
 		DRM_SPINUNLOCK(&dev->drw_lock);
@@ -123,9 +123,7 @@ int drm_update_draw(struct drm_device *dev, void *data,
 	case DRM_DRAWABLE_CLIPRECTS:
 		DRM_SPINLOCK(&dev->drw_lock);
 		if (update->num != info->num_rects) {
-			drm_free(info->rects,
-			    sizeof(*info->rects) * info->num_rects,
-			    DRM_MEM_DRAWABLE);
+			free(info->rects, DRM_MEM_DRAWABLE);
 			info->rects = NULL;
 			info->num_rects = 0;
 		}
@@ -134,8 +132,8 @@ int drm_update_draw(struct drm_device *dev, void *data,
 			return 0;
 		}
 		if (info->rects == NULL) {
-			info->rects = drm_alloc(sizeof(*info->rects) *
-			    update->num, DRM_MEM_DRAWABLE);
+			info->rects = malloc(sizeof(*info->rects) *
+			    update->num, DRM_MEM_DRAWABLE, M_NOWAIT);
 			if (info->rects == NULL) {
 				DRM_SPINUNLOCK(&dev->drw_lock);
 				return ENOMEM;
@@ -164,8 +162,8 @@ void drm_drawable_free_all(struct drm_device *dev)
 		    (struct bsd_drm_drawable_info *)info);
 		DRM_SPINUNLOCK(&dev->drw_lock);
 		free_unr(dev->drw_unrhdr, info->handle);
-		drm_free(info, sizeof(struct bsd_drm_drawable_info),
-		    DRM_MEM_DRAWABLE);
+		free(info->info.rects, DRM_MEM_DRAWABLE);
+		free(info, DRM_MEM_DRAWABLE);
 		DRM_SPINLOCK(&dev->drw_lock);
 	}
 	DRM_SPINUNLOCK(&dev->drw_lock);
@@ -280,7 +280,7 @@ static int drm_lastclose(struct drm_device *dev)
 	drm_irq_uninstall(dev);
 
 	if (dev->unique) {
-		free(dev->unique, M_DRM);
+		free(dev->unique, DRM_MEM_DRIVER);
 		dev->unique = NULL;
 		dev->unique_len = 0;
 	}
@@ -288,7 +288,7 @@ static int drm_lastclose(struct drm_device *dev)
 	for (i = 0; i < DRM_HASH_SIZE; i++) {
 		for (pt = dev->magiclist[i].head; pt; pt = next) {
 			next = pt->next;
-			free(pt, M_DRM);
+			free(pt, DRM_MEM_MAGIC);
 		}
 		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
 	}
@@ -310,7 +310,7 @@ static int drm_lastclose(struct drm_device *dev)
 			if (entry->bound)
 				drm_agp_unbind_memory(entry->handle);
 			drm_agp_free_memory(entry->handle);
-			free(entry, M_DRM);
+			free(entry, DRM_MEM_AGPLISTS);
 		}
 		dev->agp->memory = NULL;
 
@@ -479,7 +479,7 @@ static void drm_unload(struct drm_device *dev)
 	}
 
 	if (dev->agp) {
-		free(dev->agp, M_DRM);
+		free(dev->agp, DRM_MEM_AGPLISTS);
 		dev->agp = NULL;
 	}
 
@@ -534,7 +534,7 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
 	struct drm_device *dev = NULL;
 	int retcode = 0;
 
-	dev = DRIVER_SOFTC(minor(kdev));
+	dev = DRIVER_SOFTC(dev2unit(kdev));
 
 	DRM_DEBUG("open_count = %d\n", dev->open_count);
 
@@ -623,7 +623,7 @@ void drm_close(void *data)
 	if (dev->driver->postclose != NULL)
 		dev->driver->postclose(dev, file_priv);
 	TAILQ_REMOVE(&dev->files, file_priv, link);
-	free(file_priv, M_DRM);
+	free(file_priv, DRM_MEM_FILES);
 
 	/* ========================================================
 	 * End inline drm_release
@@ -41,7 +41,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
 		    struct drm_device *dev)
 {
 	struct drm_file *priv;
-	int m = minor(kdev);
+	int m = dev2unit(kdev);
 	int retcode;
 
 	if (flags & O_EXCL)
@@ -50,14 +50,14 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
 
 	DRM_DEBUG("pid = %d, minor = %d\n", DRM_CURRENTPID, m);
 
-	priv = malloc(sizeof(*priv), M_DRM, M_NOWAIT | M_ZERO);
+	priv = malloc(sizeof(*priv), DRM_MEM_FILES, M_NOWAIT | M_ZERO);
 	if (priv == NULL) {
 		return ENOMEM;
 	}
 
 	retcode = devfs_set_cdevpriv(priv, drm_close);
 	if (retcode != 0) {
-		free(priv, M_DRM);
+		free(priv, DRM_MEM_FILES);
 		return retcode;
 	}
 
@@ -76,7 +76,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
 		retcode = -dev->driver->open(dev, priv);
 		if (retcode != 0) {
 			devfs_clear_cdevpriv();
-			free(priv, M_DRM);
+			free(priv, DRM_MEM_FILES);
 			DRM_UNLOCK();
 			return retcode;
 		}
@@ -69,12 +69,12 @@ int drm_setunique(struct drm_device *dev, void *data,
 	if (!u->unique_len || u->unique_len > 1024)
 		return EINVAL;
 
-	busid = malloc(u->unique_len + 1, M_DRM, M_WAITOK);
+	busid = malloc(u->unique_len + 1, DRM_MEM_DRIVER, M_WAITOK);
 	if (busid == NULL)
 		return ENOMEM;
 
 	if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) {
-		free(busid, M_DRM);
+		free(busid, DRM_MEM_DRIVER);
 		return EFAULT;
 	}
 	busid[u->unique_len] = '\0';
@@ -84,7 +84,7 @@ int drm_setunique(struct drm_device *dev, void *data,
 	 */
 	ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func);
 	if (ret != 3) {
-		free(busid, M_DRM);
+		free(busid, DRM_MEM_DRIVER);
 		return EINVAL;
 	}
 	domain = bus >> 8;
@@ -94,7 +94,7 @@ int drm_setunique(struct drm_device *dev, void *data,
 	    (bus != dev->pci_bus) ||
 	    (slot != dev->pci_slot) ||
 	    (func != dev->pci_func)) {
-		free(busid, M_DRM);
+		free(busid, DRM_MEM_DRIVER);
 		return EINVAL;
 	}
 
@@ -125,7 +125,7 @@ drm_set_busid(struct drm_device *dev)
 	}
 
 	dev->unique_len = 20;
-	dev->unique = malloc(dev->unique_len + 1, M_DRM, M_NOWAIT);
+	dev->unique = malloc(dev->unique_len + 1, DRM_MEM_DRIVER, M_NOWAIT);
 	if (dev->unique == NULL) {
 		DRM_UNLOCK();
 		return ENOMEM;
@@ -111,8 +111,7 @@ static void drm_vblank_cleanup(struct drm_device *dev)
 
 	vblank_disable_fn((void *)dev);
 
-	drm_free(dev->vblank, sizeof(struct drm_vblank_info) * dev->num_crtcs,
-	    DRM_MEM_DRIVER);
+	free(dev->vblank, DRM_MEM_DRIVER);
 
 	dev->num_crtcs = 0;
 }
@@ -125,8 +124,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	atomic_set(&dev->vbl_signal_pending, 0);
 	dev->num_crtcs = num_crtcs;
 
-	dev->vblank = drm_calloc(num_crtcs, sizeof(struct drm_vblank_info),
-	    DRM_MEM_DRIVER);
+	dev->vblank = malloc(sizeof(struct drm_vblank_info) * num_crtcs,
+	    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
 	if (!dev->vblank)
 		goto err;
 
@@ -429,8 +428,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 	if (flags & _DRM_VBLANK_SIGNAL) {
 #if 0 /* disabled */
-		drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t), M_DRM,
-		    M_NOWAIT | M_ZERO);
+		drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t),
+		    DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
 		if (vbl_sig == NULL)
 			return ENOMEM;
 
@@ -66,6 +66,6 @@ list_del(struct list_head *entry) {
 
 #define list_for_each_safe(entry, temp, head)			\
     for (entry = (head)->next, temp = (entry)->next;		\
-	temp != head;						\
-	entry = temp, temp = temp->next)
+	entry != head;						\
+	entry = temp, temp = entry->next)
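The drm_linux_list.h hunk above fixes the termination test in list_for_each_safe: with temp != head the walk stopped one node early, so the final element of the list was never visited. A standalone illustration (nothing from the tree) of the difference:

/* Demo: old vs. fixed safe-iteration macro on a 3-node circular list. */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_FOR_EACH_SAFE_OLD(entry, temp, head)		\
	for (entry = (head)->next, temp = (entry)->next;	\
	    temp != (head);					\
	    entry = temp, temp = temp->next)

#define LIST_FOR_EACH_SAFE_NEW(entry, temp, head)		\
	for (entry = (head)->next, temp = (entry)->next;	\
	    entry != (head);					\
	    entry = temp, temp = entry->next)

int main(void)
{
	struct list_head head, a, b, c, *entry, *temp;
	int n;

	/* Build the circular list: head <-> a <-> b <-> c */
	head.next = &a; a.next = &b; b.next = &c; c.next = &head;
	head.prev = &c; c.prev = &b; b.prev = &a; a.prev = &head;

	n = 0;
	LIST_FOR_EACH_SAFE_OLD(entry, temp, &head)
		n++;
	printf("old macro visits %d of 3 nodes\n", n);	/* prints 2 */

	n = 0;
	LIST_FOR_EACH_SAFE_NEW(entry, temp, &head)
		n++;
	printf("new macro visits %d of 3 nodes\n", n);	/* prints 3 */
	return 0;
}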
@@ -102,17 +102,15 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_lock *lock = data;
 
+	DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
+	    lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
+	    lock->flags);
+
 	if (lock->context == DRM_KERNEL_CONTEXT) {
 		DRM_ERROR("Process %d using kernel context %d\n",
 		    DRM_CURRENTPID, lock->context);
 		return EINVAL;
 	}
-	/* Check that the context unlock being requested actually matches
-	 * who currently holds the lock.
-	 */
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
-	    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
-		return EINVAL;
 
 	DRM_SPINLOCK(&dev->tsk_lock);
 	if (dev->locked_task_call != NULL) {
@@ -38,7 +38,25 @@
 
 #include "drmP.h"
 
-MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");
+MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
+MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
+MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
+MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
+MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
+MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
+MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
+MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
+MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
+MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
+MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
+MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
+MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
+    "DRM CTXBITMAP Data Structures");
+MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
+MALLOC_DEFINE(DRM_MEM_DRAWABLE, "drm_drawable", "DRM DRAWABLE Data Structures");
 
 void drm_mem_init(void)
 {
@@ -48,35 +66,6 @@ void drm_mem_uninit(void)
 {
 }
 
-void *drm_alloc(size_t size, int area)
-{
-	return malloc(size, M_DRM, M_NOWAIT);
-}
-
-void *drm_calloc(size_t nmemb, size_t size, int area)
-{
-	return malloc(size * nmemb, M_DRM, M_NOWAIT | M_ZERO);
-}
-
-void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
-{
-	void *pt;
-
-	pt = malloc(size, M_DRM, M_NOWAIT);
-	if (pt == NULL)
-		return NULL;
-	if (oldpt && oldsize) {
-		memcpy(pt, oldpt, DRM_MIN(oldsize,size));
-		free(oldpt, M_DRM);
-	}
-	return pt;
-}
-
-void drm_free(void *pt, size_t size, int area)
-{
-	free(pt, M_DRM);
-}
-
 void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map)
 {
 	return pmap_mapdev_attr(map->offset, map->size, PAT_WRITE_COMBINING);
@@ -64,7 +64,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
 		return NULL;
 	}
 
-	dmah = malloc(sizeof(drm_dma_handle_t), M_DRM, M_ZERO | M_NOWAIT);
+	dmah = malloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA, M_ZERO | M_NOWAIT);
 	if (dmah == NULL)
 		return NULL;
 
@@ -83,7 +83,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
 	    BUS_DMA_ALLOCNOW, NULL, NULL, /* flags, lockfunc, lockfuncargs */
 	    &dmah->tag);
 	if (ret != 0) {
-		free(dmah, M_DRM);
+		free(dmah, DRM_MEM_DMA);
 		return NULL;
 	}
 
@@ -91,7 +91,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
 	    &dmah->map);
 	if (ret != 0) {
 		bus_dma_tag_destroy(dmah->tag);
-		free(dmah, M_DRM);
+		free(dmah, DRM_MEM_DMA);
 		return NULL;
 	}
 
@@ -100,7 +100,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
 	if (ret != 0) {
 		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
 		bus_dma_tag_destroy(dmah->tag);
-		free(dmah, M_DRM);
+		free(dmah, DRM_MEM_DMA);
 		return NULL;
 	}
 
@@ -119,7 +119,7 @@ drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
 	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
 	bus_dma_tag_destroy(dmah->tag);
 
-	free(dmah, M_DRM);
+	free(dmah, DRM_MEM_DMA);
 }
 
 /*@}*/
@@ -40,9 +40,9 @@
 
 void drm_sg_cleanup(drm_sg_mem_t *entry)
 {
-	free((void *)entry->handle, M_DRM);
-	free(entry->busaddr, M_DRM);
-	free(entry, M_DRM);
+	free((void *)entry->handle, DRM_MEM_PAGES);
+	free(entry->busaddr, DRM_MEM_PAGES);
+	free(entry, DRM_MEM_SGLISTS);
 }
 
 int drm_sg_alloc(struct drm_device * dev, struct drm_scatter_gather * request)
@@ -54,7 +54,7 @@ int drm_sg_alloc(struct drm_device * dev, struct drm_scatter_gather * request)
 	if (dev->sg)
 		return EINVAL;
 
-	entry = malloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO);
+	entry = malloc(sizeof(*entry), DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
 	if (!entry)
 		return ENOMEM;
 
@@ -63,14 +63,14 @@ int drm_sg_alloc(struct drm_device * dev, struct drm_scatter_gather * request)
 
 	entry->pages = pages;
 
-	entry->busaddr = malloc(pages * sizeof(*entry->busaddr), M_DRM,
+	entry->busaddr = malloc(pages * sizeof(*entry->busaddr), DRM_MEM_PAGES,
 	    M_WAITOK | M_ZERO);
 	if (!entry->busaddr) {
 		drm_sg_cleanup(entry);
 		return ENOMEM;
 	}
 
-	entry->handle = (long)malloc(pages << PAGE_SHIFT, M_DRM,
+	entry->handle = (long)malloc(pages << PAGE_SHIFT, DRM_MEM_PAGES,
 	    M_WAITOK | M_ZERO);
 	if (entry->handle == 0) {
 		drm_sg_cleanup(entry);
@@ -59,7 +59,7 @@ int drm_sysctl_init(struct drm_device *dev)
 	struct sysctl_oid *top, *drioid;
 	int		  i;
 
-	info = malloc(sizeof *info, M_DRM, M_WAITOK | M_ZERO);
+	info = malloc(sizeof *info, DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
 	if ( !info )
 		return 1;
 	dev->sysctl = info;
@@ -111,7 +111,7 @@ int drm_sysctl_cleanup(struct drm_device *dev)
 	int error;
 	error = sysctl_ctx_free( &dev->sysctl->ctx );
 
-	free(dev->sysctl, M_DRM);
+	free(dev->sysctl, DRM_MEM_DRIVER);
 	dev->sysctl = NULL;
 
 	return error;
@@ -169,7 +169,8 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
 	TAILQ_FOREACH(map, &dev->maplist, link)
 		mapcount++;
 
-	tempmaps = malloc(sizeof(drm_local_map_t) * mapcount, M_DRM, M_NOWAIT);
+	tempmaps = malloc(sizeof(drm_local_map_t) * mapcount, DRM_MEM_DRIVER,
+	    M_NOWAIT);
 	if (tempmaps == NULL) {
 		DRM_UNLOCK();
 		return ENOMEM;
@@ -205,7 +206,7 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
 	SYSCTL_OUT(req, "", 1);
 
 done:
-	free(tempmaps, M_DRM);
+	free(tempmaps, DRM_MEM_DRIVER);
 	return retcode;
 }
 
@@ -229,7 +230,8 @@ static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
 	}
 	DRM_SPINLOCK(&dev->dma_lock);
 	tempdma = *dma;
-	templists = malloc(sizeof(int) * dma->buf_count, M_DRM, M_NOWAIT);
+	templists = malloc(sizeof(int) * dma->buf_count, DRM_MEM_DRIVER,
+	    M_NOWAIT);
 	for (i = 0; i < dma->buf_count; i++)
 		templists[i] = dma->buflist[i]->list;
 	dma = &tempdma;
@@ -261,7 +263,7 @@ static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
 
 	SYSCTL_OUT(req, "", 1);
 done:
-	free(templists, M_DRM);
+	free(templists, DRM_MEM_DRIVER);
 	return retcode;
 }
 
@@ -279,7 +281,8 @@ static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
 	TAILQ_FOREACH(priv, &dev->files, link)
 		privcount++;
 
-	tempprivs = malloc(sizeof(struct drm_file) * privcount, M_DRM, M_NOWAIT);
+	tempprivs = malloc(sizeof(struct drm_file) * privcount, DRM_MEM_DRIVER,
+	    M_NOWAIT);
 	if (tempprivs == NULL) {
 		DRM_UNLOCK();
 		return ENOMEM;
@@ -304,6 +307,6 @@ static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
 
 	SYSCTL_OUT(req, "", 1);
 done:
-	free(tempprivs, M_DRM);
+	free(tempprivs, DRM_MEM_DRIVER);
 	return retcode;
 }
@@ -32,20 +32,23 @@ int drm_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr,
     int prot)
 {
 	struct drm_device *dev = drm_get_device_from_kdev(kdev);
-	struct drm_file *file_priv;
+	struct drm_file *file_priv = NULL;
 	drm_local_map_t *map;
 	enum drm_map_type type;
 	vm_paddr_t phys;
+	int error;
 
-	DRM_LOCK();
-	TAILQ_FOREACH(file_priv, &dev->files, link)
-		if (file_priv->pid == curthread->td_proc->p_pid &&
-		    file_priv->uid == curthread->td_ucred->cr_svuid &&
-		    file_priv->authenticated == 1)
-			break;
-	DRM_UNLOCK();
+	/* d_mmap gets called twice, we can only reference file_priv during
+	 * the first call.  We need to assume that if error is EBADF the
+	 * call was successful and the client is authenticated.
+	 */
+	error = devfs_get_cdevpriv((void **)&file_priv);
+	if (error == ENOENT) {
+		DRM_ERROR("Could not find authenticator!\n");
+		return EINVAL;
+	}
 
-	if (!file_priv)
+	if (file_priv && !file_priv->authenticated)
 		return EACCES;
 
 	if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
@@ -109,9 +109,9 @@ i915_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	i915_configure(dev);
 
 	return drm_attach(nbdev, i915_pciidlist);
@@ -125,7 +125,7 @@ i915_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -83,9 +83,9 @@ mach64_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	mach64_configure(dev);
 
 	return drm_attach(nbdev, mach64_pciidlist);
@@ -99,7 +99,7 @@ mach64_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -127,9 +127,9 @@ mga_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	mga_configure(dev);
 
 	return drm_attach(nbdev, mga_pciidlist);
@@ -143,7 +143,7 @@ mga_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -82,9 +82,9 @@ r128_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	r128_configure(dev);
 
 	return drm_attach(nbdev, r128_pciidlist);
@@ -98,7 +98,7 @@ r128_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -87,9 +87,9 @@ radeon_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	radeon_configure(dev);
 
 	return drm_attach(nbdev, radeon_pciidlist);
@@ -103,7 +103,7 @@ radeon_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -73,9 +73,9 @@ savage_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	savage_configure(dev);
 
 	return drm_attach(nbdev, savage_pciidlist);
@@ -89,7 +89,7 @@ savage_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -67,9 +67,9 @@ sis_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	sis_configure(dev);
 
 	return drm_attach(nbdev, sis_pciidlist);
@@ -83,7 +83,7 @@ sis_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -69,9 +69,9 @@ tdfx_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	tdfx_configure(dev);
 
 	return drm_attach(nbdev, tdfx_pciidlist);
@@ -85,7 +85,7 @@ tdfx_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -80,9 +80,9 @@ via_attach(device_t nbdev)
 {
 	struct drm_device *dev = device_get_softc(nbdev);
 
 	bzero(dev, sizeof(struct drm_device));
-
-	dev->driver = malloc(sizeof(struct drm_driver_info), M_DRM, M_NOWAIT | M_ZERO);
+	dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
+	    M_WAITOK | M_ZERO);
 	via_configure(dev);
 
 	return drm_attach(nbdev, via_pciidlist);
@@ -96,7 +96,7 @@ via_detach(device_t nbdev)
 
 	ret = drm_detach(nbdev);
 
-	free(dev->driver, M_DRM);
+	free(dev->driver, DRM_MEM_DRIVER);
 
 	return ret;
 }
@@ -19,7 +19,7 @@
 # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 AC_PREREQ(2.57)
-AC_INIT([libdrm], 2.4.0, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.4.1, [dri-devel@lists.sourceforge.net], libdrm)
 AC_CONFIG_SRCDIR([Makefile.am])
 AM_INIT_AUTOMAKE([dist-bzip2])
@@ -39,26 +39,26 @@
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
 
-/** @file dri_bufmgr.c
+/** @file intel_bufmgr.c
  *
  * Convenience functions for buffer management methods.
  */
 
-dri_bo *
-dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
-	     unsigned int alignment)
+drm_intel_bo *
+drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+		   unsigned long size, unsigned int alignment)
 {
    return bufmgr->bo_alloc(bufmgr, name, size, alignment);
 }
 
 void
-dri_bo_reference(dri_bo *bo)
+drm_intel_bo_reference(drm_intel_bo *bo)
 {
    bo->bufmgr->bo_reference(bo);
 }
 
 void
-dri_bo_unreference(dri_bo *bo)
+drm_intel_bo_unreference(drm_intel_bo *bo)
 {
    if (bo == NULL)
       return;
@@ -67,38 +67,39 @@ dri_bo_unreference(dri_bo *bo)
 }
 
 int
-dri_bo_map(dri_bo *buf, int write_enable)
+drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
 {
    return buf->bufmgr->bo_map(buf, write_enable);
 }
 
 int
-dri_bo_unmap(dri_bo *buf)
+drm_intel_bo_unmap(drm_intel_bo *buf)
 {
    return buf->bufmgr->bo_unmap(buf);
 }
 
 int
-dri_bo_subdata(dri_bo *bo, unsigned long offset,
-	       unsigned long size, const void *data)
+drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+		     unsigned long size, const void *data)
 {
    int ret;
 
   if (bo->bufmgr->bo_subdata)
      return bo->bufmgr->bo_subdata(bo, offset, size, data);
   if (size == 0 || data == NULL)
      return 0;
 
-   ret = dri_bo_map(bo, 1);
+   ret = drm_intel_bo_map(bo, 1);
    if (ret)
       return ret;
    memcpy((unsigned char *)bo->virtual + offset, data, size);
-   dri_bo_unmap(bo);
+   drm_intel_bo_unmap(bo);
    return 0;
 }
 
 int
-dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
-		   unsigned long size, void *data)
+drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+			 unsigned long size, void *data)
 {
    int ret;
    if (bo->bufmgr->bo_subdata)
@@ -107,48 +108,48 @@ dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
   if (size == 0 || data == NULL)
      return 0;
 
-   ret = dri_bo_map(bo, 0);
+   ret = drm_intel_bo_map(bo, 0);
    if (ret)
       return ret;
    memcpy(data, (unsigned char *)bo->virtual + offset, size);
-   dri_bo_unmap(bo);
+   drm_intel_bo_unmap(bo);
    return 0;
 }
 
 void
-dri_bo_wait_rendering(dri_bo *bo)
+drm_intel_bo_wait_rendering(drm_intel_bo *bo)
 {
    bo->bufmgr->bo_wait_rendering(bo);
 }
 
 void
-dri_bufmgr_destroy(dri_bufmgr *bufmgr)
+drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
 {
    bufmgr->destroy(bufmgr);
 }
 
 int
-dri_bo_exec(dri_bo *bo, int used,
-	    drm_clip_rect_t *cliprects, int num_cliprects,
-	    int DR4)
+drm_intel_bo_exec(drm_intel_bo *bo, int used,
+		  drm_clip_rect_t *cliprects, int num_cliprects,
+		  int DR4)
 {
    return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
 }
 
 void
-dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug)
+drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
 {
   bufmgr->debug = enable_debug;
 }
 
 int
-dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count)
+drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count)
 {
   return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
 }
 
 int
-dri_bo_flink(dri_bo *bo, uint32_t *name)
+drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name)
 {
    if (bo->bufmgr->bo_flink)
       return bo->bufmgr->bo_flink(bo, name);
@@ -157,17 +158,17 @@ dri_bo_flink(dri_bo *bo, uint32_t *name)
 }
 
 int
-dri_bo_emit_reloc(dri_bo *reloc_buf,
-		  uint32_t read_domains, uint32_t write_domain,
-		  uint32_t delta, uint32_t offset, dri_bo *target_buf)
+drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+			drm_intel_bo *target_bo, uint32_t target_offset,
+			uint32_t read_domains, uint32_t write_domain)
 {
-   return reloc_buf->bufmgr->bo_emit_reloc(reloc_buf,
-					   read_domains, write_domain,
-					   delta, offset, target_buf);
+   return bo->bufmgr->bo_emit_reloc(bo, offset,
+				    target_bo, target_offset,
+				    read_domains, write_domain);
 }
 
 int
-dri_bo_pin(dri_bo *bo, uint32_t alignment)
+drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 {
    if (bo->bufmgr->bo_pin)
       return bo->bufmgr->bo_pin(bo, alignment);
@@ -176,7 +177,7 @@ dri_bo_pin(dri_bo *bo, uint32_t alignment)
 }
 
 int
-dri_bo_unpin(dri_bo *bo)
+drm_intel_bo_unpin(drm_intel_bo *bo)
 {
    if (bo->bufmgr->bo_unpin)
       return bo->bufmgr->bo_unpin(bo);
@@ -184,11 +185,23 @@ dri_bo_unpin(dri_bo *bo)
    return -ENODEV;
 }
 
-int dri_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+			    uint32_t stride)
 {
    if (bo->bufmgr->bo_set_tiling)
-      return bo->bufmgr->bo_set_tiling(bo, tiling_mode);
+      return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
 
    *tiling_mode = I915_TILING_NONE;
    return 0;
 }
+
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+			    uint32_t *swizzle_mode)
+{
+   if (bo->bufmgr->bo_get_tiling)
+      return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
+
+   *tiling_mode = I915_TILING_NONE;
+   *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+   return 0;
+}
@@ -36,10 +36,10 @@
 
 #include <stdint.h>
 
-typedef struct _dri_bufmgr dri_bufmgr;
-typedef struct _dri_bo dri_bo;
+typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
+typedef struct _drm_intel_bo drm_intel_bo;
 
-struct _dri_bo {
+struct _drm_intel_bo {
    /**
    * Size in bytes of the buffer object.
    *
@@ -47,6 +47,13 @@ struct _dri_bo {
    * allocation, such as being aligned to page size.
    */
   unsigned long size;
+   /**
+   * Alignment requirement for object
+   *
+   * Used for GTT mapping & pinning the object.
+   */
+   unsigned long align;
+
   /**
   * Card virtual address (offset from the beginning of the aperture) for the
   * object. Only valid while validated.
@@ -58,74 +65,123 @@ struct _dri_bo {
    void *virtual;
 
   /** Buffer manager context associated with this buffer object */
-   dri_bufmgr *bufmgr;
+   drm_intel_bufmgr *bufmgr;
 
   /**
   * MM-specific handle for accessing object
  */
  int handle;
 };
 
-dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
-		     unsigned int alignment);
-void dri_bo_reference(dri_bo *bo);
-void dri_bo_unreference(dri_bo *bo);
-int dri_bo_map(dri_bo *buf, int write_enable);
-int dri_bo_unmap(dri_bo *buf);
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+				 unsigned long size, unsigned int alignment);
+void drm_intel_bo_reference(drm_intel_bo *bo);
+void drm_intel_bo_unreference(drm_intel_bo *bo);
+int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
+int drm_intel_bo_unmap(drm_intel_bo *bo);
 
-int dri_bo_subdata(dri_bo *bo, unsigned long offset,
-		   unsigned long size, const void *data);
-int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
-		       unsigned long size, void *data);
-void dri_bo_wait_rendering(dri_bo *bo);
+int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+			 unsigned long size, const void *data);
+int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data);
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
 
-void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
-void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
-int dri_bo_exec(dri_bo *bo, int used,
-		drm_clip_rect_t *cliprects, int num_cliprects,
-		int DR4);
-int dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count);
+void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
+int drm_intel_bo_exec(drm_intel_bo *bo, int used,
+		      drm_clip_rect_t *cliprects, int num_cliprects,
+		      int DR4);
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count);
 
-int dri_bo_emit_reloc(dri_bo *reloc_buf,
-		      uint32_t read_domains, uint32_t write_domain,
-		      uint32_t delta, uint32_t offset, dri_bo *target_buf);
-int dri_bo_pin(dri_bo *buf, uint32_t alignment);
-int dri_bo_unpin(dri_bo *buf);
-int dri_bo_set_tiling(dri_bo *buf, uint32_t *tiling_mode);
-int dri_bo_flink(dri_bo *buf, uint32_t *name);
+int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+			    drm_intel_bo *target_bo, uint32_t target_offset,
+			    uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
+int drm_intel_bo_unpin(drm_intel_bo *bo);
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+			    uint32_t stride);
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+			    uint32_t *swizzle_mode);
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name);
 
-/* intel_bufmgr_gem.c */
-dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
-dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
-				      unsigned int handle);
-void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
+/* drm_intel_bufmgr_gem.c */
+drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
+drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+						const char *name,
+						unsigned int handle);
+void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
 
-/* intel_bufmgr_fake.c */
-dri_bufmgr *intel_bufmgr_fake_init(int fd,
-				   unsigned long low_offset, void *low_virtual,
-				   unsigned long size,
-				   volatile unsigned int *last_dispatch);
-void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
-					 volatile unsigned int *last_dispatch);
-void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
-					 int (*exec)(dri_bo *bo,
-						     unsigned int used,
-						     void *priv),
-					 void *priv);
-void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
-					  unsigned int (*emit)(void *priv),
-					  void (*wait)(unsigned int fence,
-						       void *priv),
-					  void *priv);
-dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
-				   unsigned long offset, unsigned long size,
-				   void *virtual);
-void intel_bo_fake_disable_backing_store(dri_bo *bo,
-					 void (*invalidate_cb)(dri_bo *bo,
-							       void *ptr),
-					 void *ptr);
+/* drm_intel_bufmgr_fake.c */
+drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
+					     unsigned long low_offset,
+					     void *low_virtual,
+					     unsigned long size,
+					     volatile unsigned int *last_dispatch);
+void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+					     volatile unsigned int *last_dispatch);
+void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+					     int (*exec)(drm_intel_bo *bo,
							 unsigned int used,
							 void *priv),
+					     void *priv);
+void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+					      unsigned int (*emit)(void *priv),
+					      void (*wait)(unsigned int fence,
							   void *priv),
+					      void *priv);
+drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+					     const char *name,
+					     unsigned long offset, unsigned long size,
+					     void *virtual);
+void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+					     void (*invalidate_cb)(drm_intel_bo *bo,
								   void *ptr),
+					     void *ptr);
 
-void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
-void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
+void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
+
+/** @{ Compatibility defines to keep old code building despite the symbol rename
+ * from dri_* to drm_intel_*
+ */
+#define dri_bo drm_intel_bo
+#define dri_bufmgr drm_intel_bufmgr
+#define dri_bo_alloc drm_intel_bo_alloc
+#define dri_bo_reference drm_intel_bo_reference
+#define dri_bo_unreference drm_intel_bo_unreference
+#define dri_bo_map drm_intel_bo_map
+#define dri_bo_unmap drm_intel_bo_unmap
+#define dri_bo_subdata drm_intel_bo_subdata
+#define dri_bo_get_subdata drm_intel_bo_get_subdata
+#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
+#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
+#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
+#define dri_bo_exec drm_intel_bo_exec
+#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
+#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset,	\
+			  reloc_offset, target_bo)			\
+	drm_intel_bo_emit_reloc(reloc_bo, reloc_offset,			\
+				target_bo, target_offset,		\
+				read, write);
+#define dri_bo_pin drm_intel_bo_pin
+#define dri_bo_unpin drm_intel_bo_unpin
+#define dri_bo_get_tiling drm_intel_bo_get_tiling
+#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
+#define dri_bo_flink drm_intel_bo_flink
+#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
+#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
+#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
+#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
+#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
+#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
+#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
+#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
|
||||
#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
|
||||
#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
|
||||
#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
|
||||
|
||||
/** @{ */
|
||||
|
||||
#endif /* INTEL_BUFMGR_H */
|
||||
|
||||
|
|
|
|||
|
|
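
The compatibility layer above means a pre-rename caller keeps building with no source changes. A minimal sketch of such a caller (the buffer name, size and alignment are made up for illustration):

    #include "intel_bufmgr.h"

    /* Old-style code: each dri_* name below is rewritten by the
     * compatibility defines into its drm_intel_* equivalent at
     * compile time, so this builds unchanged against the renamed
     * library. */
    static dri_bo *alloc_scratch(dri_bufmgr *bufmgr)
    {
        /* expands to drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096) */
        return dri_bo_alloc(bufmgr, "scratch", 4096, 4096);
    }
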
@@ -75,7 +75,7 @@
struct fake_buffer_reloc
{
    /** Buffer object that the relocation points at. */
    dri_bo *target_buf;
    drm_intel_bo *target_buf;
    /** Offset of the relocation entry within reloc_buf. */
    uint32_t offset;
    /** Cached value of the offset when we last performed this relocation. */
@@ -106,12 +106,12 @@ struct block {
    /** Fence cookie for the block. */
    unsigned fence; /* Split to read_fence, write_fence */

    dri_bo *bo;
    drm_intel_bo *bo;
    void *virtual;
};

typedef struct _bufmgr_fake {
    dri_bufmgr bufmgr;
    drm_intel_bufmgr bufmgr;

    pthread_mutex_t lock;

@@ -163,7 +163,7 @@ typedef struct _bufmgr_fake {
     * This allows the driver to hook in a replacement for the DRM usage in
     * bufmgr_fake.
     */
    int (*exec)(dri_bo *bo, unsigned int used, void *priv);
    int (*exec)(drm_intel_bo *bo, unsigned int used, void *priv);
    void *exec_priv;

    /** Driver-supplied argument to driver callbacks */
@@ -176,10 +176,10 @@ typedef struct _bufmgr_fake {
    int debug;

    int performed_rendering;
} dri_bufmgr_fake;
} drm_intel_bufmgr_fake;

typedef struct _dri_bo_fake {
    dri_bo bo;
typedef struct _drm_intel_bo_fake {
    drm_intel_bo bo;

    unsigned id; /* debug only */
    const char *name;
@@ -214,11 +214,11 @@ typedef struct _dri_bo_fake {

    struct block *block;
    void *backing_store;
    void (*invalidate_cb)(dri_bo *bo, void *ptr);
    void (*invalidate_cb)(drm_intel_bo *bo, void *ptr);
    void *invalidate_ptr;
} dri_bo_fake;
} drm_intel_bo_fake;

static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
                        unsigned int fence_cookie);

#define MAXFENCE 0x7fffffff
@@ -237,13 +237,13 @@ static int FENCE_LTE( unsigned a, unsigned b )
    return 0;
}

void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
                                          unsigned int (*emit)(void *priv),
                                          void (*wait)(unsigned int fence,
                                                       void *priv),
                                          void *priv)
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
                                              unsigned int (*emit)(void *priv),
                                              void (*wait)(unsigned int fence,
                                                           void *priv),
                                              void *priv)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

    bufmgr_fake->fence_emit = emit;
    bufmgr_fake->fence_wait = wait;
@@ -251,13 +251,15 @@ void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
}

static unsigned int
_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
{
    struct drm_i915_irq_emit ie;
    int ret, seq = 1;

    if (bufmgr_fake->fence_emit != NULL)
        return bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
    if (bufmgr_fake->fence_emit != NULL) {
        seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
        return seq;
    }

    ie.irq_seq = &seq;
    ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
@@ -268,12 +270,11 @@ _fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
}

    DBG("emit 0x%08x\n", seq);
    bufmgr_fake->last_fence = seq;
    return bufmgr_fake->last_fence;
    return seq;
}

static void
_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq)
_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
{
    struct drm_i915_irq_wait iw;
    int hw_seq, busy_count = 0;
@@ -282,6 +283,7 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq)

    if (bufmgr_fake->fence_wait != NULL) {
        bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
        clear_fenced(bufmgr_fake, seq);
        return;
    }

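
For reference, the emit/wait pair installed through drm_intel_bufmgr_fake_set_fence_callback() could look like the sketch below. The software-only counter is purely illustrative; a real driver would emit and wait on a hardware breadcrumb instead:

    struct sw_fence_state {
        unsigned int emitted;               /* last cookie handed out */
        volatile unsigned int *completed;   /* advanced by the "hardware" */
    };

    static unsigned int sw_fence_emit(void *priv)
    {
        struct sw_fence_state *s = priv;
        return ++s->emitted;        /* cookie the bufmgr stores per block */
    }

    static void sw_fence_wait(unsigned int fence, void *priv)
    {
        struct sw_fence_state *s = priv;
        while (*s->completed < fence)
            ;                       /* a real callback would sleep on an IRQ */
    }

    /* drm_intel_bufmgr_fake_set_fence_callback(bufmgr, sw_fence_emit,
     *                                          sw_fence_wait, &state); */
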
@@ -395,7 +397,7 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq)
}

static int
_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
    /* Slight problem with wrap-around:
     */
@@ -406,10 +408,10 @@ _fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
 * Allocate a memory manager block for the buffer.
 */
static int
alloc_block(dri_bo *bo)
alloc_block(drm_intel_bo *bo)
{
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake= (drm_intel_bufmgr_fake *)bo->bufmgr;
    struct block *block = (struct block *)calloc(sizeof *block, 1);
    unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
    unsigned int sz;
@@ -442,18 +444,18 @@ alloc_block(dri_bo *bo)

/* Release the card storage associated with buf:
 */
static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block)
{
    dri_bo_fake *bo_fake;
    drm_intel_bo_fake *bo_fake;
    DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);

    if (!block)
        return;

    bo_fake = (dri_bo_fake *)block->bo;
    if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) {
    bo_fake = (drm_intel_bo_fake *)block->bo;
    if (!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)) && (bo_fake->card_dirty == 1)) {
        memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
        bo_fake->card_dirty = 1;
        bo_fake->card_dirty = 0;
        bo_fake->dirty = 1;
    }

@@ -473,10 +475,10 @@ static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
}

static void
alloc_backing_store(dri_bo *bo)
alloc_backing_store(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
    assert(!bo_fake->backing_store);
    assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));

@@ -487,9 +489,9 @@ alloc_backing_store(dri_bo *bo)
}

static void
free_backing_store(dri_bo *bo)
free_backing_store(drm_intel_bo *bo)
{
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    if (bo_fake->backing_store) {
        assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
@@ -499,10 +501,10 @@ free_backing_store(dri_bo *bo)
}

static void
set_dirty(dri_bo *bo)
set_dirty(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
        bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
@@ -514,14 +516,14 @@ set_dirty(dri_bo *bo)
}

static int
evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
{
    struct block *block, *tmp;

    DBG("%s\n", __FUNCTION__);

    DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
        dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;

        if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
            continue;
@@ -540,14 +542,14 @@ evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
}

static int
evict_mru(dri_bufmgr_fake *bufmgr_fake)
evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
{
    struct block *block, *tmp;

    DBG("%s\n", __FUNCTION__);

    DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
        dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;

        if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
            continue;
@@ -565,12 +567,13 @@ evict_mru(dri_bufmgr_fake *bufmgr_fake)
/**
 * Removes all objects from the fenced list older than the given fence.
 */
static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
                        unsigned int fence_cookie)
{
    struct block *block, *tmp;
    int ret = 0;

    bufmgr_fake->last_fence = fence_cookie;
    DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
        assert(block->fenced);

@@ -608,7 +611,7 @@ static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
    return ret;
}

static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
static void fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
    struct block *block, *tmp;

@@ -629,10 +632,10 @@ static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
    assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
}

static int evict_and_alloc_block(dri_bo *bo)
static int evict_and_alloc_block(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    assert(bo_fake->block == NULL);

@@ -699,7 +702,7 @@ static int evict_and_alloc_block(dri_bo *bo)
 * Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
{
    unsigned int cookie;

@@ -714,20 +717,24 @@ dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
 * the necessary flushing.
 */
static void
dri_fake_bo_wait_rendering(dri_bo *bo)
drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    pthread_mutex_lock(&bufmgr_fake->lock);

    if (bo_fake->block == NULL || !bo_fake->block->fenced) {
        pthread_mutex_unlock(&bufmgr_fake->lock);
    if (bo_fake->block == NULL || !bo_fake->block->fenced)
        return;
    }

    _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
}

static void
drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
{
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;

    pthread_mutex_lock(&bufmgr_fake->lock);
    drm_intel_fake_bo_wait_rendering_locked(bo);
    pthread_mutex_unlock(&bufmgr_fake->lock);
}

@@ -736,9 +743,9 @@ dri_fake_bo_wait_rendering(dri_bo *bo)
 * -- and wait for idle
 */
void
intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
    struct block *block, *tmp;

    pthread_mutex_lock(&bufmgr_fake->lock);

@@ -750,7 +757,7 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
     * happening, so we'll need to wait anyway before letting anything get
     * put on the card again.
     */
    dri_bufmgr_fake_wait_idle(bufmgr_fake);
    drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

    /* Check that we hadn't released the lock without having fenced the last
     * set of buffers.
@@ -766,14 +773,14 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
    pthread_mutex_unlock(&bufmgr_fake->lock);
}

static dri_bo *
dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
                  unsigned long size, unsigned int alignment)
static drm_intel_bo *
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
                        unsigned long size, unsigned int alignment)
{
    dri_bufmgr_fake *bufmgr_fake;
    dri_bo_fake *bo_fake;
    drm_intel_bufmgr_fake *bufmgr_fake;
    drm_intel_bo_fake *bo_fake;

    bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

    assert(size != 0);

@@ -803,15 +810,15 @@ dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
    return &bo_fake->bo;
}

dri_bo *
intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
                           unsigned long offset, unsigned long size,
                           void *virtual)
drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
                               unsigned long offset, unsigned long size,
                               void *virtual)
{
    dri_bufmgr_fake *bufmgr_fake;
    dri_bo_fake *bo_fake;
    drm_intel_bufmgr_fake *bufmgr_fake;
    drm_intel_bo_fake *bo_fake;

    bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

    assert(size != 0);

@@ -836,10 +843,10 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
}

static void
dri_fake_bo_reference(dri_bo *bo)
drm_intel_fake_bo_reference(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    pthread_mutex_lock(&bufmgr_fake->lock);
    bo_fake->refcount++;
@@ -847,18 +854,18 @@ dri_fake_bo_reference(dri_bo *bo)
}

static void
dri_fake_bo_reference_locked(dri_bo *bo)
drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
{
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    bo_fake->refcount++;
}

static void
dri_fake_bo_unreference_locked(dri_bo *bo)
drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
    int i;

    if (--bo_fake->refcount == 0) {
@@ -869,7 +876,7 @@ dri_fake_bo_unreference_locked(dri_bo *bo)
        free_backing_store(bo);

        for (i = 0; i < bo_fake->nr_relocs; i++)
            dri_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
            drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);

        DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);

@@ -879,12 +886,12 @@ dri_fake_bo_unreference_locked(dri_bo *bo)
}

static void
dri_fake_bo_unreference(dri_bo *bo)
drm_intel_fake_bo_unreference(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;

    pthread_mutex_lock(&bufmgr_fake->lock);
    dri_fake_bo_unreference_locked(bo);
    drm_intel_fake_bo_unreference_locked(bo);
    pthread_mutex_unlock(&bufmgr_fake->lock);
}

@@ -892,13 +899,13 @@ dri_fake_bo_unreference(dri_bo *bo)
 * Set the buffer as not requiring backing store, and instead get the callback
 * invoked whenever it would be set dirty.
 */
void intel_bo_fake_disable_backing_store(dri_bo *bo,
                                         void (*invalidate_cb)(dri_bo *bo,
                                                               void *ptr),
                                         void *ptr)
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
                                             void (*invalidate_cb)(drm_intel_bo *bo,
                                                                   void *ptr),
                                             void *ptr)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    pthread_mutex_lock(&bufmgr_fake->lock);

@@ -927,14 +934,19 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
 * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
 */
static int
dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    /* Static buffers are always mapped. */
    if (bo_fake->is_static)
    if (bo_fake->is_static) {
        if (bo_fake->card_dirty) {
            drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
            bo_fake->card_dirty = 0;
        }
        return 0;
    }

    /* Allow recursive mapping. Mesa may recursively map buffers with
     * nested display loops, and it is used internally in bufmgr_fake
@@ -964,7 +976,7 @@ dri_fake_bo_map_locked(dri_bo *bo, int write_enable)

    if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
        bo_fake->block->fenced) {
        dri_fake_bo_wait_rendering(bo);
        drm_intel_fake_bo_wait_rendering_locked(bo);
    }

    bo->virtual = bo_fake->block->virtual;
@@ -977,6 +989,14 @@ dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
        if (bo_fake->backing_store == 0)
            alloc_backing_store(bo);

        if ((bo_fake->card_dirty == 1) && bo_fake->block) {
            if (bo_fake->block->fenced)
                drm_intel_fake_bo_wait_rendering_locked(bo);

            memcpy(bo_fake->backing_store, bo_fake->block->virtual, bo_fake->block->bo->size);
            bo_fake->card_dirty = 0;
        }

        bo->virtual = bo_fake->backing_store;
    }
}
@@ -985,23 +1005,23 @@ dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
}

static int
dri_fake_bo_map(dri_bo *bo, int write_enable)
drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    int ret;

    pthread_mutex_lock(&bufmgr_fake->lock);
    ret = dri_fake_bo_map_locked(bo, write_enable);
    ret = drm_intel_fake_bo_map_locked(bo, write_enable);
    pthread_mutex_unlock(&bufmgr_fake->lock);

    return ret;
}

static int
dri_fake_bo_unmap_locked(dri_bo *bo)
drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    /* Static buffers are always mapped. */
    if (bo_fake->is_static)
@@ -1020,30 +1040,28 @@ dri_fake_bo_unmap_locked(dri_bo *bo)
}

static int
dri_fake_bo_unmap(dri_bo *bo)
drm_intel_fake_bo_unmap(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    int ret;

    pthread_mutex_lock(&bufmgr_fake->lock);
    ret = dri_fake_bo_unmap_locked(bo);
    ret = drm_intel_fake_bo_unmap_locked(bo);
    pthread_mutex_unlock(&bufmgr_fake->lock);

    return ret;
}

static void
dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
{
    struct block *block, *tmp;

    pthread_mutex_lock(&bufmgr_fake->lock);

    bufmgr_fake->performed_rendering = 0;
    /* okay for ever BO that is on the HW kick it off.
       seriously not afraid of the POLICE right now */
    DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
        dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;

        block->on_hardware = 0;
        free_block(bufmgr_fake, block);
@@ -1052,17 +1070,15 @@ dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
        if (!(bo_fake->flags & BM_NO_BACKING_STORE))
            bo_fake->dirty = 1;
    }

    pthread_mutex_unlock(&bufmgr_fake->lock);
}

static int
dri_fake_bo_validate(dri_bo *bo)
drm_intel_fake_bo_validate(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

    bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;

    DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
        bo_fake->bo.size / 1024);
@@ -1103,7 +1119,7 @@ dri_fake_bo_validate(dri_bo *bo)
     * which we would be tracking when we free it. Waiting for idle is
     * a sufficiently large hammer for now.
     */
    dri_bufmgr_fake_wait_idle(bufmgr_fake);
    drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

    /* we may never have mapped this BO so it might not have any backing
     * store if this happens it should be rare, but 0 the card memory
@@ -1128,9 +1144,9 @@ dri_fake_bo_validate(dri_bo *bo)
}

static void
dri_fake_fence_validated(dri_bufmgr *bufmgr)
drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
    unsigned int cookie;

    cookie = _fence_emit_internal(bufmgr_fake);
@@ -1140,9 +1156,9 @@ dri_fake_fence_validated(dri_bufmgr *bufmgr)
}

static void
dri_fake_destroy(dri_bufmgr *bufmgr)
drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

    pthread_mutex_destroy(&bufmgr_fake->lock);
    mmDestroy(bufmgr_fake->heap);
@@ -1150,46 +1166,46 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
}

static int
dri_fake_emit_reloc(dri_bo *reloc_buf,
                    uint32_t read_domains, uint32_t write_domain,
                    uint32_t delta, uint32_t offset, dri_bo *target_buf)
drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                          drm_intel_bo *target_bo, uint32_t target_offset,
                          uint32_t read_domains, uint32_t write_domain)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    struct fake_buffer_reloc *r;
    dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
    dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
    drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)target_bo;
    int i;

    pthread_mutex_lock(&bufmgr_fake->lock);

    assert(reloc_buf);
    assert(target_buf);
    assert(bo);
    assert(target_bo);

    if (reloc_fake->relocs == NULL) {
        reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
                                    MAX_RELOCS);
    if (bo_fake->relocs == NULL) {
        bo_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
    }

    r = &reloc_fake->relocs[reloc_fake->nr_relocs++];
    r = &bo_fake->relocs[bo_fake->nr_relocs++];

    assert(reloc_fake->nr_relocs <= MAX_RELOCS);
    assert(bo_fake->nr_relocs <= MAX_RELOCS);

    dri_fake_bo_reference_locked(target_buf);
    drm_intel_fake_bo_reference_locked(target_bo);

    if (!target_fake->is_static)
        reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment);

    r->target_buf = target_buf;
    if (!target_fake->is_static) {
        bo_fake->child_size += ALIGN(target_bo->size, target_fake->alignment);
        bo_fake->child_size += target_fake->child_size;
    }
    r->target_buf = target_bo;
    r->offset = offset;
    r->last_target_offset = target_buf->offset;
    r->delta = delta;
    r->last_target_offset = target_bo->offset;
    r->delta = target_offset;
    r->read_domains = read_domains;
    r->write_domain = write_domain;

    if (bufmgr_fake->debug) {
        /* Check that a conflicting relocation hasn't already been emitted. */
        for (i = 0; i < reloc_fake->nr_relocs - 1; i++) {
            struct fake_buffer_reloc *r2 = &reloc_fake->relocs[i];
        for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
            struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];

            assert(r->offset != r2->offset);
        }
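
The rename also reorders the relocation arguments, which the compatibility macro in intel_bufmgr.h papers over; for hand-ported callers the mapping (the offsets in the example are illustrative) is:

    /* old: dri_bo_emit_reloc(batch, read_domains, write_domain,
     *                        delta, reloc_offset, target);
     * new: drm_intel_bo_emit_reloc(batch, reloc_offset,
     *                              target, target_offset,
     *                              read_domains, write_domain);
     * where target_offset plays the role of the old delta.
     *
     * e.g. dri_bo_emit_reloc(batch, I915_GEM_DOMAIN_RENDER, 0, 0, 4, target)
     * becomes
     *      drm_intel_bo_emit_reloc(batch, 4, target, 0,
     *                              I915_GEM_DOMAIN_RENDER, 0);
     */
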
@@ -1205,45 +1221,44 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
 * the combined validation flags for the buffer on this batchbuffer submission.
 */
static void
dri_fake_calculate_domains(dri_bo *bo)
drm_intel_fake_calculate_domains(drm_intel_bo *bo)
{
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
    int i;

    for (i = 0; i < bo_fake->nr_relocs; i++) {
        struct fake_buffer_reloc *r = &bo_fake->relocs[i];
        dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
        drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;

        /* Do the same for the tree of buffers we depend on */
        dri_fake_calculate_domains(r->target_buf);
        drm_intel_fake_calculate_domains(r->target_buf);

        target_fake->read_domains |= r->read_domains;
        if (target_fake->write_domain != 0)
            target_fake->write_domain = r->write_domain;
        target_fake->write_domain |= r->write_domain;
    }
}


static int
dri_fake_reloc_and_validate_buffer(dri_bo *bo)
drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
    int i, ret;

    assert(bo_fake->map_count == 0);

    for (i = 0; i < bo_fake->nr_relocs; i++) {
        struct fake_buffer_reloc *r = &bo_fake->relocs[i];
        dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
        drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
        uint32_t reloc_data;

        /* Validate the target buffer if that hasn't been done. */
        if (!target_fake->validated) {
            ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
            ret = drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
            if (ret != 0) {
                if (bo->virtual != NULL)
                    dri_fake_bo_unmap_locked(bo);
                    drm_intel_fake_bo_unmap_locked(bo);
                return ret;
            }
        }
@@ -1253,7 +1268,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
        reloc_data = r->target_buf->offset + r->delta;

        if (bo->virtual == NULL)
            dri_fake_bo_map_locked(bo, 1);
            drm_intel_fake_bo_map_locked(bo, 1);

        *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;

@@ -1262,34 +1277,33 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
    }

    if (bo->virtual != NULL)
        dri_fake_bo_unmap_locked(bo);
        drm_intel_fake_bo_unmap_locked(bo);

    if (bo_fake->write_domain != 0) {
        if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
            if (bo_fake->backing_store == 0)
                alloc_backing_store(bo);

            bo_fake->card_dirty = 1;
        }
        bo_fake->card_dirty = 1;
        bufmgr_fake->performed_rendering = 1;
    }

    return dri_fake_bo_validate(bo);
    return drm_intel_fake_bo_validate(bo);
}

static void
dri_bo_fake_post_submit(dri_bo *bo)
drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
    int i;

    for (i = 0; i < bo_fake->nr_relocs; i++) {
        struct fake_buffer_reloc *r = &bo_fake->relocs[i];
        dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
        drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;

        if (target_fake->validated)
            dri_bo_fake_post_submit(r->target_buf);
            drm_intel_bo_fake_post_submit(r->target_buf);

        DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
            bo_fake->name, (uint32_t)bo->offset, r->offset,
@@ -1303,25 +1317,25 @@ dri_bo_fake_post_submit(dri_bo *bo)
}


void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
                                         int (*exec)(dri_bo *bo,
                                                     unsigned int used,
                                                     void *priv),
                                         void *priv)
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
                                             int (*exec)(drm_intel_bo *bo,
                                                         unsigned int used,
                                                         void *priv),
                                             void *priv)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

    bufmgr_fake->exec = exec;
    bufmgr_fake->exec_priv = priv;
}

static int
dri_fake_bo_exec(dri_bo *bo, int used,
                 drm_clip_rect_t *cliprects, int num_cliprects,
                 int DR4)
drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
                       drm_clip_rect_t *cliprects, int num_cliprects,
                       int DR4)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *batch_fake = (dri_bo_fake *)bo;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
    drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *)bo;
    struct drm_i915_batchbuffer batch;
    int ret;
    int retry_count = 0;
@@ -1330,17 +1344,17 @@ dri_fake_bo_exec(dri_bo *bo, int used,

    bufmgr_fake->performed_rendering = 0;

    dri_fake_calculate_domains(bo);
    drm_intel_fake_calculate_domains(bo);

    batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;

    /* we've ran out of RAM so blow the whole lot away and retry */
restart:
    ret = dri_fake_reloc_and_validate_buffer(bo);
    ret = drm_intel_fake_reloc_and_validate_buffer(bo);
    if (bufmgr_fake->fail == 1) {
        if (retry_count == 0) {
            retry_count++;
            dri_fake_kick_all(bufmgr_fake);
            drm_intel_fake_kick_all_locked(bufmgr_fake);
            bufmgr_fake->fail = 0;
            goto restart;
        } else /* dump out the memory here */
@@ -1351,8 +1365,10 @@ dri_fake_bo_exec(dri_bo *bo, int used,

    if (bufmgr_fake->exec != NULL) {
        int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
        if (ret != 0)
        if (ret != 0) {
            pthread_mutex_unlock(&bufmgr_fake->lock);
            return ret;
        }
    } else {
        batch.start = bo->offset;
        batch.used = used;
@@ -1364,13 +1380,14 @@ dri_fake_bo_exec(dri_bo *bo, int used,
        if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
                            sizeof(batch))) {
            drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
            pthread_mutex_unlock(&bufmgr_fake->lock);
            return -errno;
        }
    }

    dri_fake_fence_validated(bo->bufmgr);
    drm_intel_fake_fence_validated(bo->bufmgr);

    dri_bo_fake_post_submit(bo);
    drm_intel_bo_fake_post_submit(bo);

    pthread_mutex_unlock(&bufmgr_fake->lock);

@@ -1385,14 +1402,14 @@ dri_fake_bo_exec(dri_bo *bo, int used,
 * a set smaller than the aperture.
 */
static int
dri_fake_check_aperture_space(dri_bo **bo_array, int count)
drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo_array[0]->bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo_array[0]->bufmgr;
    unsigned int sz = 0;
    int i;

    for (i = 0; i < count; i++) {
        dri_bo_fake *bo_fake = (dri_bo_fake *)bo_array[i];
        drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo_array[i];

        if (bo_fake == NULL)
            continue;
@@ -1421,9 +1438,9 @@ dri_fake_check_aperture_space(dri_bo **bo_array, int count)
 * own.
 */
void
intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
    struct block *block, *tmp;

    pthread_mutex_lock(&bufmgr_fake->lock);

@@ -1435,7 +1452,7 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
     * happening, so we'll need to wait anyway before letting anything get
     * put on the card again.
     */
    dri_bufmgr_fake_wait_idle(bufmgr_fake);
    drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

    /* Check that we hadn't released the lock without having fenced the last
     * set of buffers.
@@ -1450,21 +1467,21 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)

    pthread_mutex_unlock(&bufmgr_fake->lock);
}
void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
                                             volatile unsigned int *last_dispatch)
{
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

    bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}

dri_bufmgr *
intel_bufmgr_fake_init(int fd,
drm_intel_bufmgr *
drm_intel_bufmgr_fake_init(int fd,
                           unsigned long low_offset, void *low_virtual,
                           unsigned long size,
                           volatile unsigned int *last_dispatch)
{
    dri_bufmgr_fake *bufmgr_fake;
    drm_intel_bufmgr_fake *bufmgr_fake;

    bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));

@@ -1484,16 +1501,16 @@ intel_bufmgr_fake_init(int fd,
    bufmgr_fake->heap = mmInit(low_offset, size);

    /* Hook in methods */
    bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
    bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
    bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
    bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
    bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
    bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
    bufmgr_fake->bufmgr.bo_emit_reloc = dri_fake_emit_reloc;
    bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
    bufmgr_fake->bufmgr.bo_exec = dri_fake_bo_exec;
    bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
    bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
    bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
    bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
    bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
    bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
    bufmgr_fake->bufmgr.bo_wait_rendering = drm_intel_fake_bo_wait_rendering;
    bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
    bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
    bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
    bufmgr_fake->bufmgr.check_aperture_space = drm_intel_fake_check_aperture_space;
    bufmgr_fake->bufmgr.debug = 0;

    bufmgr_fake->fd = fd;
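
Everything above dispatches through the vtable just filled in, so back-end selection stays invisible to callers. A usage sketch; the size and argument values are illustrative, not taken from this commit:

    /* Generic drm_intel_bo_* entry points resolve through the vtable,
     * here to the drm_intel_fake_* implementations hooked in above. */
    drm_intel_bufmgr *bufmgr =
        drm_intel_bufmgr_fake_init(fd, 0 /* low_offset */,
                                   NULL /* low_virtual */,
                                   8 * 1024 * 1024, NULL /* last_dispatch */);
    drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "test", 4096, 4096);
    /* ... use bo ... */
    drm_intel_bo_unreference(bo);
    drm_intel_bufmgr_destroy(bufmgr);
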
@@ -39,6 +39,7 @@
#endif

#include <xf86drm.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -47,6 +48,8 @@
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "errno.h"
#include "intel_bufmgr.h"
@@ -60,10 +63,10 @@
    fprintf(stderr, __VA_ARGS__); \
} while (0)

typedef struct _dri_bo_gem dri_bo_gem;
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct dri_gem_bo_bucket {
    dri_bo_gem *head, **tail;
struct drm_intel_gem_bo_bucket {
    drm_intel_bo_gem *head, **tail;
    /**
     * Limit on the number of entries in this bucket.
     *
@@ -77,9 +80,9 @@ struct dri_gem_bo_bucket {
/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse
 * is 1 << 16 pages, or 256MB.
 */
#define INTEL_GEM_BO_BUCKETS 16
typedef struct _dri_bufmgr_gem {
    dri_bufmgr bufmgr;
#define DRM_INTEL_GEM_BO_BUCKETS 16
typedef struct _drm_intel_bufmgr_gem {
    drm_intel_bufmgr bufmgr;

    int fd;

@@ -88,16 +91,18 @@ typedef struct _dri_bufmgr_gem {
    pthread_mutex_t lock;

    struct drm_i915_gem_exec_object *exec_objects;
    dri_bo **exec_bos;
    drm_intel_bo **exec_bos;
    int exec_size;
    int exec_count;

    /** Array of lists of cached gem objects of power-of-two sizes */
    struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
} dri_bufmgr_gem;
    struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];

struct _dri_bo_gem {
    dri_bo bo;
    uint64_t gtt_size;
} drm_intel_bufmgr_gem;

struct _drm_intel_bo_gem {
    drm_intel_bo bo;

    int refcount;
    /** Boolean whether the mmap ioctl has been called for this buffer yet. */
@@ -126,17 +131,38 @@ struct _dri_bo_gem {
    /** Array passed to the DRM containing relocation information. */
    struct drm_i915_gem_relocation_entry *relocs;
    /** Array of bos corresponding to relocs[i].target_handle */
    dri_bo **reloc_target_bo;
    drm_intel_bo **reloc_target_bo;
    /** Number of entries in relocs */
    int reloc_count;
    /** Mapped address for the buffer */
    void *virtual;

    /** free list */
    dri_bo_gem *next;
    drm_intel_bo_gem *next;

    /**
     * Boolean of whether this BO and its children have been included in
     * the current drm_intel_bufmgr_check_aperture_space() total.
     */
    char included_in_check_aperture;

    /**
     * Boolean of whether this buffer has been used as a relocation
     * target and had its size accounted for, and thus can't have any
     * further relocations added to it.
     */
    char used_as_reloc_target;

    /**
     * Size in bytes of this buffer and its relocation descendents.
     *
     * Used to avoid costly tree walking in drm_intel_bufmgr_check_aperture in
     * the common case.
     */
    int reloc_tree_size;
};

static void dri_gem_bo_reference_locked(dri_bo *bo);
static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);

static int
logbase2(int n)
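
The two new bookkeeping booleans and reloc_tree_size exist so that aperture checking can avoid re-walking the relocation tree on every call. The update site itself is not part of this excerpt, but the intended shape of the accounting is roughly:

    /* Sketch, not the committed code: when a buffer gains a relocation
     * to a target not yet accounted for, the target's whole subtree
     * size is folded in once and the target is then frozen against
     * further relocations:
     *
     *     if (!target->used_as_reloc_target) {
     *         bo->reloc_tree_size += target->reloc_tree_size;
     *         target->used_as_reloc_target = 1;
     *     }
     *
     * check_aperture then sums reloc_tree_size over just the root bos,
     * using included_in_check_aperture to avoid double counting.
     */
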
@@ -152,8 +178,9 @@ logbase2(int n)
    return log2;
}

static struct dri_gem_bo_bucket *
dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                 unsigned long size)
{
    int i;

@@ -166,20 +193,20 @@ dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
    /* We always allocate in units of pages */
    i = ffs(size / 4096) - 1;
    if (i >= INTEL_GEM_BO_BUCKETS)
    if (i >= DRM_INTEL_GEM_BO_BUCKETS)
        return NULL;

    return &bufmgr_gem->cache_bucket[i];
}

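
A worked instance of the bucket arithmetic above, assuming 4096-byte pages:

    /* A 16384-byte bo is 4 pages: ffs(4) == 3, so i == 2 and the bo
     * lands in cache_bucket[2], the 4-page bucket.  Anything of
     * 1 << DRM_INTEL_GEM_BO_BUCKETS pages (256MB) or more makes
     * i >= DRM_INTEL_GEM_BO_BUCKETS, returns NULL, and is therefore
     * never cached for reuse. */
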
static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
    int i, j;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        dri_bo *bo = bufmgr_gem->exec_bos[i];
        dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
        drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

        if (bo_gem->relocs == NULL) {
            DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
@@ -187,12 +214,13 @@ static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
        }

        for (j = 0; j < bo_gem->reloc_count; j++) {
            dri_bo *target_bo = bo_gem->reloc_target_bo[j];
            dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;
            drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
            drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo;

            DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
                i,
                bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
                bo_gem->gem_handle, bo_gem->name,
                (unsigned long long)bo_gem->relocs[j].offset,
                target_gem->gem_handle, target_gem->name, target_bo->offset,
                bo_gem->relocs[j].delta);
        }
@@ -208,10 +236,10 @@ static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
 * access flags.
 */
static void
intel_add_validate_buffer(dri_bo *bo)
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int index;

    if (bo_gem->validate_index != -1)
@@ -242,7 +270,7 @@ intel_add_validate_buffer(dri_bo *bo)
    bufmgr_gem->exec_objects[index].alignment = 0;
    bufmgr_gem->exec_objects[index].offset = 0;
    bufmgr_gem->exec_bos[index] = bo;
    dri_gem_bo_reference_locked(bo);
    drm_intel_gem_bo_reference_locked(bo);
    bufmgr_gem->exec_count++;
}

@@ -251,27 +279,28 @@ intel_add_validate_buffer(dri_bo *bo)
                 sizeof(uint32_t))

static int
intel_setup_reloc_list(dri_bo *bo)
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
                            sizeof(struct drm_i915_gem_relocation_entry));
    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));
    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs *
                                     sizeof(drm_intel_bo *));

    return 0;
}

static dri_bo *
dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
                 unsigned long size, unsigned int alignment)
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
                       unsigned long size, unsigned int alignment)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
    dri_bo_gem *bo_gem;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    unsigned int page_size = getpagesize();
    int ret;
    struct dri_gem_bo_bucket *bucket;
    struct drm_intel_gem_bo_bucket *bucket;
    int alloc_from_cache = 0;
    unsigned long bo_size;

@@ -279,7 +308,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
    bo_size = 1 << logbase2(size);
    if (bo_size < page_size)
        bo_size = page_size;
    bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size);

    /* If we don't have caching at this size, don't actually round the
     * allocation up.
@@ -334,6 +363,8 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
    bo_gem->name = name;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->reloc_tree_size = bo_gem->bo.size;
    bo_gem->used_as_reloc_target = 0;

    DBG("bo_create: buf %d (%s) %ldb\n",
        bo_gem->gem_handle, bo_gem->name, size);
@@ -342,17 +373,17 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
}

/**
 * Returns a dri_bo wrapping the given buffer object handle.
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
dri_bo *
intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
                              unsigned int handle)
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
                                  unsigned int handle)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
    dri_bo_gem *bo_gem;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    int ret;
    struct drm_gem_open open_arg;

@@ -385,10 +416,10 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
}

static void
dri_gem_bo_reference(dri_bo *bo)
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    pthread_mutex_lock(&bufmgr_gem->lock);
    bo_gem->refcount++;
@@ -398,16 +429,24 @@ dri_gem_bo_reference(dri_bo *bo)
static void
dri_gem_bo_reference_locked(dri_bo *bo)
{
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    bo_gem->refcount++;
}

static void
dri_gem_bo_free(dri_bo *bo)
drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    bo_gem->refcount++;
}

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_gem_close close;
    int ret;

@@ -426,20 +465,20 @@ dri_gem_bo_free(dri_bo *bo)
}

static void
dri_gem_bo_unreference_locked(dri_bo *bo)
drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (--bo_gem->refcount == 0) {
        struct dri_gem_bo_bucket *bucket;
        struct drm_intel_gem_bo_bucket *bucket;

        if (bo_gem->relocs != NULL) {
            int i;

            /* Unreference all the target buffers */
            for (i = 0; i < bo_gem->reloc_count; i++)
                dri_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
                drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
            free(bo_gem->reloc_target_bo);
            free(bo_gem->relocs);
        }
@@ -447,7 +486,7 @@ dri_gem_bo_unreference_locked(dri_bo *bo)
        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);

        bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        /* Put the buffer into our internal cache for reuse if we can. */
        if (bucket != NULL &&
            (bucket->max_entries == -1 ||
@@ -465,26 +504,26 @@ dri_gem_bo_unreference_locked(dri_bo *bo)
            bucket->tail = &bo_gem->next;
            bucket->num_entries++;
        } else {
            dri_gem_bo_free(bo);
            drm_intel_gem_bo_free(bo);
        }
    }
}

static void
dri_gem_bo_unreference(dri_bo *bo)
drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    pthread_mutex_lock(&bufmgr_gem->lock);
    dri_gem_bo_unreference_locked(bo);
    drm_intel_gem_bo_unreference_locked(bo);
    pthread_mutex_unlock(&bufmgr_gem->lock);
}

static int
dri_gem_bo_map(dri_bo *bo, int write_enable)
drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

@@ -543,11 +582,89 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
    return 0;
}

static int
dri_gem_bo_unmap(dri_bo *bo)
int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Allow recursive mapping. Mesa may recursively map buffers with
     * nested display loops.
     */
    if (!bo_gem->mapped) {

        assert(bo->virtual == NULL);

        DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

        if (bo_gem->virtual == NULL) {
            struct drm_i915_gem_mmap_gtt mmap_arg;

            memset(&mmap_arg, 0, sizeof(mmap_arg));
            mmap_arg.handle = bo_gem->gem_handle;

            /* Get the fake offset back... */
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
                        &mmap_arg);
            if (ret != 0) {
                fprintf(stderr,
                        "%s:%d: Error preparing buffer map %d (%s): %s .\n",
                        __FILE__, __LINE__,
                        bo_gem->gem_handle, bo_gem->name,
                        strerror(errno));
                pthread_mutex_unlock(&bufmgr_gem->lock);
                return ret;
            }

            /* and mmap it */
            bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, bufmgr_gem->fd,
                                   mmap_arg.offset);
            if (bo_gem->virtual == MAP_FAILED) {
                fprintf(stderr,
                        "%s:%d: Error mapping buffer %d (%s): %s .\n",
                        __FILE__, __LINE__,
                        bo_gem->gem_handle, bo_gem->name,
                        strerror(errno));
                pthread_mutex_unlock(&bufmgr_gem->lock);
                return errno;
            }
        }

        bo->virtual = bo_gem->virtual;
        bo_gem->mapped = 1;
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->virtual);
    }

    /* Now move it to the GTT domain so that the CPU caches are flushed */
    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = I915_GEM_DOMAIN_GTT;
    do {
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
                    &set_domain);
    } while (ret == -1 && errno == EINTR);

    if (ret != 0) {
        fprintf(stderr, "%s:%d: Error setting swrast %d: %s\n",
                __FILE__, __LINE__, bo_gem->gem_handle, strerror(errno));
    }

    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}

|
||||
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_sw_finish sw_finish;
|
||||
int ret;
|
||||
|
||||
|
|
@ -570,11 +687,11 @@ dri_gem_bo_unmap(dri_bo *bo)
|
|||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
|
||||
unsigned long size, const void *data)
|
||||
drm_intel_gem_bo_subdata (drm_intel_bo *bo, unsigned long offset,
|
||||
unsigned long size, const void *data)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_pwrite pwrite;
|
||||
int ret;
|
||||
|
||||
|
|
@ -596,11 +713,11 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
|
|||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
|
||||
unsigned long size, void *data)
|
||||
drm_intel_gem_bo_get_subdata (drm_intel_bo *bo, unsigned long offset,
|
||||
unsigned long size, void *data)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_pread pread;
|
||||
int ret;
|
||||
|
||||
|
|
@ -622,17 +739,19 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
|
|||
}
|
||||
|
||||
static void
|
||||
dri_gem_bo_wait_rendering(dri_bo *bo)
|
||||
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_set_domain set_domain;
|
||||
int ret;
|
||||
|
||||
set_domain.handle = bo_gem->gem_handle;
|
||||
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
|
||||
set_domain.write_domain = 0;
|
||||
ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
|
||||
do {
|
||||
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
if (ret != 0) {
|
||||
fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
|
||||
__FILE__, __LINE__,
|
||||
|
|
@ -642,9 +761,9 @@ dri_gem_bo_wait_rendering(dri_bo *bo)
|
|||
}
|
||||
|
||||
static void
|
||||
dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
|
||||
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
|
||||
int i;
|
||||
|
||||
free(bufmgr_gem->exec_objects);
|
||||
|
|
@ -653,9 +772,9 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
|
|||
pthread_mutex_destroy(&bufmgr_gem->lock);
|
||||
|
||||
/* Free any cached buffer objects we were going to reuse */
|
||||
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
|
||||
struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
|
||||
dri_bo_gem *bo_gem;
|
||||
for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
|
||||
struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
|
||||
drm_intel_bo_gem *bo_gem;
|
||||
|
||||
while ((bo_gem = bucket->head) != NULL) {
|
||||
bucket->head = bo_gem->next;
|
||||
|
|
@ -663,7 +782,7 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
|
|||
bucket->tail = &bucket->head;
|
||||
bucket->num_entries--;
|
||||
|
||||
dri_gem_bo_free(&bo_gem->bo);
|
||||
drm_intel_gem_bo_free(&bo_gem->bo);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -680,18 +799,21 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
|
|||
* last known offset in target_bo.
|
||||
*/
|
||||
static int
|
||||
dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta, uint32_t offset, dri_bo *target_bo)
|
||||
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
|
||||
drm_intel_bo *target_bo, uint32_t target_offset,
|
||||
uint32_t read_domains, uint32_t write_domain)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_gem->lock);
|
||||
|
||||
pthread_mutex_lock(&bufmgr_gem->lock);
|
||||
|
||||
/* Create a new relocation list if needed */
|
||||
if (bo_gem->relocs == NULL)
|
||||
intel_setup_reloc_list(bo);
|
||||
drm_intel_setup_reloc_list(bo);
|
||||
|
||||
/* Check overflow */
|
||||
assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
|
||||
|
|
@ -700,8 +822,17 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
|
|||
assert (offset <= bo->size - 4);
|
||||
assert ((write_domain & (write_domain-1)) == 0);
|
||||
|
||||
/* Make sure that we're not adding a reloc to something whose size has
|
||||
* already been accounted for.
|
||||
*/
|
||||
assert(!bo_gem->used_as_reloc_target);
|
||||
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
|
||||
|
||||
/* Flag the target to disallow further relocations in it. */
|
||||
target_bo_gem->used_as_reloc_target = 1;
|
||||
|
||||
bo_gem->relocs[bo_gem->reloc_count].offset = offset;
|
||||
bo_gem->relocs[bo_gem->reloc_count].delta = delta;
|
||||
bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
|
||||
bo_gem->relocs[bo_gem->reloc_count].target_handle =
|
||||
target_bo_gem->gem_handle;
|
||||
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
|
||||
|
|
@ -709,7 +840,7 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
|
|||
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
|
||||
|
||||
bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
|
||||
dri_gem_bo_reference_locked(target_bo);
|
||||
drm_intel_gem_bo_reference_locked(target_bo);
|
||||
|
||||
bo_gem->reloc_count++;
|
||||
|
||||
|
|
@ -724,61 +855,61 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
|
|||
* index values into the validation list.
|
||||
*/
|
||||
static void
|
||||
dri_gem_bo_process_reloc(dri_bo *bo)
|
||||
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
|
||||
{
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
int i;
|
||||
|
||||
if (bo_gem->relocs == NULL)
|
||||
return;
|
||||
|
||||
for (i = 0; i < bo_gem->reloc_count; i++) {
|
||||
dri_bo *target_bo = bo_gem->reloc_target_bo[i];
|
||||
drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
|
||||
|
||||
/* Continue walking the tree depth-first. */
|
||||
dri_gem_bo_process_reloc(target_bo);
|
||||
drm_intel_gem_bo_process_reloc(target_bo);
|
||||
|
||||
/* Add the target to the validate list */
|
||||
intel_add_validate_buffer(target_bo);
|
||||
drm_intel_add_validate_buffer(target_bo);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
|
||||
drm_intel_update_buffer_offsets (drm_intel_bufmgr_gem *bufmgr_gem)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bufmgr_gem->exec_count; i++) {
|
||||
dri_bo *bo = bufmgr_gem->exec_bos[i];
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
|
||||
/* Update the buffer offset */
|
||||
if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
|
||||
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
|
||||
bo_gem->gem_handle, bo_gem->name, bo->offset,
|
||||
bufmgr_gem->exec_objects[i].offset);
|
||||
(unsigned long long)bufmgr_gem->exec_objects[i].offset);
|
||||
bo->offset = bufmgr_gem->exec_objects[i].offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_exec(dri_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4)
|
||||
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
|
||||
drm_clip_rect_t *cliprects, int num_cliprects,
|
||||
int DR4)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
struct drm_i915_gem_execbuffer execbuf;
|
||||
int ret, i;
|
||||
|
||||
pthread_mutex_lock(&bufmgr_gem->lock);
|
||||
/* Update indices and set up the validate list. */
|
||||
dri_gem_bo_process_reloc(bo);
|
||||
drm_intel_gem_bo_process_reloc(bo);
|
||||
|
||||
/* Add the batch buffer to the validation list. There are no relocations
|
||||
* pointing to it.
|
||||
*/
|
||||
intel_add_validate_buffer(bo);
|
||||
drm_intel_add_validate_buffer(bo);
|
||||
|
||||
execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
|
||||
execbuf.buffer_count = bufmgr_gem->exec_count;
|
||||
|
|
@ -791,23 +922,23 @@ dri_gem_bo_exec(dri_bo *bo, int used,
|
|||
|
||||
do {
|
||||
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
|
||||
} while (ret == -EAGAIN);
|
||||
} while (ret != 0 && errno == EAGAIN);
|
||||
|
||||
intel_update_buffer_offsets (bufmgr_gem);
|
||||
drm_intel_update_buffer_offsets (bufmgr_gem);
|
||||
|
||||
if (bufmgr_gem->bufmgr.debug)
|
||||
dri_gem_dump_validation_list(bufmgr_gem);
|
||||
drm_intel_gem_dump_validation_list(bufmgr_gem);
|
||||
|
||||
for (i = 0; i < bufmgr_gem->exec_count; i++) {
|
||||
dri_bo *bo = bufmgr_gem->exec_bos[i];
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
|
||||
/* Need to call swrast on next bo_map */
|
||||
bo_gem->swrast = 0;
|
||||
|
||||
/* Disconnect the buffer from the validate list */
|
||||
bo_gem->validate_index = -1;
|
||||
dri_gem_bo_unreference_locked(bo);
|
||||
drm_intel_gem_bo_unreference_locked(bo);
|
||||
bufmgr_gem->exec_bos[i] = NULL;
|
||||
}
|
||||
bufmgr_gem->exec_count = 0;
|
||||
|
|
@ -817,10 +948,10 @@ dri_gem_bo_exec(dri_bo *bo, int used,
|
|||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_pin(dri_bo *bo, uint32_t alignment)
|
||||
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_pin pin;
|
||||
int ret;
|
||||
|
||||
|
|
@ -836,10 +967,10 @@ dri_gem_bo_pin(dri_bo *bo, uint32_t alignment)
|
|||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_unpin(dri_bo *bo)
|
||||
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_unpin unpin;
|
||||
int ret;
|
||||
|
||||
|
|
@ -853,15 +984,17 @@ dri_gem_bo_unpin(dri_bo *bo)
|
|||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
|
||||
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
|
||||
uint32_t stride)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_set_tiling set_tiling;
|
||||
int ret;
|
||||
|
||||
set_tiling.handle = bo_gem->gem_handle;
|
||||
set_tiling.tiling_mode = *tiling_mode;
|
||||
set_tiling.stride = stride;
|
||||
|
||||
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
|
||||
if (ret != 0) {
|
||||
|
|
@ -874,10 +1007,33 @@ dri_gem_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
|
|||
}
|
||||
|
||||
static int
|
||||
dri_gem_bo_flink(dri_bo *bo, uint32_t *name)
|
||||
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
|
||||
uint32_t *swizzle_mode)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
|
||||
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_i915_gem_get_tiling get_tiling;
|
||||
int ret;
|
||||
|
||||
get_tiling.handle = bo_gem->gem_handle;
|
||||
|
||||
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
|
||||
if (ret != 0) {
|
||||
*tiling_mode = I915_TILING_NONE;
|
||||
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
|
||||
return -errno;
|
||||
}
|
||||
|
||||
*tiling_mode = get_tiling.tiling_mode;
|
||||
*swizzle_mode = get_tiling.swizzle_mode;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
|
||||
{
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
struct drm_gem_flink flink;
|
||||
int ret;
|
||||
|
||||
|
|
@ -902,23 +1058,106 @@ dri_gem_bo_flink(dri_bo *bo, uint32_t *name)
|
|||
* in flight at once.
|
||||
*/
|
||||
void
|
||||
intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
|
||||
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
|
||||
for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
|
||||
bufmgr_gem->cache_bucket[i].max_entries = -1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
/**
|
||||
* Return the additional aperture space required by the tree of buffer objects
|
||||
* rooted at bo.
|
||||
*/
|
||||
static int
|
||||
dri_gem_check_aperture_space(dri_bo **bo_array, int count)
|
||||
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
|
||||
{
|
||||
return 0;
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
int i;
|
||||
int total = 0;
|
||||
|
||||
if (bo == NULL || bo_gem->included_in_check_aperture)
|
||||
return 0;
|
||||
|
||||
total += bo->size;
|
||||
bo_gem->included_in_check_aperture = 1;
|
||||
|
||||
for (i = 0; i < bo_gem->reloc_count; i++)
|
||||
total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]);
|
||||
|
||||
return total;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
|
||||
* for the next drm_intel_bufmgr_check_aperture_space() call.
|
||||
*/
|
||||
static void
|
||||
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
|
||||
{
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
|
||||
int i;
|
||||
|
||||
if (bo == NULL || !bo_gem->included_in_check_aperture)
|
||||
return;
|
||||
|
||||
bo_gem->included_in_check_aperture = 0;
|
||||
|
||||
for (i = 0; i < bo_gem->reloc_count; i++)
|
||||
drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return -1 if the batchbuffer should be flushed before attempting to
|
||||
* emit rendering referencing the buffers pointed to by bo_array.
|
||||
*
|
||||
* This is required because if we try to emit a batchbuffer with relocations
|
||||
* to a tree of buffers that won't simultaneously fit in the aperture,
|
||||
* the rendering will return an error at a point where the software is not
|
||||
* prepared to recover from it.
|
||||
*
|
||||
* However, we also want to emit the batchbuffer significantly before we reach
|
||||
* the limit, as a series of batchbuffers each of which references buffers
|
||||
* covering almost all of the aperture means that at each emit we end up
|
||||
* waiting to evict a buffer from the last rendering, and we get synchronous
|
||||
* performance. By emitting smaller batchbuffers, we eat some CPU overhead to
|
||||
* get better parallelism.
|
||||
*/
|
||||
static int
|
||||
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
|
||||
{
|
||||
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
|
||||
unsigned int total = 0;
|
||||
unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
|
||||
if (bo_gem != NULL)
|
||||
total += bo_gem->reloc_tree_size;
|
||||
}
|
||||
|
||||
if (total > threshold) {
|
||||
total = 0;
|
||||
for (i = 0; i < count; i++)
|
||||
total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
|
||||
}
|
||||
|
||||
if (total > bufmgr_gem->gtt_size * 3 / 4) {
|
||||
DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
|
||||
total / 1024, (int)bufmgr_gem->gtt_size / 1024);
|
||||
return -1;
|
||||
} else {
|
||||
DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024 ,
|
||||
(int)bufmgr_gem->gtt_size / 1024);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -927,11 +1166,12 @@ dri_gem_check_aperture_space(dri_bo **bo_array, int count)
|
|||
*
|
||||
* \param fd File descriptor of the opened DRM device.
|
||||
*/
|
||||
dri_bufmgr *
|
||||
intel_bufmgr_gem_init(int fd, int batch_size)
|
||||
drm_intel_bufmgr *
|
||||
drm_intel_bufmgr_gem_init(int fd, int batch_size)
|
||||
{
|
||||
dri_bufmgr_gem *bufmgr_gem;
|
||||
int i;
|
||||
drm_intel_bufmgr_gem *bufmgr_gem;
|
||||
struct drm_i915_gem_get_aperture aperture;
|
||||
int ret, i;
|
||||
|
||||
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
|
||||
bufmgr_gem->fd = fd;
|
||||
|
|
@ -941,6 +1181,19 @@ intel_bufmgr_gem_init(int fd, int batch_size)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
|
||||
|
||||
if (ret == 0)
|
||||
bufmgr_gem->gtt_size = aperture.aper_available_size;
|
||||
else {
|
||||
fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
|
||||
strerror(errno));
|
||||
bufmgr_gem->gtt_size = 128 * 1024 * 1024;
|
||||
fprintf(stderr, "Assuming %dkB available aperture size.\n"
|
||||
"May lead to reduced performance or incorrect rendering.\n",
|
||||
(int)bufmgr_gem->gtt_size / 1024);
|
||||
}
|
||||
|
||||
/* Let's go with one relocation per every 2 dwords (but round down a bit
|
||||
* since a power of two will mean an extra page allocation for the reloc
|
||||
* buffer).
|
||||
|
|
@ -949,25 +1202,26 @@ intel_bufmgr_gem_init(int fd, int batch_size)
|
|||
*/
|
||||
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
|
||||
|
||||
bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
|
||||
bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
|
||||
bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
|
||||
bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
|
||||
bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
|
||||
bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
|
||||
bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
|
||||
bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
|
||||
bufmgr_gem->bufmgr.bo_emit_reloc = dri_gem_bo_emit_reloc;
|
||||
bufmgr_gem->bufmgr.bo_pin = dri_gem_bo_pin;
|
||||
bufmgr_gem->bufmgr.bo_unpin = dri_gem_bo_unpin;
|
||||
bufmgr_gem->bufmgr.bo_set_tiling = dri_gem_bo_set_tiling;
|
||||
bufmgr_gem->bufmgr.bo_flink = dri_gem_bo_flink;
|
||||
bufmgr_gem->bufmgr.bo_exec = dri_gem_bo_exec;
|
||||
bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
|
||||
bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
|
||||
bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
|
||||
bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
|
||||
bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
|
||||
bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
|
||||
bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
|
||||
bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
|
||||
bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
|
||||
bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
|
||||
bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
|
||||
bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
|
||||
bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
|
||||
bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
|
||||
bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
|
||||
bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
|
||||
bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
|
||||
bufmgr_gem->bufmgr.debug = 0;
|
||||
bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
|
||||
bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
|
||||
/* Initialize the linked lists for BO reuse cache. */
|
||||
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
|
||||
for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
|
||||
bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
|
||||
|
||||
return &bufmgr_gem->bufmgr;
|
||||
|
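
For orientation, a minimal client-side sketch of the renamed API introduced above, combining the GTT mapping and buffer-cache entry points. This is not part of the diff; the device path and sizes are assumptions, and the drm_intel_bo_alloc/drm_intel_bo_unreference/drm_intel_bufmgr_destroy wrapper names follow the renaming pattern shown in this change.

#include <fcntl.h>
#include <string.h>
#include "intel_bufmgr.h"

int example(void)
{
    /* Assumed device node; real code would use drmOpen() or similar. */
    int fd = open("/dev/dri/card0", O_RDWR);
    drm_intel_bufmgr *bufmgr;
    drm_intel_bo *bo;

    bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);  /* batch_size in bytes */
    drm_intel_bufmgr_gem_enable_reuse(bufmgr);     /* opt in to the BO cache */

    bo = drm_intel_bo_alloc(bufmgr, "example", 4096, 4096);

    /* Map through the GTT: the set_domain done inside map_gtt flushes CPU
     * caches, so the write below is coherent with later GPU access. */
    if (drm_intel_gem_bo_map_gtt(bo) == 0) {
        memset(bo->virtual, 0, 4096);
        drm_intel_bo_unmap(bo);
    }

    drm_intel_bo_unreference(bo);
    drm_intel_bufmgr_destroy(bufmgr);
    return 0;
}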
@@ -39,7 +39,7 @@
 *
 * Contains public methods followed by private storage for the buffer manager.
 */
-struct _dri_bufmgr {
+struct _drm_intel_bufmgr {
    /**
     * Allocate a buffer object.
     *
@@ -48,17 +48,17 @@ struct _dri_bufmgr {
     * bo_map() to be used by the CPU, and validated for use using bo_validate()
     * to be used from the graphics device.
     */
-   dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
-		       unsigned long size, unsigned int alignment);
+   drm_intel_bo *(*bo_alloc)(drm_intel_bufmgr *bufmgr, const char *name,
+			     unsigned long size, unsigned int alignment);

    /** Takes a reference on a buffer object */
-   void (*bo_reference)(dri_bo *bo);
+   void (*bo_reference)(drm_intel_bo *bo);

    /**
     * Releases a reference on a buffer object, freeing the data if
     * rerefences remain.
     */
-   void (*bo_unreference)(dri_bo *bo);
+   void (*bo_unreference)(drm_intel_bo *bo);

    /**
     * Maps the buffer into userspace.
@@ -67,28 +67,28 @@ struct _dri_bufmgr {
     * buffer to complete, first.  The resulting mapping is available at
     * buf->virtual.
     */
-   int (*bo_map)(dri_bo *buf, int write_enable);
+   int (*bo_map)(drm_intel_bo *bo, int write_enable);

    /** Reduces the refcount on the userspace mapping of the buffer object. */
-   int (*bo_unmap)(dri_bo *buf);
+   int (*bo_unmap)(drm_intel_bo *bo);

    /**
     * Write data into an object.
     *
     * This is an optional function, if missing,
-    * dri_bo will map/memcpy/unmap.
+    * drm_intel_bo will map/memcpy/unmap.
     */
-   int (*bo_subdata) (dri_bo *buf, unsigned long offset,
-		      unsigned long size, const void *data);
+   int (*bo_subdata)(drm_intel_bo *bo, unsigned long offset,
+		     unsigned long size, const void *data);

    /**
     * Read data from an object
     *
     * This is an optional function, if missing,
-    * dri_bo will map/memcpy/unmap.
+    * drm_intel_bo will map/memcpy/unmap.
     */
-   int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
-			  unsigned long size, void *data);
+   int (*bo_get_subdata)(drm_intel_bo *bo, unsigned long offset,
+			 unsigned long size, void *data);

    /**
     * Waits for rendering to an object by the GPU to have completed.
@@ -96,12 +96,12 @@ struct _dri_bufmgr {
     * This is not required for any access to the BO by bo_map, bo_subdata, etc.
     * It is merely a way for the driver to implement glFinish.
     */
-   void (*bo_wait_rendering) (dri_bo *bo);
+   void (*bo_wait_rendering)(drm_intel_bo *bo);

    /**
     * Tears down the buffer manager instance.
     */
-   void (*destroy)(dri_bufmgr *bufmgr);
+   void (*destroy)(drm_intel_bufmgr *bufmgr);

    /**
     * Add relocation entry in reloc_buf, which will be updated with the
@@ -109,23 +109,23 @@ struct _dri_bufmgr {
     *
     * Relocations remain in place for the lifetime of the buffer object.
     *
-    * \param reloc_buf Buffer to write the relocation into.
+    * \param bo Buffer to write the relocation into.
+    * \param offset Byte offset within reloc_bo of the pointer to target_bo.
+    * \param target_bo Buffer whose offset should be written into the
+    *        relocation entry.
+    * \param target_offset Constant value to be added to target_bo's offset in
+    *        relocation entry.
     * \param read_domains GEM read domains which the buffer will be read into
     *	     by the command that this relocation is part of.
     * \param write_domains GEM read domains which the buffer will be dirtied
     *	     in by the command that this relocation is part of.
-    * \param delta Constant value to be added to the relocation target's
-    *	     offset.
-    * \param offset Byte offset within batch_buf of the relocated pointer.
-    * \param target Buffer whose offset should be written into the relocation
-    *	     entry.
     */
-   int (*bo_emit_reloc)(dri_bo *reloc_buf,
-			uint32_t read_domains, uint32_t write_domain,
-			uint32_t delta, uint32_t offset, dri_bo *target);
+   int (*bo_emit_reloc)(drm_intel_bo *bo, uint32_t offset,
+			drm_intel_bo *target_bo, uint32_t target_offset,
+			uint32_t read_domains, uint32_t write_domain);

    /** Executes the command buffer pointed to by bo. */
-   int (*bo_exec)(dri_bo *bo, int used,
+   int (*bo_exec)(drm_intel_bo *bo, int used,
		   drm_clip_rect_t *cliprects, int num_cliprects,
		   int DR4);

@@ -135,29 +135,39 @@ struct _dri_bufmgr {
     * \param buf Buffer to pin
     * \param alignment Required alignment for aperture, in bytes
     */
-   int (*bo_pin) (dri_bo *buf, uint32_t alignment);
+   int (*bo_pin)(drm_intel_bo *bo, uint32_t alignment);
    /**
     * Unpin a buffer from the aperture, allowing it to be removed
     *
     * \param buf Buffer to unpin
     */
-   int (*bo_unpin) (dri_bo *buf);
+   int (*bo_unpin)(drm_intel_bo *bo);
    /**
     * Ask that the buffer be placed in tiling mode
     *
     * \param buf Buffer to set tiling mode for
     * \param tiling_mode desired, and returned tiling mode
     */
-   int (*bo_set_tiling) (dri_bo *bo, uint32_t *tiling_mode);
+   int (*bo_set_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
+			uint32_t stride);
+   /**
+    * Get the current tiling (and resulting swizzling) mode for the bo.
+    *
+    * \param buf Buffer to get tiling mode for
+    * \param tiling_mode returned tiling mode
+    * \param swizzle_mode returned swizzling mode
+    */
+   int (*bo_get_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
+			uint32_t *swizzle_mode);
    /**
     * Create a visible name for a buffer which can be used by other apps
     *
     * \param buf Buffer to create a name for
     * \param name Returned name
     */
-   int (*bo_flink) (dri_bo *buf, uint32_t *name);
+   int (*bo_flink)(drm_intel_bo *bo, uint32_t *name);

-   int (*check_aperture_space)(dri_bo **bo_array, int count);
+   int (*check_aperture_space)(drm_intel_bo **bo_array, int count);
    int debug; /**< Enables verbose debugging printouts */
 };
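
A hedged sketch of the new bo_emit_reloc calling convention through the drm_intel_bo_emit_reloc wrapper (the wrapper name follows the renaming pattern in this diff; emit_surface_pointer is an invented illustration, not code from the tree):

#include <stdint.h>
#include "intel_bufmgr.h"
#include "i915_drm.h"

/* Ask the kernel to rewrite the dword at `offset` inside batch_bo with
 * target_bo's final GTT address plus a zero target_offset. */
static int emit_surface_pointer(drm_intel_bo *batch_bo, uint32_t offset,
                                drm_intel_bo *target_bo)
{
    return drm_intel_bo_emit_reloc(batch_bo, offset,
                                   target_bo, 0 /* target_offset */,
                                   I915_GEM_DOMAIN_RENDER,  /* read_domains */
                                   I915_GEM_DOMAIN_RENDER); /* write_domain */
}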
@@ -357,9 +357,24 @@ static inline int kobject_uevent_env(struct kobject *kobj,
 
 
 #if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
+/*
+ * pgd_offset_k() is a macro that uses the symbol init_mm,
+ * check that it is available.
+ */
+# if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) || \
+	defined(CONFIG_UNUSED_SYMBOLS))
 #define DRM_KMAP_ATOMIC_PROT_PFN
 extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
				  pgprot_t protection);
+# else
+#warning "init_mm is not available on this kernel!"
+static inline void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
					 pgprot_t protection)
+{
+	/* stub */
+	return NULL;
+}
+# endif /* no init_mm */
 #endif
 
 #if !defined(flush_agp_mappings)
@@ -409,4 +424,17 @@ extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
 #endif
 #endif
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
+#define set_page_locked SetPageLocked
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
+/*
+ * The kernel provides __set_page_locked, which uses the non-atomic
+ * __set_bit function.  Let's use the atomic set_bit just in case.
+ */
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+#endif
+
 #endif
@@ -227,11 +227,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 
	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
	set_page_locked(dev_priv->gart_info.sg_dummy_page);
-#else
-	SetPageLocked(dev_priv->gart_info.sg_dummy_page);
-#endif
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
linux-core/nv50_grctx.h (new symbolic link, 1 line)
@@ -0,0 +1 @@
+../shared-core/nv50_grctx.h
@@ -237,6 +237,7 @@ enum drm_map_type {
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
	_DRM_TTM = 6,
+	_DRM_GEM = 7,
 };
 
 /**
@@ -83,18 +83,18 @@
 0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22"
 0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C"
 0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464"
-0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800"
-0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
-0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
-0x1002 0x554B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 SE"
-0x1002 0x554C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XTP"
-0x1002 0x554D CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XL"
-0x1002 0x554E CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 SE"
-0x1002 0x554F CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800"
-0x1002 0x5550 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V7100 R423"
-0x1002 0x5551 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V5100 R423 UQ"
-0x1002 0x5552 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UR"
-0x1002 0x5554 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UT"
+0x1002 0x5548 CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R423 X800"
+0x1002 0x5549 CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro"
+0x1002 0x554A CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE"
+0x1002 0x554B CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 SE"
+0x1002 0x554C CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XTP"
+0x1002 0x554D CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XL"
+0x1002 0x554E CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 SE"
+0x1002 0x554F CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R430 X800"
+0x1002 0x5550 CHIP_R423|RADEON_NEW_MEMMAP "ATI FireGL V7100 R423"
+0x1002 0x5551 CHIP_R423|RADEON_NEW_MEMMAP "ATI FireGL V5100 R423 UQ"
+0x1002 0x5552 CHIP_R423|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UR"
+0x1002 0x5554 CHIP_R423|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UT"
 0x1002 0x564A CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
 0x1002 0x564B CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26"
 0x1002 0x564F CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26"
@@ -124,16 +124,16 @@
 0x1002 0x5b65 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireMV 2200 PCIE (RV370) 5B65"
 0x1002 0x5c61 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility"
 0x1002 0x5c63 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility"
-0x1002 0x5d48 CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 XT M28"
-0x1002 0x5d49 CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5100 M28"
-0x1002 0x5d4a CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 M28"
-0x1002 0x5d4c CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850"
-0x1002 0x5d4d CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT PE"
-0x1002 0x5d4e CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 SE"
-0x1002 0x5d4f CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 Pro"
-0x1002 0x5d50 CHIP_R420|RADEON_NEW_MEMMAP "ATI unknown Radeon / FireGL R480"
-0x1002 0x5d52 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT"
-0x1002 0x5d57 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT"
+0x1002 0x5d48 CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 XT M28"
+0x1002 0x5d49 CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5100 M28"
+0x1002 0x5d4a CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 M28"
+0x1002 0x5d4c CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R480 X850"
+0x1002 0x5d4d CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT PE"
+0x1002 0x5d4e CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 SE"
+0x1002 0x5d4f CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 Pro"
+0x1002 0x5d50 CHIP_R423|RADEON_NEW_MEMMAP "ATI unknown Radeon / FireGL R480"
+0x1002 0x5d52 CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT"
+0x1002 0x5d57 CHIP_R423|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT"
 0x1002 0x5e48 CHIP_RV410|RADEON_NEW_MEMMAP "ATI FireGL V5000 RV410"
 0x1002 0x5e4a CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 XT"
 0x1002 0x5e4b CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 Pro"
@@ -239,6 +239,10 @@
 0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP"
 0x1002 0x791e CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS690 X1250 IGP"
 0x1002 0x791f CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS690 X1270 IGP"
+0x1002 0x796c CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS740 HD2100 IGP"
+0x1002 0x796d CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS740 HD2100 IGP"
+0x1002 0x796e CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS740 HD2100 IGP"
+0x1002 0x796f CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART "ATI Radeon RS740 HD2100 IGP"
 
 [r128]
 0x1002 0x4c45 0 "ATI Rage 128 Mobility LE (PCI)"
@@ -841,7 +841,11 @@ static int i915_getparam(struct drm_device *dev, void *data,
		value = dev->pci_device;
		break;
	case I915_PARAM_HAS_GEM:
+#ifdef I915_HAVE_GEM
		value = 1;
+#else
+		value = 0;
+#endif
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
@@ -1023,7 +1027,10 @@
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#ifdef I915_HAVE_BUFFER
	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
+#endif
 #ifdef I915_HAVE_GEM
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
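
A minimal userspace sketch (not part of this diff) of how a client would probe the new parameter through the existing DRM_IOCTL_I915_GETPARAM interface:

#include <sys/ioctl.h>
#include "i915_drm.h"

static int i915_has_gem(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;
	/* Older kernels reject the parameter entirely; treat that as "no GEM". */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;
	return value;
}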
@@ -192,6 +192,8 @@ typedef struct drm_i915_sarea {
 #define DRM_I915_GEM_SW_FINISH	0x20
 #define DRM_I915_GEM_SET_TILING	0x21
 #define DRM_I915_GEM_GET_TILING	0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT	0x24
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -223,10 +225,12 @@ typedef struct drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
 #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
 #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
 
 /* Asynchronous page flipping:
 */
@@ -449,6 +453,18 @@ struct drm_i915_gem_mmap {
	uint64_t addr_ptr;	/* void *, but pointers are not 32/64 compatible */
 };
 
+struct drm_i915_gem_mmap_gtt {
+	/** Handle for the object being mapped. */
+	uint32_t handle;
+	uint32_t pad;
+	/**
+	 * Fake offset to use for subsequent mmap call
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	uint64_t offset;
+};
+
 struct drm_i915_gem_set_domain {
	/** Handle for the object */
	uint32_t handle;
@@ -668,4 +684,15 @@ struct drm_i915_gem_get_tiling {
	uint32_t swizzle_mode;
 };
 
+struct drm_i915_gem_get_aperture {
+	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+	uint64_t aper_size;
+
+	/**
+	 * Available space in the aperture used by i915_gem_execbuffer, in
+	 * bytes
+	 */
+	uint64_t aper_available_size;
+};
+
 #endif				/* _I915_DRM_H_ */
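
A short sketch (not part of this diff) of exercising the new aperture query directly; the mmap_gtt side is consumed by the libdrm change above:

#include <stdio.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static void print_aperture(int fd)
{
	struct drm_i915_gem_get_aperture aperture;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
		printf("aperture: %llu bytes total, %llu available\n",
		       (unsigned long long)aperture.aper_size,
		       (unsigned long long)aperture.aper_available_size);
}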
@@ -514,15 +514,37 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
	 */
	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
		pipea_stats = I915_READ(PIPEASTAT);
+		/* The vblank interrupt gets enabled even if we didn't ask for
+		   it, so make sure it's shut down again */
+		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
+			pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+					 PIPE_VBLANK_INTERRUPT_ENABLE);
+		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+					PIPE_VBLANK_INTERRUPT_STATUS))
+		{
+			vblank++;
+			drm_handle_vblank(dev, i915_get_plane(dev, 0));
+		}
 
		I915_WRITE(PIPEASTAT, pipea_stats);
	}
 
	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
		pipeb_stats = I915_READ(PIPEBSTAT);
		I915_WRITE(PIPEBSTAT, pipeb_stats);
+		/* The vblank interrupt gets enabled even if we didn't ask for
+		   it, so make sure it's shut down again */
+		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
+			pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+					 PIPE_VBLANK_INTERRUPT_ENABLE);
+		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+					PIPE_VBLANK_INTERRUPT_STATUS))
+		{
+			vblank++;
+			drm_handle_vblank(dev, i915_get_plane(dev, 1));
+		}
	}
 
	I915_WRITE(IIR, iir);
 #ifdef __linux__
	if (dev->pdev->msi_enabled)
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
@@ -1110,9 +1132,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	I915_WRITE16(HWSTAM, 0xeffe);
-	I915_WRITE16(IMR, 0x0);
-	I915_WRITE16(IER, 0x0);
+	I915_WRITE(HWSTAM, 0xeffe);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
 }
 
 int i915_driver_irq_postinstall(struct drm_device * dev)
(One file diff suppressed because it is too large.)

shared-core/nv50_grctx.h (new file, 9232 lines): diff suppressed because it is too large.
@@ -71,7 +71,8 @@ static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 
 static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
-	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
		return RS690_READ_MCIND(dev_priv, addr);
	else
		return RS480_READ_MCIND(dev_priv, addr);
@@ -99,7 +100,8 @@ u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
 
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
		return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
		return RADEON_READ(R700_MC_VM_FB_LOCATION);
@@ -142,7 +144,8 @@ void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
 {
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
		R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)
		RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc);
@@ -158,7 +161,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
 {
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
		R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
	else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) {
		RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc);
@@ -177,7 +181,8 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
		R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
		R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
		RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
		RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
@@ -510,6 +515,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
			     R300_cp_microcode[i][0]);
		}
	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		for (i = 0; i < 256; i++) {
@@ -518,8 +524,9 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
				     R420_cp_microcode[i][0]);
		}
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
-		DRM_INFO("Loading RS690 Microcode\n");
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
		for (i = 0; i < 256; i++) {
			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
				     RS690_cp_microcode[i][1]);
@@ -914,7 +921,8 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
 
	temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
 
-	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
		IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
						     RS690_BLOCK_GFX_D3_EN));
	else
@@ -1007,6 +1015,7 @@ void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
	u32 tmp;
 
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
	    (dev_priv->flags & RADEON_IS_IGPGART)) {
		radeon_set_igpgart(dev_priv, on);
		return;
@@ -2646,6 +2655,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_R420:
+	case CHIP_R423:
	case CHIP_RV410:
	case CHIP_RV515:
	case CHIP_R520:
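
Every RS740 hunk above repeats the same family-mask comparison; a hedged illustration of the idiom (the helper is invented for illustration and is not part of the tree):

static int radeon_is_rs690_class(drm_radeon_private_t *dev_priv)
{
	int family = dev_priv->flags & RADEON_FAMILY_MASK;

	/* RS690 and RS740 share the RS690 MCIND register path. */
	return (family == CHIP_RS690) || (family == CHIP_RS740);
}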
@@ -598,12 +598,31 @@ int radeon_resume(struct drm_device *dev);
 #	define RADEON_SCISSOR_1_ENABLE		(1 << 29)
 #	define RADEON_SCISSOR_2_ENABLE		(1 << 30)
 
+/*
+ * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
+ * don't have an explicit bus mastering disable bit.  It's handled
+ * by the PCI D-states.  PMI_BM_DIS disables D-state bus master
+ * handling, not bus mastering itself.
+ */
 #define RADEON_BUS_CNTL			0x0030
+/* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
 #	define RADEON_BUS_MASTER_DIS		(1 << 6)
+/* rs600/rs690/rs740 */
+#	define RS600_BUS_MASTER_DIS		(1 << 14)
+#	define RS600_MSI_REARM			(1 << 20)
+/* see RS480_MSI_REARM in AIC_CNTL for rs480 */
 
 #define RADEON_BUS_CNTL1		0x0034
 #	define RADEON_PMI_BM_DIS		(1 << 2)
 #	define RADEON_PMI_INT_DIS		(1 << 3)
 
+#define RV370_BUS_CNTL			0x004c
+#	define RV370_PMI_BM_DIS			(1 << 5)
+#	define RV370_PMI_INT_DIS		(1 << 6)
+
+#define RADEON_MSI_REARM_EN		0x0160
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#	define RV370_MSI_REARM_EN		(1 << 0)
+
 #define RADEON_CLOCK_CNTL_DATA		0x000c
 #	define RADEON_PLL_WR_EN			(1 << 7)
 
@@ -1385,10 +1404,11 @@ do { \
 
 #define IGP_WRITE_MCIND(addr, val)				\
 do {								\
-	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)	\
-		RS690_WRITE_MCIND(addr, val);			\
-	else							\
-		RS480_WRITE_MCIND(addr, val);			\
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||	\
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))	\
+		RS690_WRITE_MCIND( addr, val );			\
+	else							\
+		RS480_WRITE_MCIND( addr, val );			\
 } while (0)
 
 #define CP_PACKET0( reg, n )					\