Expand the buffer manager to include a notion of multiple pools within
a memory space.  Allow some pools to be excluded from the upload/evict
processing, meaning that any buffers within those pools are effectively
fixed.  Add a mechanism to create buffers in those pools.

This allows the legacy fixed front/depth/back buffers to be represented
in this scheme and will allow other future pinned buffers to be allocated
from fixed pools in such a way that they cannot fragment the rest of
the texture memory.
This commit is contained in:
Keith Whitwell 2006-01-26 14:50:02 +00:00
parent 398cb30c72
commit 33ca04f379
9 changed files with 379 additions and 171 deletions

View file

@ -14,29 +14,54 @@ struct bm_buffer_list;
struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel ); struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel );
/* struct bufmgr *bmCreate( ... ); */ /* struct bufmgr *bmCreate( ... ); */
/* struct bufmgr *bmAttach( ... ); */ /* struct bufmgr *bmAttach( ... ); */
void bmInitPool( struct bufmgr *, /* Define an address space. Doesn't really do anything, but the
unsigned pool, * information could be used to validate the bmInitPool() requests.
unsigned long low_offset, */
unsigned long high_offset, void bmInitMemType( struct bufmgr *,
void *virtual_base ); unsigned mem_type,
unsigned long size );
/* Create a pool of a given memory type, from a certain offset and a
* certain size.
*
* Also passed in is a virtual pointer to the start of the pool. This
* is useful in the faked-out version in i915 so that MapBuffer can
* return a pointer to a buffer residing in AGP space.
*
* Flags passed into a pool are inherited by all buffers allocated in
* that pool. So pools representing the static front,back,depth
* buffer allocations should have MEM_AGP|NO_UPLOAD|NO_EVICT|NO_MOVE to match
* the behaviour of the legacy allocations.
*
* Returns -1 for failure, pool number for success.
*/
int bmInitPool( struct bufmgr *,
unsigned long low_offset,
void *low_virtual,
unsigned long size,
unsigned flags);
/* Flags for validate and other calls. If both NO_UPLOAD and NO_EVICT
* are specified, ValidateBuffers is essentially a query.
*/
#define BM_MEM_LOCAL 0x1 #define BM_MEM_LOCAL 0x1
#define BM_MEM_AGP 0x2 #define BM_MEM_AGP 0x2
#define BM_MEM_VRAM 0x4 /* not used */ #define BM_MEM_VRAM 0x4 /* not yet used */
#define BM_WRITE 0x8 /* not yet used */
#define BM_WRITE 0x100 /* not used */ #define BM_READ 0x10 /* not yet used */
#define BM_READ 0x200 /* not used */ #define BM_NO_UPLOAD 0x20
#define BM_NO_EVICT 0x40
#define BM_NO_MOVE 0x80 /* not yet used */
#define BM_NO_ALLOC 0x100 /* legacy "fixed" buffers only */
/* Flags for validate. If both NO_UPLOAD and NO_EVICT are specified, #define BM_MEM_MASK (BM_MEM_LOCAL|BM_MEM_AGP|BM_MEM_VRAM)
* ValidateBuffers is essentially a query.
*/
#define BM_NO_UPLOAD 0x1
#define BM_NO_EVICT 0x2
/* Stick closely to ARB_vbo semantics - they're well defined and /* Stick closely to ARB_vbo semantics - they're well defined and
@ -46,6 +71,18 @@ void bmInitPool( struct bufmgr *,
void bmGenBuffers(struct bufmgr *, unsigned n, unsigned *buffers); void bmGenBuffers(struct bufmgr *, unsigned n, unsigned *buffers);
void bmDeleteBuffers(struct bufmgr *, unsigned n, unsigned *buffers); void bmDeleteBuffers(struct bufmgr *, unsigned n, unsigned *buffers);
/* Hook to inform faked buffer manager about fixed-position
* front,depth,back buffers. These may move to a fully memory-managed
* scheme, or they may continue to be managed as is.
*/
unsigned bmBufferStatic(struct bufmgr *,
unsigned buffer,
unsigned size,
unsigned pool);
/* The driver has more intimate knowledge of the hardware than a GL /* The driver has more intimate knowledge of the hardware than a GL
* client would, so flags here is more proscriptive than the usage * client would, so flags here is more proscriptive than the usage
* values in the ARB_vbo interface: * values in the ARB_vbo interface:
@ -90,8 +127,7 @@ void bmUnmapBuffer( struct bufmgr *,
*/ */
struct bm_buffer_list *bmNewBufferList( void ); struct bm_buffer_list *bmNewBufferList( void );
void bmAddBuffer( struct bufmgr *, void bmAddBuffer( struct bm_buffer_list *list,
struct bm_buffer_list *list,
unsigned buffer, unsigned buffer,
unsigned flags, unsigned flags,
unsigned *pool_return, unsigned *pool_return,

View file

@ -22,6 +22,7 @@ struct _mesa_HashTable;
/* Maximum number of buffers to pass to bmValidateBufferList: /* Maximum number of buffers to pass to bmValidateBufferList:
*/ */
#define BM_LIST_MAX 32 #define BM_LIST_MAX 32
#define BM_POOL_MAX 8
/* Wrapper around mm.c's mem_block, which understands that you must /* Wrapper around mm.c's mem_block, which understands that you must
@ -32,7 +33,8 @@ struct _mesa_HashTable;
*/ */
struct block { struct block {
struct block *next, *prev; struct block *next, *prev;
int memType; int mem_type;
struct pool *pool; /* BM_MEM_AGP */
struct mem_block *mem; /* BM_MEM_AGP */ struct mem_block *mem; /* BM_MEM_AGP */
unsigned fence; /* BM_MEM_AGP, Split to read_fence, write_fence */ unsigned fence; /* BM_MEM_AGP, Split to read_fence, write_fence */
void *virtual; void *virtual;
@ -45,50 +47,56 @@ struct buffer {
unsigned size; unsigned size;
unsigned alignment; unsigned alignment;
unsigned mapped; unsigned mapped;
unsigned flags;
struct block *block; struct block *block;
}; };
struct pool { struct pool {
unsigned size; unsigned flags;
struct mem_block *heap; struct mem_block *heap;
void *virtual; void *virtual;
struct block lru; struct block lru;
struct block freed; struct block freed;
}; };
/* List of buffers to validate:
*/
struct bm_buffer_list {
struct buffer *buffer[BM_LIST_MAX];
unsigned *offset_return[BM_LIST_MAX];
unsigned nr;
unsigned need_fence;
};
struct bufmgr { struct bufmgr {
struct intel_context *intel; struct intel_context *intel;
struct buffer buffer_list; struct pool pool[BM_POOL_MAX];
struct pool pool; unsigned nr_pools;
struct _mesa_HashTable *hash; struct _mesa_HashTable *hash;
unsigned buf_nr; /* for generating ids */ unsigned buf_nr; /* for generating ids */
unsigned last_fence;
}; };
static struct block *alloc_agp( struct bufmgr *bm, /* List of buffers to validate:
unsigned size, */
unsigned align ) struct bm_buffer_list {
struct {
unsigned buffer;
unsigned *offset_return;
unsigned *memtype_return;
} elem[BM_LIST_MAX];
unsigned nr;
};
static struct block *alloc_from_pool( struct bufmgr *bm,
unsigned pool_nr,
unsigned size,
unsigned align )
{ {
struct pool *pool = &bm->pool[pool_nr];
struct block *block = (struct block *)calloc(sizeof *block, 1); struct block *block = (struct block *)calloc(sizeof *block, 1);
if (!block) if (!block)
return NULL; return NULL;
_mesa_printf("alloc_agp 0x%x\n", size); _mesa_printf("alloc_from_pool %d sz 0x%x\n", pool_nr, size);
block->mem = mmAllocMem(bm->pool.heap, size, align, 0); block->mem = mmAllocMem(pool->heap, size, align, 0);
if (!block->mem) { if (!block->mem) {
_mesa_printf("\t- failed\n"); _mesa_printf("\t- failed\n");
free(block); free(block);
@ -96,8 +104,9 @@ static struct block *alloc_agp( struct bufmgr *bm,
} }
make_empty_list(block); make_empty_list(block);
block->memType = BM_MEM_AGP; block->pool = pool;
block->virtual = bm->pool.virtual + block->mem->ofs; block->mem_type = pool->flags & BM_MEM_MASK;
block->virtual = pool->virtual + block->mem->ofs;
_mesa_printf("\t- offset 0x%x\n", block->mem->ofs); _mesa_printf("\t- offset 0x%x\n", block->mem->ofs);
return block; return block;
@ -112,7 +121,7 @@ static struct block *alloc_local( unsigned size )
_mesa_printf("alloc_local 0x%x\n", size); _mesa_printf("alloc_local 0x%x\n", size);
block->memType = BM_MEM_LOCAL; block->mem_type = BM_MEM_LOCAL;
block->virtual = malloc(size); block->virtual = malloc(size);
if (!block->virtual) { if (!block->virtual) {
free(block); free(block);
@ -123,30 +132,35 @@ static struct block *alloc_local( unsigned size )
} }
static struct block *alloc_block( struct bufmgr *bm, static struct block *alloc_block( struct bufmgr *bm,
unsigned size, unsigned size,
unsigned align, unsigned align,
int memType ) int flags )
{ {
switch (memType) { GLuint i;
case BM_MEM_AGP:
return alloc_agp(bm, size, align); for (i = 0; i < bm->nr_pools; i++) {
case BM_MEM_LOCAL: if (bm->pool[i].flags & BM_NO_ALLOC)
return alloc_local(size); continue;
default:
return NULL; if ((bm->pool[i].flags & flags & BM_MEM_MASK) == 0)
continue;
return alloc_from_pool(bm, i, size, align);
} }
if (flags & BM_MEM_LOCAL)
return alloc_local(size);
return NULL;
} }
static int bmAllocMem( struct bufmgr *bm, static int bmAllocMem( struct bufmgr *bm,
struct buffer *buf, struct buffer *buf )
unsigned flags ) /* unused */
{ {
if (buf->block == NULL) buf->block = alloc_block(bm, buf->size, 4, buf->flags);
buf->block = alloc_block(bm, buf->size, 4, BM_MEM_AGP);
if (buf->block == NULL)
buf->block = alloc_block(bm, buf->size, 4, BM_MEM_LOCAL);
if (buf->block) if (buf->block)
buf->block->buf = buf; buf->block->buf = buf;
@ -162,15 +176,16 @@ static void free_block( struct bufmgr *bm, struct block *block )
if (!block) if (!block)
return; return;
switch (block->memType) { switch (block->mem_type) {
case BM_MEM_AGP: case BM_MEM_AGP:
case BM_MEM_VRAM:
if (bmTestFence(bm, block->fence)) { if (bmTestFence(bm, block->fence)) {
mmFreeMem(block->mem); mmFreeMem(block->mem);
free(block); free(block);
} }
else { else {
block->buf = NULL; block->buf = NULL;
move_to_tail(&bm->pool.freed, block); move_to_tail(&block->pool->freed, block);
} }
break; break;
@ -189,13 +204,16 @@ static int delayed_free( struct bufmgr *bm )
{ {
struct block *block, *tmp; struct block *block, *tmp;
int ret = 0; int ret = 0;
int i;
foreach_s(block, tmp, &bm->pool.freed ) { for (i = 0; i < bm->nr_pools; i++) {
if (bmTestFence(bm, block->fence)) { foreach_s(block, tmp, &bm->pool[i].freed ) {
ret += block->mem->size; if (bmTestFence(bm, block->fence)) {
remove_from_list(block); ret += block->mem->size;
mmFreeMem(block->mem); remove_from_list(block);
free(block); mmFreeMem(block->mem);
free(block);
}
} }
} }
@ -206,7 +224,6 @@ static int delayed_free( struct bufmgr *bm )
static int move_buffers( struct bufmgr *bm, static int move_buffers( struct bufmgr *bm,
struct buffer *buffers[], struct buffer *buffers[],
int nr, int nr,
int newMemType,
int flags ) int flags )
{ {
struct block *newMem[BM_LIST_MAX]; struct block *newMem[BM_LIST_MAX];
@ -219,22 +236,20 @@ static int move_buffers( struct bufmgr *bm,
/* First do all the allocations (or fail): /* First do all the allocations (or fail):
*/ */
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
if (buffers[i]->block->memType != newMemType) { if (!(buffers[i]->block->mem_type & flags)) {
if (flags & BM_NO_UPLOAD) if (flags & BM_NO_UPLOAD)
goto cleanup; goto cleanup;
_mesa_printf("try to move buffer size 0x%x to pool %d\n", _mesa_printf("try to move buffer %d size 0x%x to pools 0x%x\n",
buffers[i]->size, newMemType); buffers[i]->id, buffers[i]->size, flags & BM_MEM_MASK);
newMem[i] = alloc_block(bm, newMem[i] = alloc_block(bm,
buffers[i]->size, buffers[i]->size,
buffers[i]->alignment, buffers[i]->alignment,
newMemType); flags & BM_MEM_MASK);
if (!newMem[i]) if (!newMem[i])
goto cleanup; goto cleanup;
newMem[i]->buf = buffers[i];
} }
} }
@ -243,12 +258,15 @@ static int move_buffers( struct bufmgr *bm,
*/ */
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
if (newMem[i]) { if (newMem[i]) {
/* XXX: To be replaced with DMA, GTT bind, and other
* mechanisms in final version. Memcpy (or sse_memcpy) is
* probably pretty good for local->agp uploads.
*/
memcpy(newMem[i]->virtual, memcpy(newMem[i]->virtual,
buffers[i]->block->virtual, buffers[i]->block->virtual,
buffers[i]->size); buffers[i]->size);
free_block(bm, buffers[i]->block); free_block(bm, buffers[i]->block);
buffers[i]->block = newMem[i]; buffers[i]->block = newMem[i];
buffers[i]->block->buf = buffers[i]; buffers[i]->block->buf = buffers[i];
} }
@ -256,7 +274,7 @@ static int move_buffers( struct bufmgr *bm,
/* Tell hardware that its texture and other caches may be invalid: /* Tell hardware that its texture and other caches may be invalid:
*/ */
if (nr) if (nr && (flags & (BM_MEM_AGP|BM_MEM_VRAM)))
bmFlushReadCaches(bm); bmFlushReadCaches(bm);
_mesa_printf("%s - success\n", __FUNCTION__); _mesa_printf("%s - success\n", __FUNCTION__);
@ -275,26 +293,36 @@ static int move_buffers( struct bufmgr *bm,
} }
static unsigned evict_lru( struct bufmgr *bm ) static unsigned evict_lru( struct bufmgr *bm,
unsigned flags)
{ {
int ret; int i;
_mesa_printf("%s\n", __FUNCTION__); _mesa_printf("%s\n", __FUNCTION__);
ret = delayed_free(bm); if (flags & BM_NO_EVICT)
if (ret) return 0;
return ret;
else {
struct block *block = bm->pool.lru.next;
unsigned size = block->buf->size;
if (block == &bm->pool.lru || /* XXX: this is broken with >1 active pool - all the first pool
!bmTestFence(bm, block->fence)) * will be evicted before starting on the second. Actually, maybe
return 0; * you want that in some situations...
*/
for (i = 0; i < bm->nr_pools; i++) {
if ((bm->pool[i].flags & flags & BM_MEM_MASK) &&
!(bm->pool[i].flags & BM_NO_EVICT)) {
struct block *block = bm->pool[i].lru.next;
unsigned size = block->buf->size;
move_buffers(bm, &block->buf, 1, BM_MEM_LOCAL, 0); if (block == &bm->pool[i].lru ||
return size; !bmTestFence(bm, block->fence))
return 0;
move_buffers(bm, &block->buf, 1, BM_MEM_LOCAL);
return size;
}
} }
return 0;
} }
#if 0 #if 0
@ -359,27 +387,43 @@ struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
} }
void bmInitMemType( struct bufmgr *bm,
unsigned mem_type,
unsigned long size )
{
/* Nothing really to do. Could store and use to validate
* bmInitPool requests.
*/
}
/* The virtual pointer would go away in a true implementation. /* The virtual pointer would go away in a true implementation.
*/ */
void bmInitPool( struct bufmgr *bm, int bmInitPool( struct bufmgr *bm,
unsigned pool, unsigned long low_offset,
unsigned long low_offset, void *low_virtual,
unsigned long high_offset, unsigned long size,
void *virtual_base ) unsigned flags)
{ {
if (pool > 0 || low_offset >= high_offset) GLuint i;
return;
_mesa_printf("bmInitPool %d %x..%x\n", if (bm->nr_pools >= BM_POOL_MAX)
pool, low_offset, high_offset); return -1;
bm->pool.size = high_offset - low_offset; i = bm->nr_pools++;
bm->pool.heap = mmInit( low_offset, bm->pool.size );
bm->pool.virtual = virtual_base;
make_empty_list(&bm->pool.lru); _mesa_printf("bmInitPool %d low_offset %x sz %x\n",
make_empty_list(&bm->pool.freed); i, low_offset, size);
bm->pool[i].heap = mmInit( low_offset, size );
bm->pool[i].virtual = low_virtual - low_offset;
bm->pool[i].flags = flags;
make_empty_list(&bm->pool[i].lru);
make_empty_list(&bm->pool[i].freed);
return i;
} }
@ -390,7 +434,10 @@ void bmGenBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
struct buffer *buf = calloc(sizeof(*buf), 1); struct buffer *buf = calloc(sizeof(*buf), 1);
buffers[i] = buf->id = ++bm->buf_nr; buf->id = ++bm->buf_nr;
buf->alignment = 12; /* page-alignment to fit in with AGP swapping */
buf->flags = BM_MEM_AGP|BM_MEM_VRAM|BM_MEM_LOCAL;
buffers[i] = buf->id;
_mesa_HashInsert(bm->hash, buffers[i], buf); _mesa_HashInsert(bm->hash, buffers[i], buf);
} }
} }
@ -410,6 +457,54 @@ void bmDeleteBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
} }
} }
/* Hook to inform faked buffer manager about fixed-position
* front,depth,back buffers. These may move to a fully memory-managed
* scheme, or they may continue to be managed as is. It will probably
* be useful to pass a fixed offset here one day.
*/
unsigned bmBufferStatic(struct bufmgr *bm,
unsigned buffer,
unsigned size,
unsigned pool )
{
struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );
assert(!buf->block);
assert(bm->pool[pool].flags & BM_NO_EVICT);
assert(bm->pool[pool].flags & BM_NO_MOVE);
buf->size = size;
buf->flags = bm->pool[pool].flags;
buf->alignment = 0;
buf->block = alloc_from_pool(bm, pool, buf->size, buf->alignment);
if (!buf->block)
return 0;
buf->block->buf = buf;
return buf->block->mem->ofs;
}
#if 0
/* How wise/useful is this?
*/
void bmBufferSetParams( struct bufmgr *bm,
unsigned buffer,
unsigned flags,
unsigned alignment )
{
struct buffer *buf = (struct buffer *)_mesa_HashLookup( bm->hash, buffer );
assert(!buf->block);
buf->flags = flags;
buf->alignment = alignment;
}
#endif
/* If buffer size changes, create new buffer in local memory. /* If buffer size changes, create new buffer in local memory.
* Otherwise update in place. * Otherwise update in place.
*/ */
@ -424,7 +519,7 @@ void bmBufferData(struct bufmgr *bm,
_mesa_printf("bmBufferData %d sz 0x%x data: %p\n", buffer, size, data); _mesa_printf("bmBufferData %d sz 0x%x data: %p\n", buffer, size, data);
if (buf->block) { if (buf->block) {
if ((buf->block->memType == BM_MEM_AGP && !bmTestFence(bm, buf->block->fence)) || if ((buf->block->mem_type != BM_MEM_LOCAL && !bmTestFence(bm, buf->block->fence)) ||
(buf->size && buf->size != size) || (buf->size && buf->size != size) ||
(data == NULL)) { (data == NULL)) {
free_block(bm, buf->block); free_block(bm, buf->block);
@ -435,7 +530,7 @@ void bmBufferData(struct bufmgr *bm,
buf->size = size; buf->size = size;
if (data != NULL) { if (data != NULL) {
bmAllocMem(bm, buf, flags); bmAllocMem(bm, buf);
memcpy(buf->block->virtual, data, size); memcpy(buf->block->virtual, data, size);
} }
} }
@ -453,12 +548,13 @@ void bmBufferSubData(struct bufmgr *bm,
_mesa_printf("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buffer, offset, size); _mesa_printf("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buffer, offset, size);
if (buf->block == 0) if (buf->block == 0)
bmAllocMem(bm, buf, 0); bmAllocMem(bm, buf);
if (buf->block->memType == BM_MEM_AGP) if (buf->block->mem_type != BM_MEM_LOCAL)
bmFinishFence(bm, buf->block->fence); bmFinishFence(bm, buf->block->fence);
memcpy(buf->block->virtual + offset, data, size); if (size)
memcpy(buf->block->virtual + offset, data, size);
} }
@ -478,11 +574,11 @@ void *bmMapBuffer( struct bufmgr *bm,
buf->mapped = 1; buf->mapped = 1;
if (buf->block == 0) if (buf->block == 0)
bmAllocMem(bm, buf, 0); bmAllocMem(bm, buf);
/* Finish any outstanding operations to/from this memory: /* Finish any outstanding operations to/from this memory:
*/ */
if (buf->block->memType == BM_MEM_AGP) if (buf->block->mem_type != BM_MEM_LOCAL)
bmFinishFence(bm, buf->block->fence); bmFinishFence(bm, buf->block->fence);
return buf->block->virtual; return buf->block->virtual;
@ -525,32 +621,27 @@ struct bm_buffer_list *bmNewBufferList( void )
return list; return list;
} }
void bmAddBuffer( struct bufmgr *bm, void bmAddBuffer( struct bm_buffer_list *list,
struct bm_buffer_list *list,
unsigned buffer, unsigned buffer,
unsigned flags, unsigned flags,
unsigned *pool_return, unsigned *memtype_return,
unsigned *offset_return ) unsigned *offset_return )
{ {
assert(list->nr < BM_LIST_MAX); assert(list->nr < BM_LIST_MAX);
list->buffer[list->nr] = _mesa_HashLookup(bm->hash, buffer); list->elem[list->nr].buffer = buffer;
list->offset_return[list->nr] = offset_return; list->elem[list->nr].memtype_return = memtype_return;
list->elem[list->nr].offset_return = offset_return;
_mesa_printf("bmAddBuffer nr %d buf %d (%p/%d)\n", list->nr, buffer, _mesa_printf("bmAddBuffer nr %d buf %d\n",
list->buffer[list->nr], list->buffer[list->nr]->id); list->nr, buffer);
list->nr++; list->nr++;
if (pool_return)
*pool_return = 0;
} }
void bmFreeBufferList( struct bm_buffer_list *list ) void bmFreeBufferList( struct bm_buffer_list *list )
{ {
assert(!list->need_fence);
free(list); free(list);
} }
@ -568,45 +659,39 @@ int bmValidateBufferList( struct bufmgr *bm,
struct bm_buffer_list *list, struct bm_buffer_list *list,
unsigned flags ) unsigned flags )
{ {
struct buffer *bufs[BM_LIST_MAX];
unsigned i; unsigned i;
unsigned total = 0;
_mesa_printf("%s\n", __FUNCTION__); _mesa_printf("%s\n", __FUNCTION__);
if (list->nr > BM_LIST_MAX) if (list->nr > BM_LIST_MAX)
return 0; return 0;
for (i = 0; i < list->nr; i++) { for (i = 0; i < list->nr; i++)
assert(!list->buffer[i]->mapped); bufs[i] = _mesa_HashLookup(bm->hash, list->elem[i].buffer);
assert(list->buffer[i]->block);
total += list->buffer[i]->size;
}
/* Don't need to try allocation in this case:
*/
if (total > bm->pool.size)
return 0;
/* The old story: evict one texture after another until allocation /* The old story: evict one texture after another until allocation
* succeeds. This is a pretty poor strategy but really hard to do * succeeds. This is a pretty poor strategy but really hard to do
* better without more infrastructure... Which is coming - hooray! * better without more infrastructure... Which is coming - hooray!
*/ */
while (!move_buffers(bm, list->buffer, list->nr, BM_MEM_AGP, flags)) { while (!move_buffers(bm, bufs, list->nr, flags)) {
if ((flags & BM_NO_EVICT) || if (!delayed_free(bm) &&
!evict_lru(bm)) !evict_lru(bm, flags))
return 0; return 0;
} }
for (i = 0; i < list->nr; i++) { for (i = 0; i < list->nr; i++) {
_mesa_printf("%d: buf %d ofs 0x%x\n", _mesa_printf("%d: buf %d ofs 0x%x\n",
i, list->buffer[i]->id, i, bufs[i]->id, bufs[i]->block->mem->ofs);
list->buffer[i]->block->mem->ofs);
list->offset_return[i][0] = list->buffer[i]->block->mem->ofs; list->elem[i].offset_return[0] = bufs[i]->block->mem->ofs;
if (list->elem[i].memtype_return)
list->elem[i].memtype_return[0] = bufs[i]->block->mem_type;
} }
list->need_fence = 1;
return 1; return 1;
} }
@ -621,26 +706,22 @@ int bmValidateBufferList( struct bufmgr *bm,
*/ */
void bmFenceBufferList( struct bufmgr *bm, struct bm_buffer_list *list ) void bmFenceBufferList( struct bufmgr *bm, struct bm_buffer_list *list )
{ {
unsigned i;
_mesa_printf("%s (%d bufs)\n", __FUNCTION__, list->nr); _mesa_printf("%s (%d bufs)\n", __FUNCTION__, list->nr);
assert(list->need_fence); if (list->nr) {
list->need_fence = 0; unsigned i;
unsigned fence = bmSetFence( bm );
if (!list->nr) /* Move all buffers to head of resident list and set their fences
return; */
for (i = 0; i < list->nr; i++) {
struct buffer *buf = _mesa_HashLookup(bm->hash, list->elem[i].buffer);
bm->last_fence = bmSetFence( bm ); move_to_head(&buf->block->pool->lru, buf->block);
buf->block->fence = fence;
/* Move all buffers to head of resident list and set their fences }
*/
for (i = 0; i < list->nr; i++) {
assert(list->buffer[i]->block->buf == list->buffer[i]);
move_to_head(&bm->pool.lru, list->buffer[i]->block);
list->buffer[i]->block->fence = bm->last_fence;
} }
} }

View file

@ -42,6 +42,7 @@
#include "i915_reg.h" #include "i915_reg.h"
#include "bufmgr.h" #include "bufmgr.h"
#include "intel_regions.h"
/*************************************** /***************************************
* Mesa's Driver Functions * Mesa's Driver Functions
@ -103,6 +104,7 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
struct dd_function_table functions; struct dd_function_table functions;
i915ContextPtr i915 = (i915ContextPtr) CALLOC_STRUCT(i915_context); i915ContextPtr i915 = (i915ContextPtr) CALLOC_STRUCT(i915_context);
intelContextPtr intel = &i915->intel; intelContextPtr intel = &i915->intel;
intelScreenPrivate *intelScreen;
GLcontext *ctx = &intel->ctx; GLcontext *ctx = &intel->ctx;
if (!i915) return GL_FALSE; if (!i915) return GL_FALSE;
@ -126,12 +128,49 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
intel->bm = bm_fake_intel_Attach( intel ); intel->bm = bm_fake_intel_Attach( intel );
bmInitPool(intel->bm, bmInitPool(intel->bm,
0, intel->intelScreen->tex.offset, /* low offset */
0, /* low offset */ intel->intelScreen->tex.map, /* low virtual */
intel->intelScreen->tex.size, /* high offset */ intel->intelScreen->tex.size,
intel->intelScreen->tex.map); /* virtual base */ BM_MEM_AGP);
intelScreen = intel->intelScreen;
/* These are still static, but create regions for them.
*/
intel->front_region =
intel_region_create_static(intel,
BM_MEM_AGP,
intelScreen->front.offset,
intelScreen->front.map,
intelScreen->cpp,
intelScreen->front.pitch / intelScreen->cpp,
intelScreen->front.size / intelScreen->front.pitch);
intel->back_region =
intel_region_create_static(intel,
BM_MEM_AGP,
intelScreen->back.offset,
intelScreen->back.map,
intelScreen->cpp,
intelScreen->back.pitch / intelScreen->cpp,
intelScreen->back.size / intelScreen->back.pitch);
/* Still assuming front.cpp == depth.cpp
*/
intel->depth_region =
intel_region_create_static(intel,
BM_MEM_AGP,
intelScreen->depth.offset,
intelScreen->depth.map,
intelScreen->cpp,
intelScreen->depth.pitch / intelScreen->cpp,
intelScreen->depth.size / intelScreen->depth.pitch);
/* Advertise the full hardware capabilities. The new memory
* manager should cope much better with overload situations:
*/
ctx->Const.MaxTextureLevels = 11; ctx->Const.MaxTextureLevels = 11;
ctx->Const.Max3DTextureLevels = 8; ctx->Const.Max3DTextureLevels = 8;
ctx->Const.MaxCubeTextureLevels = 11; ctx->Const.MaxCubeTextureLevels = 11;
@ -143,14 +182,13 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
* instruction can translate to more than one HW instruction, so * instruction can translate to more than one HW instruction, so
* we'll still have to check and fallback each time. * we'll still have to check and fallback each time.
*/ */
ctx->Const.FragmentProgram.MaxNativeTemps = I915_MAX_TEMPORARY; ctx->Const.FragmentProgram.MaxNativeTemps = I915_MAX_TEMPORARY;
ctx->Const.FragmentProgram.MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */ ctx->Const.FragmentProgram.MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */
ctx->Const.FragmentProgram.MaxNativeParameters = I915_MAX_CONSTANT; ctx->Const.FragmentProgram.MaxNativeParameters = I915_MAX_CONSTANT;
ctx->Const.FragmentProgram.MaxNativeAluInstructions = I915_MAX_ALU_INSN; ctx->Const.FragmentProgram.MaxNativeAluInstructions = I915_MAX_ALU_INSN;
ctx->Const.FragmentProgram.MaxNativeTexInstructions = I915_MAX_TEX_INSN; ctx->Const.FragmentProgram.MaxNativeTexInstructions = I915_MAX_TEX_INSN;
ctx->Const.FragmentProgram.MaxNativeInstructions = (I915_MAX_ALU_INSN + ctx->Const.FragmentProgram.MaxNativeInstructions = (I915_MAX_ALU_INSN +
I915_MAX_TEX_INSN); I915_MAX_TEX_INSN);
ctx->Const.FragmentProgram.MaxNativeTexIndirections = I915_MAX_TEX_INDIRECT; ctx->Const.FragmentProgram.MaxNativeTexIndirections = I915_MAX_TEX_INDIRECT;
ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0; /* I don't think we have one */ ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0; /* I don't think we have one */

View file

@ -306,7 +306,6 @@ extern void i915_update_fog( GLcontext *ctx );
*/ */
extern void i915UpdateTextureState( intelContextPtr intel ); extern void i915UpdateTextureState( intelContextPtr intel );
extern void i915InitTextureFuncs( struct dd_function_table *functions ); extern void i915InitTextureFuncs( struct dd_function_table *functions );
extern intelTextureObjectPtr i915AllocTexObj( struct gl_texture_object *texObj );
/*====================================================================== /*======================================================================
* i915_metaops.c * i915_metaops.c

View file

@ -47,9 +47,9 @@
#define DV_PF_565 (2<<8) #define DV_PF_565 (2<<8)
#define DV_PF_8888 (3<<8) #define DV_PF_8888 (3<<8)
typedef struct intel_context intelContext; struct intel_region;
typedef struct intel_context *intelContextPtr; typedef struct intel_context *intelContextPtr;
typedef struct intel_texture_object *intelTextureObjectPtr;
typedef void (*intel_tri_func)(intelContextPtr, intelVertex *, intelVertex *, typedef void (*intel_tri_func)(intelContextPtr, intelVertex *, intelVertex *,
intelVertex *); intelVertex *);
@ -214,6 +214,11 @@ struct intel_context
char *verts; /* points to tnl->clipspace.vertex_buf */ char *verts; /* points to tnl->clipspace.vertex_buf */
struct intel_region *front_region;
struct intel_region *back_region;
struct intel_region *depth_region;
/* Fallback rasterization functions /* Fallback rasterization functions
*/ */
intel_point_func draw_point; intel_point_func draw_point;

View file

@ -102,6 +102,47 @@ void intel_region_release( struct intel_context *intel,
} }
struct intel_region *intel_region_create_static( struct intel_context *intel,
GLuint mem_type,
GLuint offset,
void *virtual,
GLuint cpp,
GLuint pitch,
GLuint height )
{
struct intel_region *region = calloc(sizeof(*region), 1);
GLuint size = cpp * pitch * height;
GLint pool;
_mesa_printf("%s\n", __FUNCTION__);
region->cpp = cpp;
region->pitch = pitch;
region->height = height; /* needed? */
region->refcount = 1;
/* Recipe for creating a static buffer - create a static pool with
* the right offset and size, generate a buffer and use a special
* call to bind it to all of the memory in that pool.
*/
pool = bmInitPool(intel->bm, offset, virtual, size,
(BM_MEM_AGP |
BM_NO_UPLOAD |
BM_NO_EVICT |
BM_NO_MOVE));
if (pool < 0) {
_mesa_printf("bmInitPool failed for static region\n");
exit(1);
}
bmGenBuffers(intel->bm, 1, &region->buffer);
bmBufferStatic(intel->bm, region->buffer, size, pool);
return region;
}
static void _mesa_copy_rect( GLubyte *dst, static void _mesa_copy_rect( GLubyte *dst,
GLuint cpp, GLuint cpp,
GLuint dst_pitch, GLuint dst_pitch,
@ -243,11 +284,10 @@ void intel_region_copy( struct intel_context *intel,
assert(src->cpp == dst->cpp); assert(src->cpp == dst->cpp);
LOCK_HARDWARE(intel); LOCK_HARDWARE(intel);
bmAddBuffer(intel->bm, list, dst->buffer, BM_WRITE, NULL, &dst_offset); bmAddBuffer(list, dst->buffer, BM_WRITE, NULL, &dst_offset);
bmAddBuffer(intel->bm, list, src->buffer, BM_READ, NULL, &src_offset); bmAddBuffer(list, src->buffer, BM_READ, NULL, &src_offset);
/* What I really want to do is query if both buffers are already /* Query if both buffers are already uploaded:
* uploaded:
*/ */
if (bmValidateBufferList(intel->bm, list, BM_NO_EVICT|BM_NO_UPLOAD)) { if (bmValidateBufferList(intel->bm, list, BM_NO_EVICT|BM_NO_UPLOAD)) {
intelEmitCopyBlitLocked(intel, intelEmitCopyBlitLocked(intel,
@ -293,7 +333,7 @@ void intel_region_fill( struct intel_context *intel,
_mesa_printf("%s\n", __FUNCTION__); _mesa_printf("%s\n", __FUNCTION__);
LOCK_HARDWARE(intel); LOCK_HARDWARE(intel);
bmAddBuffer(intel->bm, list, dst->buffer, BM_WRITE, NULL, &dst_offset); bmAddBuffer(list, dst->buffer, BM_WRITE, NULL, &dst_offset);
if (bmValidateBufferList(intel->bm, list, BM_NO_EVICT)) { if (bmValidateBufferList(intel->bm, list, BM_NO_EVICT)) {
intelEmitFillBlitLocked(intel, intelEmitFillBlitLocked(intel,

View file

@ -61,6 +61,14 @@ void intel_region_release(struct intel_context *intel,
struct intel_region *ib ); struct intel_region *ib );
struct intel_region *intel_region_create_static( struct intel_context *intel,
GLuint mem_type,
GLuint offset,
void *virtual,
GLuint cpp,
GLuint pitch,
GLuint height );
/* Map/unmap regions. This is refcounted also: /* Map/unmap regions. This is refcounted also:
*/ */
GLubyte *intel_region_map(struct intel_context *intel, GLubyte *intel_region_map(struct intel_context *intel,

View file

@ -32,6 +32,9 @@
#include "dri_util.h" #include "dri_util.h"
#include "xmlconfig.h" #include "xmlconfig.h"
/* XXX: change name or eliminate to avoid conflict with "struct
* intel_region"!!!
*/
typedef struct { typedef struct {
drm_handle_t handle; drm_handle_t handle;
drmSize size; /* region size in bytes */ drmSize size; /* region size in bytes */

View file

@ -209,7 +209,6 @@ void intel_add_texoffset_fixup( struct intel_context *intel,
intel_miptree_image_offset(intelObj->mt, 0, intelObj->firstLevel)); intel_miptree_image_offset(intelObj->mt, 0, intelObj->firstLevel));
#else #else
*ptr = (intelObj->textureOffset + *ptr = (intelObj->textureOffset +
intel->intelScreen->tex.offset +
intel_miptree_image_offset(intelObj->mt, 0, intelObj->firstLevel)); intel_miptree_image_offset(intelObj->mt, 0, intelObj->firstLevel));
#endif #endif
} }
@ -270,8 +269,7 @@ GLboolean intel_validate_buffers( struct intel_context *intel )
ok = intel_finalize_mipmap_tree( intel, i ); ok = intel_finalize_mipmap_tree( intel, i );
if (ok) { if (ok) {
bmAddBuffer(intel->bm, bmAddBuffer(intel->buffer_list,
intel->buffer_list,
intelObj->mt->region->buffer, intelObj->mt->region->buffer,
BM_READ, BM_READ,
NULL, NULL,
@ -280,7 +278,7 @@ GLboolean intel_validate_buffers( struct intel_context *intel )
} }
} }
ok = bmValidateBufferList(intel->bm, intel->buffer_list, 0); ok = bmValidateBufferList(intel->bm, intel->buffer_list, BM_MEM_AGP);
assert(ok); assert(ok);
return ok; return ok;
} }