Call kernel to update the sarea breadcrumb when we seem to be out of batchbuffer memory.

Remove flushes from batchbuffer tails. They are not needed anymore
when we have the kernel sync flush mechanism.

Better kernel error checks.
This commit is contained in:
Thomas Hellström 2006-02-27 16:28:10 +00:00
parent bb409e88d8
commit c64a08c286
2 changed files with 28 additions and 12 deletions

View file

@ -350,7 +350,8 @@ static int move_buffers( struct bufmgr *bm,
drm_ttm_arg_t arg;
struct block *block, *last_block;
int ret;
drm_ttm_buf_arg_t *cur;
int size;
DBG("%s\n", __FUNCTION__);
@ -381,6 +382,7 @@ static int move_buffers( struct bufmgr *bm,
arg.num_bufs = 0;
last_block = NULL;
size = 0;
for (i = 0; i <nr; ++i) {
if (newMem[i] && newMem[i]->has_ttm) {
@ -396,6 +398,7 @@ static int move_buffers( struct bufmgr *bm,
arg.first = &block->drm_buf;
else
last_block->drm_buf.next = &block->drm_buf;
size += block->drm_buf.num_pages;
arg.num_bufs++;
last_block = block;
block->drm_buf.op = ((flags & BM_MEM_MASK) == BM_MEM_AGP) ?
@ -406,10 +409,19 @@ static int move_buffers( struct bufmgr *bm,
}
arg.op = ttm_bufs;
arg.do_fence = 0;
DBG("Num validated TTM bufs is %d\n", arg.num_bufs);
DBG("Num validated TTM bufs is %d pages %d\n", arg.num_bufs, size);
if (arg.num_bufs) {
ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &arg);
assert(ret == 0);
ret = ioctl(bm->intel->driFd, DRM_IOCTL_TTM, &arg);
assert(ret==0);
cur = arg.first;
for(i=0; i< arg.num_bufs; ++i) {
if (cur->ret) {
fprintf(stderr,"Kernel Error. Check dmesg.\n");
fflush(stderr);
assert(0);
}
cur = cur->next;
}
}
/*
@ -529,7 +541,7 @@ static void viaSwapOutWork( struct bufmgr *bm )
unsigned target;
if (bm->thrashing) {
target = 1*1024*1024;
target = 6*1024*1024;
}
else if (bmIsTexMemLow(bm)) {
target = 64*1024;
@ -975,7 +987,7 @@ int bmValidateBufferList( struct bufmgr *bm,
{
struct buffer *bufs[BM_LIST_MAX];
unsigned i;
int count;
DBG("%s\n", __FUNCTION__);
@ -992,8 +1004,13 @@ int bmValidateBufferList( struct bufmgr *bm,
* better without more infrastucture... Which is coming - hooray!
*/
while (!move_buffers(bm, bufs, list->nr, flags))
delayed_free(bm);
count = 0;
while (!move_buffers(bm, bufs, list->nr, flags)) {
delayed_free(bm);
if (count++ > 10) {
intelWaitIrq( bm->intel, bm->intel->sarea->last_dispatch + 1);
}
}
for (i = 0; i < list->nr; i++) {
if (bufs[i]->block->has_ttm > 1) {

View file

@ -169,8 +169,7 @@ static void do_flush_locked( struct intel_batchbuffer *batch,
bmUnmapBuffer(batch->bm, batch->buffer);
/* Fire the batch buffer, which was uploaded above:
*/
intel_batch_ioctl(batch->intel,
@ -195,13 +194,13 @@ GLuint intel_batchbuffer_flush( struct intel_batchbuffer *batch )
* performance drain that we would like to avoid.
*/
if (used & 4) {
((int *)batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *)batch->ptr)[0] = 0; /*intel->vtbl.flush_cmd();*/
((int *)batch->ptr)[1] = 0;
((int *)batch->ptr)[2] = MI_BATCH_BUFFER_END;
used += 12;
}
else {
((int *)batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *)batch->ptr)[0] = /* intel->vtbl.flush_cmd(); */
((int *)batch->ptr)[1] = MI_BATCH_BUFFER_END;
used += 8;
}