Cell: after sending a batch, wait for a DMA completion signal.

This fixes sporadic rendering glitches.
Using a mailbox message for now, until spe_mfcio_tag_status_read() or similar
is found to work.
This commit is contained in:
Brian 2008-01-14 16:39:26 -07:00
parent c28b112ce3
commit f20cb1d81b
4 changed files with 40 additions and 6 deletions

View file

@ -65,6 +65,8 @@
#define CELL_NUM_BATCH_BUFFERS 2
#define CELL_BATCH_BUFFER_SIZE 1024 /**< 16KB would be the max */
#define CELL_BATCH_FINISHED 0x1234 /**< mbox message */
/**
* Tell SPUs about the framebuffer size, location

View file

@ -43,16 +43,27 @@ cell_batch_flush(struct cell_context *cell)
assert(batch < CELL_NUM_BATCH_BUFFERS);
/*printf("cell_batch_dispatch: buf %u, size %u\n", batch, size);*/
/*
printf("cell_batch_dispatch: buf %u at %p, size %u\n",
batch, &cell->batch_buffer[batch][0], size);
*/
cmd_word = CELL_CMD_BATCH | (batch << 8) | (size << 16);
for (i = 0; i < cell->num_spus; i++) {
send_mbox_message(cell_global.spe_contexts[i], cmd_word);
}
/* XXX wait on DMA xfer of prev buffer to complete */
/* XXX wait for the DMA xfer to finish.
* Using mailboxes here is temporary.
* Ideally, we want to use a PPE-side DMA status check function...
*/
for (i = 0; i < cell->num_spus; i++) {
uint k = wait_mbox_message(cell_global.spe_contexts[i]);
assert(k == CELL_BATCH_FINISHED);
}
/* next buffer */
cell->cur_batch = (batch + 1) % CELL_NUM_BATCH_BUFFERS;
cell->batch_buffer_size[cell->cur_batch] = 0; /* empty */

View file

@ -46,7 +46,7 @@ helpful headers:
/opt/ibm/cell-sdk/prototype/sysroot/usr/include/libmisc.h
*/
static boolean Debug = TRUE;
static boolean Debug = FALSE;
struct spu_global spu;
@ -106,7 +106,7 @@ really_clear_tiles(uint surfaceIndex)
}
}
#if 01
#if 0
wait_on_mask(1 << TAG_SURFACE_CLEAR);
#endif
}
@ -165,6 +165,9 @@ cmd_clear_surface(const struct cell_command_clear_surface *clear)
#if 0
wait_on_mask(1 << TAG_SURFACE_CLEAR);
#endif
if (Debug)
printf("SPU %u: CLEAR SURF done\n", spu.init.id);
}
@ -222,8 +225,10 @@ cmd_render(const struct cell_command_render *render)
render->prim_type,
render->num_verts,
render->num_indexes);
/*
printf(" bound: %g, %g .. %g, %g\n",
render->xmin, render->ymin, render->xmax, render->ymax);
*/
}
ASSERT_ALIGN16(render->vertex_data);
@ -244,6 +249,7 @@ cmd_render(const struct cell_command_render *render)
render->index_data, render->vertex_data, vertex_bytes, index_bytes);
*/
ASSERT(vertex_bytes % 16 == 0);
/* get vertex data from main memory */
mfc_get(vertex_data, /* dest */
(unsigned int) render->vertex_data, /* src */
@ -252,6 +258,8 @@ cmd_render(const struct cell_command_render *render)
0, /* tid */
0 /* rid */);
ASSERT(index_bytes % 16 == 0);
/* get index data from main memory */
mfc_get(indexes, /* dest */
(unsigned int) render->index_data, /* src */
@ -330,6 +338,10 @@ cmd_render(const struct cell_command_render *render)
wait_on_mask(1 << TAG_WRITE_TILE_Z);
}
}
if (Debug)
printf("SPU %u: RENDER done\n",
spu.init.id);
}
@ -406,6 +418,9 @@ cmd_batch(uint opcode)
size = (size + 0xf) & ~0xf;
ASSERT(size % 16 == 0);
ASSERT((unsigned int) spu.init.batch_buffers[buf] % 16 == 0);
mfc_get(buffer, /* dest */
(unsigned int) spu.init.batch_buffers[buf], /* src */
size,
@ -414,6 +429,10 @@ cmd_batch(uint opcode)
0 /* rid */);
wait_on_mask(1 << TAG_BATCH_BUFFER);
/* send mbox message to indicate DMA completed */
/* XXX temporary */
spu_write_out_mbox(CELL_BATCH_FINISHED);
for (pos = 0; pos < usize; /* no incr */) {
switch (buffer[pos]) {
case CELL_CMD_FRAMEBUFFER:

View file

@ -77,9 +77,11 @@ put_tile(uint tx, uint ty, const uint *tile, int tag, int zBuf)
ASSERT(ty < spu.fb.height_tiles);
ASSERT_ALIGN16(tile);
/*
printf("put_tile: src: %p dst: 0x%x size: %d\n",
printf("SPU %u: put_tile: src: %p dst: 0x%x size: %d\n",
spu.init.id,
tile, (unsigned int) dst, bytesPerTile);
*/
mfc_put((void *) tile, /* src in local memory */
(unsigned int) dst, /* dst in main memory */
bytesPerTile,