Merge branch 'upstream-gallium-0.1' into darktama-gallium-0.1
commit faa05d41c5
15 changed files with 338 additions and 206 deletions
@@ -54,6 +54,9 @@
assert((((unsigned long) (ptr)) & 0xf) == 0);

/** round up value to next multiple of 4 */
#define ROUNDUP4(k) (((k) + 0x3) & ~0x3)

/** round up value to next multiple of 16 */
#define ROUNDUP16(k) (((k) + 0xf) & ~0xf)
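As a quick illustration of the rounding macros added above (a standalone sketch, not part of the commit), the bitmask trick adds the alignment minus one and then clears the low bits, e.g. ROUNDUP16(18) = (18 + 15) & ~15 = 32:

#include <assert.h>

#define ROUNDUP4(k)  (((k) + 0x3) & ~0x3)
#define ROUNDUP16(k) (((k) + 0xf) & ~0xf)

int main(void)
{
   /* already aligned values are unchanged */
   assert(ROUNDUP4(8) == 8);
   assert(ROUNDUP16(32) == 32);
   /* otherwise round up to the next multiple */
   assert(ROUNDUP4(5) == 8);            /* (5 + 3) & ~3   = 8  */
   assert(ROUNDUP16(18) == 32);         /* (18 + 15) & ~15 = 32 */
   /* e.g. 7 ushort indices = 14 bytes -> padded to 16 bytes */
   assert(ROUNDUP4(7 * 2) == 16);
   return 0;
}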
@@ -124,6 +127,8 @@ struct cell_command_render
const void *vertex_data;
const ushort *index_data;
float xmin, ymin, xmax, ymax;
boolean inline_indexes;
boolean inline_verts;
} ALIGN16_ATTRIB;
@@ -96,6 +96,15 @@ cell_batch_flush(struct cell_context *cell)
}

uint
cell_batch_free_space(const struct cell_context *cell)
{
uint free = CELL_BATCH_BUFFER_SIZE
            - cell->batch_buffer_size[cell->cur_batch];
return free;
}

/**
 * \param cmd command to append
 * \param length command size in bytes

@@ -129,6 +138,8 @@ cell_batch_alloc(struct cell_context *cell, uint bytes)
void *pos;
uint size;

ASSERT(bytes % 4 == 0);

assert(cell->cur_batch >= 0);

size = cell->batch_buffer_size[cell->cur_batch];
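The new cell_batch_free_space() helper reads best next to cell_batch_alloc(): the batch buffer is a simple bump allocator, and callers now query the remaining room before deciding to inline extra data after a command. The following standalone sketch models that pattern; the BATCH_BUFFER_SIZE value and struct layout are assumptions for illustration, not the driver's actual definitions.

#include <assert.h>
#include <string.h>

/* Illustrative size only; the real value lives in the cell driver headers. */
#define BATCH_BUFFER_SIZE 1024

struct batch {
   unsigned char data[BATCH_BUFFER_SIZE];
   unsigned used;                    /* bytes handed out so far */
};

/* Free space is simply capacity minus what has been consumed. */
static unsigned batch_free_space(const struct batch *b)
{
   return BATCH_BUFFER_SIZE - b->used;
}

/* Bump allocation: carve the next 'bytes' out of the buffer, 4-byte granular. */
static void *batch_alloc(struct batch *b, unsigned bytes)
{
   void *pos;
   assert(bytes % 4 == 0);
   assert(bytes <= batch_free_space(b));
   pos = b->data + b->used;
   b->used += bytes;
   return pos;
}

int main(void)
{
   struct batch b = { .used = 0 };
   void *cmd = batch_alloc(&b, 16);
   memset(cmd, 0, 16);
   assert(batch_free_space(&b) == BATCH_BUFFER_SIZE - 16);
   return 0;
}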
@@ -38,6 +38,9 @@ struct cell_context;
extern void
cell_batch_flush(struct cell_context *cell);

extern uint
cell_batch_free_space(const struct cell_context *cell);

extern void
cell_batch_append(struct cell_context *cell, const void *cmd, uint length);
@@ -39,6 +39,11 @@
#include "pipe/draw/draw_vbuf.h"

/** Allow prim indexes, verts to be inlined after RENDER command */
#define ALLOW_INLINE_INDEXES 1
#define ALLOW_INLINE_VERTS 1

/**
 * Subclass of vbuf_render because we need a cell_context pointer in
 * a few places.

@@ -123,6 +128,10 @@ cell_vbuf_draw(struct vbuf_render *vbr,
printf("cell_vbuf_draw() nr_indices = %u nr_verts = %u indexes = [%u %u %u ...]\n",
       nr_indices, nr_vertices,
       indices[0], indices[1], indices[2]);
printf("ind space = %u, vert space = %u, space = %u\n",
       nr_indices * 2,
       nr_vertices * 4 * cell->vertex_info.size,
       cell_batch_free_space(cell));
#endif

/* compute x/y bounding box */

@@ -145,23 +154,53 @@ cell_vbuf_draw(struct vbuf_render *vbr,
/* build/insert batch RENDER command */
{
const uint index_bytes = ROUNDUP4(nr_indices * 2);
const uint vertex_bytes = nr_vertices * 4 * cell->vertex_info.size;

struct cell_command_render *render
   = (struct cell_command_render *)
   cell_batch_alloc(cell, sizeof(*render));
render->opcode = CELL_CMD_RENDER;
render->prim_type = cvbr->prim;
render->num_verts = nr_vertices;
render->vertex_size = 4 * cell->vertex_info.size;
render->vertex_data = vertices;
render->index_data = indices;

render->num_indexes = nr_indices;
if (ALLOW_INLINE_INDEXES &&
    index_bytes <= cell_batch_free_space(cell)) {
/* indices inlined, right after render cmd */
void *dst = cell_batch_alloc(cell, index_bytes);
memcpy(dst, indices, nr_indices * 2);
render->inline_indexes = TRUE;
render->index_data = NULL;
}
else {
/* indices in separate buffer */
render->inline_indexes = FALSE;
render->index_data = indices;
ASSERT_ALIGN16(render->index_data);
}

render->vertex_size = 4 * cell->vertex_info.size;
render->num_verts = nr_vertices;
if (ALLOW_INLINE_VERTS &&
    render->inline_indexes &&
    vertex_bytes <= cell_batch_free_space(cell)) {
/* vertex data inlined, after indices */
void *dst = cell_batch_alloc(cell, vertex_bytes);
memcpy(dst, vertices, vertex_bytes);
render->inline_verts = TRUE;
render->vertex_data = NULL;
}
else {
render->inline_verts = FALSE;
render->vertex_data = vertices;
ASSERT_ALIGN16(render->vertex_data);
}

render->xmin = xmin;
render->ymin = ymin;
render->xmax = xmax;
render->ymax = ymax;

ASSERT_ALIGN16(render->vertex_data);
ASSERT_ALIGN16(render->index_data);
}

#if 01
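To see what this hunk produces, the RENDER command can now be followed in the same batch buffer by the index data (padded to a multiple of 4 bytes) and then the vertex data. A rough standalone sketch of the resulting layout and sizes; the command size and draw-call numbers here are illustrative assumptions, not the driver's real values:

#include <stdio.h>

#define ROUNDUP4(k) (((k) + 0x3) & ~0x3)

int main(void)
{
   /* example draw call: 6 indices (two triangles), 4 vertices, 8 floats each */
   const unsigned num_indexes = 6;
   const unsigned num_verts = 4;
   const unsigned vertex_size = 8 * 4;        /* bytes per vertex */
   const unsigned render_cmd_size = 48;       /* assumed sizeof(struct cell_command_render) */

   const unsigned index_bytes  = ROUNDUP4(num_indexes * 2);   /* 12 bytes */
   const unsigned vertex_bytes = num_verts * vertex_size;     /* 128 bytes */

   /* batch buffer layout when both ALLOW_INLINE_* paths are taken:
    * [ render command | inlined ushort indices (padded) | inlined vertices ]
    */
   printf("render cmd at offset 0, %u bytes\n", render_cmd_size);
   printf("indices    at offset %u, %u bytes\n", render_cmd_size, index_bytes);
   printf("vertices   at offset %u, %u bytes\n",
          render_cmd_size + index_bytes, vertex_bytes);
   return 0;
}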
@@ -204,66 +204,116 @@ tile_bounding_box(const struct cell_command_render *render,
}

/**
 * Render primitives
 * \param pos_incr returns value indicating how may words to skip after
 * this command in the batch buffer
 */
static void
cmd_render(const struct cell_command_render *render)
cmd_render(const struct cell_command_render *render, uint *pos_incr)
{
/* we'll DMA into these buffers */
ubyte vertex_data[CELL_MAX_VBUF_SIZE] ALIGN16_ATTRIB;
ushort indexes[CELL_MAX_VBUF_INDEXES] ALIGN16_ATTRIB;
uint i, j, total_vertex_bytes, total_index_bytes;
ushort index_data[CELL_MAX_VBUF_INDEXES] ALIGN16_ATTRIB;
const uint vertex_size = render->vertex_size; /* in bytes */
const uint total_vertex_bytes = render->num_verts * vertex_size;
const ubyte *vertices;
const ushort *indexes;
uint mask;
uint i, j;

if (Debug) {
printf("SPU %u: RENDER prim %u, indices: %u, nr_vert: %u\n",
printf("SPU %u: RENDER prim %u, num_vert=%u num_ind=%u "
       "inline_vert=%u inline_ind=%u\n",
       spu.init.id,
       render->prim_type,
       render->num_verts,
       render->num_indexes);
       render->num_indexes,
       render->inline_verts,
       render->inline_indexes);

/*
printf(" bound: %g, %g .. %g, %g\n",
       render->xmin, render->ymin, render->xmax, render->ymax);
*/
printf("SPU %u: indices at %p vertices at %p\n",
       spu.init.id,
       render->index_data, render->vertex_data);
}

ASSERT(sizeof(*render) % 4 == 0);
ASSERT_ALIGN16(render->vertex_data);
ASSERT_ALIGN16(render->index_data);

/* how much vertex data */
total_vertex_bytes = render->num_verts * vertex_size;
total_index_bytes = render->num_indexes * sizeof(ushort);
if (total_index_bytes < 16)
total_index_bytes = 16;
else
total_index_bytes = ROUNDUP16(total_index_bytes);

/*
printf("VBUF: indices at %p, vertices at %p total_vertex_bytes %u ind_bytes %u\n",
       render->index_data, render->vertex_data, total_vertex_bytes, total_index_bytes);
*/

/**
 ** Get vertex, index buffers if not inlined
 **/
if (!render->inline_verts) {
ASSERT(total_vertex_bytes % 16 == 0);

ASSERT(total_vertex_bytes % 16 == 0);
/* get vertex data from main memory */
mfc_get(vertex_data, /* dest */
        (unsigned int) render->vertex_data, /* src */
        total_vertex_bytes, /* size */
        TAG_VERTEX_BUFFER,
        0, /* tid */
        0 /* rid */);
mfc_get(vertex_data, /* dest */
        (unsigned int) render->vertex_data, /* src */
        total_vertex_bytes, /* size */
        TAG_VERTEX_BUFFER,
        0, /* tid */
        0 /* rid */);

ASSERT(total_index_bytes % 16 == 0);
vertices = vertex_data;
}

/* get index data from main memory */
mfc_get(indexes, /* dest */
        (unsigned int) render->index_data, /* src */
        total_index_bytes,
        TAG_INDEX_BUFFER,
        0, /* tid */
        0 /* rid */);
if (!render->inline_indexes) {
uint total_index_bytes;

wait_on_mask_all((1 << TAG_VERTEX_BUFFER) |
                 (1 << TAG_INDEX_BUFFER));
*pos_incr = 0;

/* find tiles which intersect the prim bounding box */
total_index_bytes = render->num_indexes * sizeof(ushort);
if (total_index_bytes < 16)
total_index_bytes = 16;
else
total_index_bytes = ROUNDUP16(total_index_bytes);

indexes = index_data;

/* get index data from main memory */
mfc_get(index_data, /* dest */
        (unsigned int) render->index_data, /* src */
        total_index_bytes,
        TAG_INDEX_BUFFER,
        0, /* tid */
        0 /* rid */);
}

/**
 ** Get pointers to inlined indexes, verts, if present
 **/
if (render->inline_indexes) {
/* indexes are right after the render command in the batch buffer */
indexes = (ushort *) (render + 1);
*pos_incr = (render->num_indexes * 2 + 3) / 4;

if (render->inline_verts) {
/* vertices are after indexes, if inlined */
vertices = (const ubyte *) (render + 1) + *pos_incr * 4;
*pos_incr = *pos_incr + total_vertex_bytes / 4;
}
}

/* wait for vertex and/or index buffers if not inlined */
mask = 0x0;
if (!render->inline_verts)
mask |= (1 << TAG_VERTEX_BUFFER);
if (!render->inline_indexes)
mask |= (1 << TAG_INDEX_BUFFER);
wait_on_mask_all(mask);

/**
 ** find tiles which intersect the prim bounding box
 **/
uint txmin, tymin, box_width_tiles, box_num_tiles;
#if 0
tile_bounding_box(render, &txmin, &tymin,
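The pos_incr bookkeeping above is easiest to see with numbers: the batch cursor advances in 4-byte words, so the inlined payload after the command is counted in words. A standalone sketch using made-up example values:

#include <assert.h>

int main(void)
{
   /* example: 7 inlined ushort indices and 4 inlined 32-byte vertices */
   const unsigned num_indexes = 7;
   const unsigned num_verts = 4;
   const unsigned vertex_size = 32;                       /* bytes per vertex */
   const unsigned total_vertex_bytes = num_verts * vertex_size;

   /* index words: 7 * 2 = 14 bytes, rounded up to 4 words (16 bytes) */
   unsigned pos_incr = (num_indexes * 2 + 3) / 4;
   assert(pos_incr == 4);

   /* inlined vertex data starts after the padded indices and adds
    * total_vertex_bytes / 4 more words */
   pos_incr += total_vertex_bytes / 4;
   assert(pos_incr == 4 + 32);

   /* cmd_batch() then advances: pos += sizeof(render cmd) / 4 + pos_incr */
   return 0;
}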
@@ -278,7 +328,10 @@ cmd_render(const struct cell_command_render *render)
/* make sure any pending clears have completed */
wait_on_mask(1 << TAG_SURFACE_CLEAR);

/* loop over tiles */

/**
 ** loop over tiles, rendering tris
 **/
for (i = spu.init.id; i < box_num_tiles; i += spu.init.num_spus) {
const uint tx = txmin + i % box_width_tiles;
const uint ty = tymin + i / box_width_tiles;

@@ -300,14 +353,15 @@ cmd_render(const struct cell_command_render *render)
}

ASSERT(render->prim_type == PIPE_PRIM_TRIANGLES);
ASSERT(render->num_indexes % 3 == 0);

/* loop over tris */
for (j = 0; j < render->num_indexes; j += 3) {
const float *v0, *v1, *v2;

v0 = (const float *) (vertex_data + indexes[j+0] * vertex_size);
v1 = (const float *) (vertex_data + indexes[j+1] * vertex_size);
v2 = (const float *) (vertex_data + indexes[j+2] * vertex_size);
v0 = (const float *) (vertices + indexes[j+0] * vertex_size);
v1 = (const float *) (vertices + indexes[j+1] * vertex_size);
v2 = (const float *) (vertices + indexes[j+2] * vertex_size);

tri_draw(v0, v1, v2, tx, ty);
}

@@ -508,8 +562,9 @@ cmd_batch(uint opcode)
{
struct cell_command_render *render
   = (struct cell_command_render *) &buffer[pos];
cmd_render(render);
pos += sizeof(*render) / 4;
uint pos_incr;
cmd_render(render, &pos_incr);
pos += sizeof(*render) / 4 + pos_incr;
}
break;
case CELL_CMD_FINISH:

@@ -591,7 +646,11 @@ main_loop(void)
cmd_clear_surface(&cmd.clear);
break;
case CELL_CMD_RENDER:
cmd_render(&cmd.render);
{
uint pos_incr;
cmd_render(&cmd.render, &pos_incr);
assert(pos_incr == 0);
}
break;
case CELL_CMD_BATCH:
cmd_batch(opcode);
@@ -50,6 +50,8 @@ struct vbuf_stage {

struct vbuf_render *render;

const struct vertex_info *vinfo;

/** Vertex size in bytes */
unsigned vertex_size;

@@ -82,10 +84,9 @@ vbuf_stage( struct draw_stage *stage )
}

static void vbuf_flush_indices( struct draw_stage *stage );
static void vbuf_flush_vertices( struct draw_stage *stage );
static void vbuf_alloc_vertices( struct draw_stage *stage,
                                 unsigned new_vertex_size );
static void vbuf_flush_indices( struct vbuf_stage *vbuf );
static void vbuf_flush_vertices( struct vbuf_stage *vbuf );
static void vbuf_alloc_vertices( struct vbuf_stage *vbuf );

static INLINE boolean

@@ -100,12 +101,12 @@ static INLINE void
check_space( struct vbuf_stage *vbuf, unsigned nr )
{
if (vbuf->nr_vertices + nr > vbuf->max_vertices ) {
vbuf_flush_vertices(&vbuf->stage);
vbuf_alloc_vertices(&vbuf->stage, vbuf->vertex_size);
vbuf_flush_vertices(vbuf);
vbuf_alloc_vertices(vbuf);
}

if (vbuf->nr_indices + nr > vbuf->max_indices )
vbuf_flush_indices(&vbuf->stage);
vbuf_flush_indices(vbuf);
}
@@ -120,10 +121,12 @@ static INLINE void
emit_vertex( struct vbuf_stage *vbuf,
             struct vertex_header *vertex )
{
const struct vertex_info *vinfo = vbuf->render->get_vertex_info(vbuf->render);
const struct vertex_info *vinfo = vbuf->vinfo;

uint i;
uint count = 0; /* for debug/sanity */

assert(vinfo == vbuf->render->get_vertex_info(vbuf->render));

// fprintf(stderr, "emit vertex %d to %p\n",
//         vbuf->nr_vertices, vbuf->vertex_ptr);

@@ -246,9 +249,8 @@ vbuf_point( struct draw_stage *stage,
 * will be flushed if needed and a new one allocated.
 */
static void
vbuf_set_prim( struct draw_stage *stage, uint newprim )
vbuf_set_prim( struct vbuf_stage *vbuf, uint newprim )
{
struct vbuf_stage *vbuf = vbuf_stage(stage);
const struct vertex_info *vinfo;
unsigned vertex_size;

@@ -263,10 +265,13 @@ vbuf_set_prim( struct draw_stage *stage, uint newprim )
vertex_size = vinfo->size * sizeof(float);

if (vertex_size != vbuf->vertex_size)
vbuf_flush_vertices(stage);
vbuf_flush_vertices(vbuf);

vbuf->vinfo = vinfo;
vbuf->vertex_size = vertex_size;

if (!vbuf->vertices)
vbuf_alloc_vertices(stage, vertex_size);
vbuf_alloc_vertices(vbuf);
}

@@ -274,9 +279,11 @@ static void
vbuf_first_tri( struct draw_stage *stage,
                struct prim_header *prim )
{
vbuf_flush_indices( stage );
struct vbuf_stage *vbuf = vbuf_stage( stage );

vbuf_flush_indices( vbuf );
stage->tri = vbuf_tri;
vbuf_set_prim(stage, PIPE_PRIM_TRIANGLES);
vbuf_set_prim(vbuf, PIPE_PRIM_TRIANGLES);
stage->tri( stage, prim );
}

@@ -285,9 +292,11 @@ static void
vbuf_first_line( struct draw_stage *stage,
                 struct prim_header *prim )
{
vbuf_flush_indices( stage );
struct vbuf_stage *vbuf = vbuf_stage( stage );

vbuf_flush_indices( vbuf );
stage->line = vbuf_line;
vbuf_set_prim(stage, PIPE_PRIM_LINES);
vbuf_set_prim(vbuf, PIPE_PRIM_LINES);
stage->line( stage, prim );
}
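The vbuf_first_tri/line/point handlers above show the draw module's lazy-binding trick: the "first" handler does the one-time per-primitive setup, patches the stage's function pointer to the fast path, then re-dispatches the same primitive. A self-contained sketch of that pattern; the struct and handler names here are illustrative, not the draw module's real API:

#include <stdio.h>

struct stage;
typedef void (*tri_func)(struct stage *s, int tri_id);

struct stage {
   tri_func tri;       /* current triangle handler */
   int prim_ready;     /* one-time setup flag */
};

/* fast path: assumes per-primitive state is already set up */
static void stage_tri(struct stage *s, int tri_id)
{
   printf("draw tri %d (prim_ready=%d)\n", tri_id, s->prim_ready);
}

/* slow path: runs once, installs the fast path, then re-dispatches */
static void stage_first_tri(struct stage *s, int tri_id)
{
   s->prim_ready = 1;          /* stands in for vbuf_set_prim() etc. */
   s->tri = stage_tri;
   s->tri(s, tri_id);
}

int main(void)
{
   struct stage s = { stage_first_tri, 0 };
   s.tri(&s, 0);   /* goes through the setup path */
   s.tri(&s, 1);   /* now hits stage_tri directly */
   return 0;
}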
@@ -296,18 +305,18 @@ static void
vbuf_first_point( struct draw_stage *stage,
                  struct prim_header *prim )
{
vbuf_flush_indices( stage );
struct vbuf_stage *vbuf = vbuf_stage( stage );

vbuf_flush_indices( vbuf );
stage->point = vbuf_point;
vbuf_set_prim(stage, PIPE_PRIM_POINTS);
vbuf_set_prim(vbuf, PIPE_PRIM_POINTS);
stage->point( stage, prim );
}

static void
vbuf_flush_indices( struct draw_stage *stage )
vbuf_flush_indices( struct vbuf_stage *vbuf )
{
struct vbuf_stage *vbuf = vbuf_stage( stage );

if(!vbuf->nr_indices)
return;

@@ -331,9 +340,12 @@ vbuf_flush_indices( struct draw_stage *stage )
vbuf->nr_indices = 0;

/* don't need to reset point/line/tri functions */
#if 0
stage->point = vbuf_first_point;
stage->line = vbuf_first_line;
stage->tri = vbuf_first_tri;
#endif
}

@@ -345,12 +357,10 @@ vbuf_flush_indices( struct draw_stage *stage )
 * we flush.
 */
static void
vbuf_flush_vertices( struct draw_stage *stage )
vbuf_flush_vertices( struct vbuf_stage *vbuf )
{
struct vbuf_stage *vbuf = vbuf_stage( stage );

if(vbuf->vertices) {
vbuf_flush_indices(stage);
vbuf_flush_indices(vbuf);

/* Reset temporary vertices ids */
if(vbuf->nr_vertices)

@@ -361,7 +371,7 @@ vbuf_flush_vertices( struct draw_stage *stage )
            vbuf->vertices,
            vbuf->vertex_size,
            vbuf->nr_vertices);
vbuf->nr_vertices = 0;
vbuf->max_vertices = vbuf->nr_vertices = 0;
vbuf->vertex_ptr = vbuf->vertices = NULL;

}

@@ -369,16 +379,12 @@ vbuf_flush_vertices( struct draw_stage *stage )

static void
vbuf_alloc_vertices( struct draw_stage *stage,
                     unsigned new_vertex_size )
vbuf_alloc_vertices( struct vbuf_stage *vbuf )
{
struct vbuf_stage *vbuf = vbuf_stage( stage );

assert(!vbuf->nr_indices);
assert(!vbuf->vertices);

/* Allocate a new vertex buffer */
vbuf->vertex_size = new_vertex_size;
vbuf->max_vertices = vbuf->render->max_vertex_buffer_bytes / vbuf->vertex_size;
vbuf->vertices = (uint *) vbuf->render->allocate_vertices(vbuf->render,
                                                          (ushort) vbuf->vertex_size,

@@ -391,14 +397,16 @@ vbuf_alloc_vertices( struct draw_stage *stage,
static void
vbuf_flush( struct draw_stage *stage, unsigned flags )
{
vbuf_flush_indices( stage );
struct vbuf_stage *vbuf = vbuf_stage( stage );

vbuf_flush_indices( vbuf );

stage->point = vbuf_first_point;
stage->line = vbuf_first_line;
stage->tri = vbuf_first_tri;

if (flags & DRAW_FLUSH_BACKEND)
vbuf_flush_vertices( stage );
vbuf_flush_vertices( vbuf );
}
@@ -5,13 +5,12 @@ include $(TOP)/configs/current
LIBNAME = pipebuffer

DRIVER_SOURCES = \
	pb_buffer.c \
	pb_buffer_client.c \
	pb_buffer_fenced.c \
	pb_buffer_malloc.c \
	pb_bufmgr_fenced.c \
	pb_bufmgr_mm.c \
	pb_bufmgr_pool.c
	pb_bufmgr_pool.c \
	pb_winsys.c

C_SOURCES = \
	$(DRIVER_SOURCES)
@@ -1,77 +0,0 @@
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer implementation.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 */

#include "pb_buffer.h"
#include "pipe/p_winsys.h"

static void *
pb_winsys_map(struct pipe_winsys *winsys,
              struct pipe_buffer *ws_buf,
              unsigned flags)
{
struct pb_buffer *buf = pb_buffer(ws_buf);

return buf->vtbl->map(buf, flags);
}

static void
pb_winsys_unmap(struct pipe_winsys *winsys,
                struct pipe_buffer *ws_buf)
{
struct pb_buffer *buf = pb_buffer(ws_buf);

buf->vtbl->unmap(buf);
}

static void
pb_winsys_destroy(struct pipe_winsys *winsys,
                  struct pipe_buffer *ws_buf)
{
struct pb_buffer *buf = pb_buffer(ws_buf);

buf->vtbl->destroy(buf);
}

void
pb_init_winsys(struct pipe_winsys *winsys)
{
winsys->buffer_map = pb_winsys_map;
winsys->buffer_unmap = pb_winsys_unmap;
winsys->buffer_destroy = pb_winsys_destroy;
}
@@ -48,8 +48,9 @@
#include <stdlib.h>

#include "pipe/p_compiler.h"

#include "pipe/p_state.h"
#include "pipe/p_inlines.h"

struct pb_vtbl;

@@ -81,6 +82,7 @@ struct pb_buffer
const struct pb_vtbl *vtbl;
};

/**
 * Virtual function table for the buffer storage operations.
 *

@@ -116,6 +118,24 @@ struct pb_vtbl
};

static INLINE struct pipe_buffer *
pb_pipe_buffer( struct pb_buffer *pbuf )
{
assert(pbuf);
return &pbuf->base;
}

static INLINE struct pb_buffer *
pb_buffer( struct pipe_buffer *buf )
{
assert(buf);
/* Could add a magic cookie check on debug builds.
 */
return (struct pb_buffer *)buf;
}

/* Accessor functions for pb->vtbl:
 */
static INLINE void *
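The pb_pipe_buffer()/pb_buffer() pair above relies on struct pb_buffer embedding its pipe_buffer base as the first member, so the two pointers are interchangeable. A minimal standalone sketch of that layout trick, with simplified struct contents rather than the real gallium definitions:

#include <assert.h>
#include <stddef.h>

/* stand-ins for the public and derived buffer types */
struct pipe_buffer_x {             /* "base class" */
   unsigned refcount;
   unsigned size;
};

struct pb_buffer_x {               /* "subclass": base must come first */
   struct pipe_buffer_x base;
   const void *vtbl;
};

/* upcast: derived -> base */
static struct pipe_buffer_x *to_pipe(struct pb_buffer_x *pbuf)
{
   return &pbuf->base;
}

/* downcast: base -> derived (valid only for buffers created as pb_buffer_x) */
static struct pb_buffer_x *to_pb(struct pipe_buffer_x *buf)
{
   return (struct pb_buffer_x *) buf;
}

int main(void)
{
   struct pb_buffer_x b = { { 1, 64 }, 0 };
   /* the casts round-trip because the base sits at offset 0 */
   assert(offsetof(struct pb_buffer_x, base) == 0);
   assert(to_pb(to_pipe(&b)) == &b);
   return 0;
}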
@@ -143,6 +163,7 @@ pb_get_base_buffer( struct pb_buffer *buf,
buf->vtbl->get_base_buffer(buf, base_buf, offset);
}

static INLINE void
pb_destroy(struct pb_buffer *buf)
{

@@ -151,19 +172,20 @@ pb_destroy(struct pb_buffer *buf)
}

/**
 * User buffers are special buffers that initially reference memory
 * held by the user but which may if necessary copy that memory into
 * device memory behind the scenes, for submission to hardware.
 *
 * These are particularly useful when the referenced data is never
 * submitted to hardware at all, in the particular case of software
 * vertex processing.
/* XXX: thread safety issues!
 */
struct pb_buffer *
pb_user_buffer_create(void *data, unsigned bytes);
static INLINE void
pb_reference(struct pb_buffer **dst,
             struct pb_buffer *src)
{
if (src)
src->base.refcount++;

if (*dst && --(*dst)->base.refcount == 0)
pb_destroy( *dst );

*dst = src;
}

/**
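The new pb_reference() helper is what replaces the bare pb_destroy() calls later in this diff: assigning through it bumps the new buffer's refcount, drops (and possibly destroys) whatever the pointer previously held, and pb_reference(&ptr, NULL) is the idiomatic release. A small self-contained model of that counting discipline, using simplified types rather than the real pb_buffer API:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
   unsigned refcount;
};

static void buf_destroy(struct buf *b)
{
   printf("destroying buffer\n");
   free(b);
}

/* same shape as pb_reference(): retain src, release *dst, then point at src */
static void buf_reference(struct buf **dst, struct buf *src)
{
   if (src)
      src->refcount++;
   if (*dst && --(*dst)->refcount == 0)
      buf_destroy(*dst);
   *dst = src;
}

int main(void)
{
   struct buf *a = malloc(sizeof *a);
   a->refcount = 1;

   struct buf *holder = NULL;
   buf_reference(&holder, a);        /* refcount -> 2 */
   assert(a->refcount == 2);

   buf_reference(&holder, NULL);     /* refcount -> 1, nothing destroyed */
   assert(a->refcount == 1);

   struct buf *owner = a;            /* the original owning pointer */
   buf_reference(&owner, NULL);      /* refcount -> 0, buffer destroyed */
   return 0;
}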
@@ -175,22 +197,8 @@ pb_malloc_buffer_create(size_t size,
                        const struct pb_desc *desc);

static INLINE struct pipe_buffer *
pb_pipe_buffer( struct pb_buffer *pbuf )
{
return &pbuf->base;
}

static INLINE struct pb_buffer *
pb_buffer( struct pipe_buffer *buf )
{
/* Could add a magic cookie check on debug builds.
 */
return (struct pb_buffer *)buf;
}

void
pb_init_winsys(struct pipe_winsys *winsys);

#endif /*PB_BUFFER_H_*/
@@ -80,7 +80,6 @@ struct fenced_buffer

struct pb_buffer *buffer;

unsigned refcount;
struct pipe_fence_handle *fence;

struct list_head head;

@@ -145,7 +144,7 @@ _fenced_buffer_list_check_free(struct fenced_buffer_list *fenced_list,

/* Do the delayed destroy:
 */
pb_destroy(fenced_buf->buffer);
pb_reference(&fenced_buf->buffer, NULL);
free(fenced_buf);
}
}

@@ -162,7 +161,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
fenced_list->numDelayed++;
}
else {
pb_destroy(fenced_buf->buffer);
pb_reference(&fenced_buf->buffer, NULL);
free(fenced_buf);
}

@@ -220,9 +219,13 @@ fenced_buffer_create(struct fenced_buffer_list *fenced_list,
if(!buf)
return NULL;

buf->base.base.refcount = 1;
buf->base.base.alignment = buffer->base.alignment;
buf->base.base.usage = buffer->base.usage;
buf->base.base.size = buffer->base.size;

buf->base.vtbl = &fenced_buffer_vtbl;
buf->buffer = buffer;
buf->refcount = 1;
buf->list = fenced_list;

return &buf->base;
@@ -114,10 +114,11 @@ pb_malloc_buffer_create(size_t size,
if(!buf)
return NULL;

buf->base.vtbl = &malloc_buffer_vtbl;
buf->base.base.refcount = 1;
buf->base.base.alignment = desc->alignment;
buf->base.base.usage = desc->usage;
buf->base.base.size = size;
buf->base.vtbl = &malloc_buffer_vtbl;

buf->data = align_malloc(size, desc->alignment < sizeof(void*) ? sizeof(void*) : desc->alignment);
if(!buf->data) {
@@ -88,6 +88,7 @@ fenced_bufmgr_create_buffer(struct pb_manager *mgr,

fenced_buf = fenced_buffer_create(fenced_mgr->fenced_list, buf);
if(!fenced_buf) {
assert(buf->base.refcount == 1);
pb_destroy(buf);
}
@@ -466,6 +466,11 @@ mm_bufmgr_create_buffer(struct pb_manager *mgr,
return NULL;
}

mm_buf->base.base.refcount = 1;
mm_buf->base.base.alignment = desc->alignment;
mm_buf->base.base.usage = desc->usage;
mm_buf->base.base.size = size;

mm_buf->base.vtbl = &mm_buffer_vtbl;

mm_buf->mgr = mm;

@@ -505,7 +510,7 @@ mm_bufmgr_destroy(struct pb_manager *mgr)
mmDestroy(mm->heap);

pb_unmap(mm->buffer);
pb_destroy(mm->buffer);
pb_reference(&mm->buffer, NULL);

_glthread_UNLOCK_MUTEX(mm->mutex);

@@ -579,7 +584,7 @@ mm_bufmgr_create(struct pb_manager *provider,

mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
if (!mgr) {
pb_destroy(buffer);
pb_reference(&buffer, NULL);
return NULL;
}
@@ -111,6 +111,8 @@ pool_buffer_destroy(struct pb_buffer *buf)
struct pool_buffer *pool_buf = pool_buffer(buf);
struct pool_pb_manager *pool = pool_buf->mgr;

assert(pool_buf->base.base.refcount == 0);

_glthread_LOCK_MUTEX(pool->mutex);
LIST_ADD(&pool_buf->head, &pool->free);
pool->numFree++;

@@ -192,7 +194,13 @@ pool_bufmgr_create_buffer(struct pb_manager *mgr,
--pool->numFree;

_glthread_UNLOCK_MUTEX(pool->mutex);

pool_buf = LIST_ENTRY(struct pool_buffer, item, head);
assert(pool_buf->base.base.refcount == 0);
pool_buf->base.base.refcount = 1;
pool_buf->base.base.alignment = desc->alignment;
pool_buf->base.base.usage = desc->usage;

return SUPER(pool_buf);
}

@@ -206,7 +214,7 @@ pool_bufmgr_destroy(struct pb_manager *mgr)
FREE(pool->bufs);

pb_unmap(pool->buffer);
pb_destroy(pool->buffer);
pb_reference(&pool->buffer, NULL);

_glthread_UNLOCK_MUTEX(pool->mutex);

@@ -256,6 +264,10 @@ pool_bufmgr_create(struct pb_manager *provider,

pool_buf = pool->bufs;
for (i = 0; i < numBufs; ++i) {
pool_buf->base.base.refcount = 0;
pool_buf->base.base.alignment = 0;
pool_buf->base.base.usage = 0;
pool_buf->base.base.size = bufSize;
pool_buf->base.vtbl = &pool_buffer_vtbl;
pool_buf->mgr = pool;
pool_buf->start = i * bufSize;

@@ -271,7 +283,7 @@ failure:
if(pool->map)
pb_unmap(pool->buffer);
if(pool->buffer)
pb_destroy(pool->buffer);
pb_reference(&pool->buffer, NULL);
if(pool)
FREE(pool);
return NULL;
@@ -34,10 +34,21 @@
 */

#include "pipe/p_winsys.h"
#include "pipe/p_util.h"

#include "pb_buffer.h"

/**
 * User buffers are special buffers that initially reference memory
 * held by the user but which may if necessary copy that memory into
 * device memory behind the scenes, for submission to hardware.
 *
 * These are particularly useful when the referenced data is never
 * submitted to hardware at all, in the particular case of software
 * vertex processing.
 */
struct pb_user_buffer
{
struct pb_buffer base;

@@ -67,7 +78,7 @@ pb_user_buffer_destroy(struct pb_buffer *buf)

static void *
pb_user_buffer_map(struct pb_buffer *buf,
                   unsigned flags)
                   unsigned flags)
{
return pb_user_buffer(buf)->data;
}

@@ -82,8 +93,8 @@ pb_user_buffer_unmap(struct pb_buffer *buf)

static void
pb_user_buffer_get_base_buffer(struct pb_buffer *buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
*base_buf = buf;
*offset = 0;

@@ -99,17 +110,61 @@ pb_user_buffer_vtbl = {
};

struct pb_buffer *
pb_user_buffer_create(void *data, unsigned bytes)
static struct pipe_buffer *
pb_winsys_user_buffer_create(struct pipe_winsys *winsys,
                             void *data,
                             unsigned bytes)
{
struct pb_user_buffer *buf = CALLOC_STRUCT(pb_user_buffer);

if(!buf)
return NULL;

buf->base.vtbl = &pb_user_buffer_vtbl;
buf->base.base.refcount = 1;
buf->base.base.size = bytes;
buf->base.base.alignment = 0;
buf->base.base.usage = 0;

buf->base.vtbl = &pb_user_buffer_vtbl;
buf->data = data;

return &buf->base;
return &buf->base.base;
}

static void *
pb_winsys_buffer_map(struct pipe_winsys *winsys,
                     struct pipe_buffer *buf,
                     unsigned flags)
{
(void)winsys;
return pb_map(pb_buffer(buf), flags);
}

static void
pb_winsys_buffer_unmap(struct pipe_winsys *winsys,
                       struct pipe_buffer *buf)
{
(void)winsys;
pb_unmap(pb_buffer(buf));
}

static void
pb_winsys_buffer_destroy(struct pipe_winsys *winsys,
                         struct pipe_buffer *buf)
{
(void)winsys;
pb_destroy(pb_buffer(buf));
}

void
pb_init_winsys(struct pipe_winsys *winsys)
{
winsys->user_buffer_create = pb_winsys_user_buffer_create;
winsys->buffer_map = pb_winsys_buffer_map;
winsys->buffer_unmap = pb_winsys_buffer_unmap;
winsys->buffer_destroy = pb_winsys_buffer_destroy;
}
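Taken together, pb_init_winsys() lets a winsys route the generic buffer entry points through the pb_buffer vtable with a single call. A toy, self-contained model of that "wire up the function-pointer table once, then call through it" idea; the toy_* names and structs are illustrative only, not the real pipe_winsys API:

#include <assert.h>

struct toy_buffer { int mapped; };

struct toy_winsys {
   void *(*buffer_map)(struct toy_winsys *ws, struct toy_buffer *buf);
   void  (*buffer_unmap)(struct toy_winsys *ws, struct toy_buffer *buf);
};

static void *toy_map(struct toy_winsys *ws, struct toy_buffer *buf)
{
   (void)ws;
   buf->mapped = 1;
   return buf;
}

static void toy_unmap(struct toy_winsys *ws, struct toy_buffer *buf)
{
   (void)ws;
   buf->mapped = 0;
}

/* analogous to pb_init_winsys(): point the generic hooks at one implementation */
static void toy_init_winsys(struct toy_winsys *ws)
{
   ws->buffer_map = toy_map;
   ws->buffer_unmap = toy_unmap;
}

int main(void)
{
   struct toy_winsys ws;
   struct toy_buffer buf = { 0 };
   toy_init_winsys(&ws);
   assert(ws.buffer_map(&ws, &buf) == &buf && buf.mapped == 1);
   ws.buffer_unmap(&ws, &buf);
   assert(buf.mapped == 0);
   return 0;
}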