[965] Convert brw_draw_upload to managing dri_bos, not gl_buffer_objects.

This helps us avoid a bunch of mess with gl_client_arrays that we filled
with unused data and confused readers.
Author: Eric Anholt
Date:   2008-02-02 23:27:19 -08:00
parent 0907c639c8
commit 2abcc512a3
4 changed files with 127 additions and 210 deletions
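
The core of the new scheme is the streaming upload buffer managed by wrap_buffers()/get_space() below: 64-byte-aligned slices of a single dri_bo are handed out at an offset cursor, and the bo is swapped for a fresh one whenever a request no longer fits. The following is a self-contained model of just that cursor arithmetic, with malloc/free standing in for dri_bo_alloc/dri_bo_unreference -- a sketch of the bookkeeping, not the driver API:

/* Standalone model of the upload-buffer logic in this commit: one streaming
 * buffer plus an offset cursor, replaced ("wrapped") whenever a request no
 * longer fits.  The real code refcounts the dri_bo instead of freeing it. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define UPLOAD_INIT_SIZE (128 * 1024)
#define ALIGN64(x) (((x) + 63) & ~(size_t)63)

struct upload {
   unsigned char *buf;     /* stands in for brw->vb.upload.bo */
   size_t size;
   size_t offset;
};

static void wrap_buffers(struct upload *u, size_t size)
{
   if (size < UPLOAD_INIT_SIZE)
      size = UPLOAD_INIT_SIZE;
   free(u->buf);                 /* real code: dri_bo_unreference() */
   u->buf = malloc(size);        /* real code: dri_bo_alloc() */
   u->size = size;
   u->offset = 0;
}

/* Hand back a buffer + offset with room for `size` bytes. */
static unsigned char *get_space(struct upload *u, size_t size, size_t *offset)
{
   size = ALIGN64(size);
   if (u->buf == NULL || u->offset + size > u->size)
      wrap_buffers(u, size);
   *offset = u->offset;
   u->offset += size;
   return u->buf;
}

int main(void)
{
   struct upload u = { 0 };
   size_t off;
   get_space(&u, 100, &off);         /* first request allocates the buffer */
   assert(off == 0);
   get_space(&u, 100, &off);         /* second request packs in behind it */
   assert(off == 128);               /* 100 rounded up to a 64-byte multiple */
   get_space(&u, 200 * 1024, &off);  /* too big for the remainder: wraps */
   assert(off == 0);
   printf("upload cursor model OK\n");
   free(u.buf);
   return 0;
}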


@@ -372,8 +372,16 @@ struct brw_cached_batch_item {
 struct brw_vertex_element {
    const struct gl_client_array *glarray;
+   /** Size of a complete element */
    GLuint element_size;
+   /** Number of uploaded elements for this input. */
    GLuint count;
+   /** Byte stride between elements in the uploaded array */
+   GLuint stride;
+   /** Offset of the first element within the buffer object */
+   unsigned int offset;
+   /** Buffer object containing the uploaded vertex data */
+   dri_bo *bo;
 };
@@ -433,11 +441,8 @@ struct brw_context
 #define BRW_UPLOAD_INIT_SIZE (128*1024)
    struct {
-      struct gl_buffer_object *vbo[BRW_NR_UPLOAD_BUFS];
-      GLuint buf;
+      dri_bo *bo;
       GLuint offset;
-      GLuint size;
-      GLuint wrap;
    } upload;
    /* Summary of size and varying of active arrays, so we can check


@@ -327,20 +327,6 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
    brw->no_batch_wrap = GL_FALSE;

-   /* Free any completed data so it doesn't clog up texture memory - we
-    * won't be referencing it again.
-    */
-   while (brw->vb.upload.wrap != brw->vb.upload.buf) {
-      ctx->Driver.BufferData(ctx,
-                             GL_ARRAY_BUFFER_ARB,
-                             BRW_UPLOAD_INIT_SIZE,
-                             NULL,
-                             GL_DYNAMIC_DRAW_ARB,
-                             brw->vb.upload.vbo[brw->vb.upload.wrap]);
-      brw->vb.upload.wrap++;
-      brw->vb.upload.wrap %= BRW_NR_UPLOAD_BUFS;
-   }
-
    UNLOCK_HARDWARE(intel);

    if (!retval)
@@ -418,44 +404,16 @@ void brw_draw_init( struct brw_context *brw )
 {
    GLcontext *ctx = &brw->intel.ctx;
    struct vbo_context *vbo = vbo_context(ctx);
-   GLuint i;

    /* Register our drawing function:
     */
    vbo->draw_prims = brw_draw_prims;
-
-   brw->vb.upload.size = BRW_UPLOAD_INIT_SIZE;
-
-   for (i = 0; i < BRW_NR_UPLOAD_BUFS; i++) {
-      brw->vb.upload.vbo[i] = ctx->Driver.NewBufferObject(ctx, 1, GL_ARRAY_BUFFER_ARB);
-
-      ctx->Driver.BufferData(ctx,
-                             GL_ARRAY_BUFFER_ARB,
-                             BRW_UPLOAD_INIT_SIZE,
-                             NULL,
-                             GL_DYNAMIC_DRAW_ARB,
-                             brw->vb.upload.vbo[i]);
-
-      /* Set the internal VBOs to no-backing-store.  We only use them as a
-       * temporary within a brw_try_draw_prims while the lock is held.
-       */
-      if (!brw->intel.ttm) {
-         struct intel_buffer_object *intel_bo =
-            intel_buffer_object(brw->vb.upload.vbo[i]);
-
-         dri_bo_fake_disable_backing_store(intel_bufferobj_buffer(&brw->intel,
-                                                                  intel_bo,
-                                                                  INTEL_READ),
-                                           NULL, NULL);
-      }
-   }
 }

 void brw_draw_destroy( struct brw_context *brw )
 {
-   GLcontext *ctx = &brw->intel.ctx;
-   GLuint i;
-
-   for (i = 0; i < BRW_NR_UPLOAD_BUFS; i++)
-      ctx->Driver.DeleteBuffer(ctx, brw->vb.upload.vbo[i]);
+   if (brw->vb.upload.bo != NULL) {
+      dri_bo_unreference(brw->vb.upload.bo);
+      brw->vb.upload.bo = NULL;
+   }
 }


@@ -44,13 +44,6 @@
 #include "intel_buffer_objects.h"
 #include "intel_tex.h"

-static dri_bo *array_buffer( struct intel_context *intel,
-                             const struct gl_client_array *array )
-{
-   return intel_bufferobj_buffer(intel, intel_buffer_object(array->BufferObj),
-                                 INTEL_WRITE_PART);
-}
-
 static GLuint double_types[5] = {
    0,
    BRW_SURFACEFORMAT_R64_FLOAT,
@@ -246,34 +239,40 @@ static void copy_strided_array( GLubyte *dest,
 static void wrap_buffers( struct brw_context *brw,
                           GLuint size )
 {
-   GLcontext *ctx = &brw->intel.ctx;
-
    if (size < BRW_UPLOAD_INIT_SIZE)
      size = BRW_UPLOAD_INIT_SIZE;

-   brw->vb.upload.buf++;
-   brw->vb.upload.buf %= BRW_NR_UPLOAD_BUFS;
    brw->vb.upload.offset = 0;

-   ctx->Driver.BufferData(ctx,
-                          GL_ARRAY_BUFFER_ARB,
-                          size,
-                          NULL,
-                          GL_DYNAMIC_DRAW_ARB,
-                          brw->vb.upload.vbo[brw->vb.upload.buf]);
+   if (brw->vb.upload.bo != NULL)
+      dri_bo_unreference(brw->vb.upload.bo);
+   brw->vb.upload.bo = dri_bo_alloc(brw->intel.bufmgr, "temporary VBO",
+                                    size, 1,
+                                    DRM_BO_FLAG_MEM_LOCAL |
+                                    DRM_BO_FLAG_CACHED |
+                                    DRM_BO_FLAG_CACHED_MAPPED);
+
+   /* Set the internal VBOs to no-backing-store.  We only use them as a
+    * temporary within a brw_try_draw_prims while the lock is held.
+    */
+   if (!brw->intel.ttm)
+      dri_bo_fake_disable_backing_store(brw->vb.upload.bo, NULL, NULL);
 }

 static void get_space( struct brw_context *brw,
                        GLuint size,
-                       struct gl_buffer_object **vbo_return,
+                       dri_bo **bo_return,
                        GLuint *offset_return )
 {
    size = ALIGN(size, 64);

-   if (brw->vb.upload.offset + size > BRW_UPLOAD_INIT_SIZE)
-      wrap_buffers(brw, size);
+   if (brw->vb.upload.bo == NULL ||
+       brw->vb.upload.offset + size > brw->vb.upload.bo->size) {
+      wrap_buffers(brw, size);
+   }

-   *vbo_return = brw->vb.upload.vbo[brw->vb.upload.buf];
+   dri_bo_reference(brw->vb.upload.bo);
+   *bo_return = brw->vb.upload.bo;
    *offset_return = brw->vb.upload.offset;
    brw->vb.upload.offset += size;
@@ -281,88 +280,29 @@ static void get_space( struct brw_context *brw,
 static void
 copy_array_to_vbo_array( struct brw_context *brw,
-                         struct gl_client_array *vbo_array,
-                         const struct gl_client_array *array,
-                         GLuint element_size,
-                         GLuint count)
+                         struct brw_vertex_element *element,
+                         GLuint dst_stride)
 {
-   GLcontext *ctx = &brw->intel.ctx;
-   GLuint size = count * element_size;
-   struct gl_buffer_object *vbo;
-   GLuint offset;
-   GLuint new_stride;
+   GLuint size = element->count * dst_stride;

-   get_space(brw, size, &vbo, &offset);
+   get_space(brw, size, &element->bo, &element->offset);

-   if (array->StrideB == 0) {
-      assert(count == 1);
-      new_stride = 0;
+   if (element->glarray->StrideB == 0) {
+      assert(element->count == 1);
+      element->stride = 0;
+   } else {
+      element->stride = dst_stride;
    }
-   else
-      new_stride = element_size;

-   vbo_array->Size = array->Size;
-   vbo_array->Type = array->Type;
-   vbo_array->Stride = new_stride;
-   vbo_array->StrideB = new_stride;
-   vbo_array->Ptr = (const void *)offset;
-   vbo_array->Enabled = 1;
-   vbo_array->Normalized = array->Normalized;
-   vbo_array->_MaxElement = array->_MaxElement;    /* ? */
-   vbo_array->BufferObj = vbo;
-
-   {
-      GLubyte *map = ctx->Driver.MapBuffer(ctx,
-                                           GL_ARRAY_BUFFER_ARB,
-                                           GL_DYNAMIC_DRAW_ARB,
-                                           vbo);
-      map += offset;
-      copy_strided_array( map,
-                          array->Ptr,
-                          element_size,
-                          array->StrideB,
-                          count);
-      ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER_ARB, vbo_array->BufferObj);
-   }
+   dri_bo_map(element->bo, GL_TRUE);
+   copy_strided_array((unsigned char *)element->bo->virtual + element->offset,
+                      element->glarray->Ptr,
+                      dst_stride,
+                      element->glarray->StrideB,
+                      element->count);
+   dri_bo_unmap(element->bo);
 }

-/**
- * Just a wrapper to highlight which cause of copy_array_to_vbo_array
- * is happening in the profile.
- */
-static void
-interleaved_copy_array_to_vbo_array(struct brw_context *brw,
-                                    struct gl_client_array *vbo_array,
-                                    const struct gl_client_array *array,
-                                    GLuint element_size,
-                                    GLuint count)
-{
-   copy_array_to_vbo_array(brw, vbo_array, array, element_size, count);
-}
-
-static void
-interleaved_vbo_array( struct brw_context *brw,
-                       struct gl_client_array *vbo_array,
-                       const struct gl_client_array *uploaded_array,
-                       const struct gl_client_array *array,
-                       const char *ptr)
-{
-   vbo_array->Size = array->Size;
-   vbo_array->Type = array->Type;
-   vbo_array->Stride = array->Stride;
-   vbo_array->StrideB = array->StrideB;
-   vbo_array->Ptr = (const void *)((const char *)uploaded_array->Ptr +
-                                   ((const char *)array->Ptr - ptr));
-   vbo_array->Enabled = 1;
-   vbo_array->Normalized = array->Normalized;
-   vbo_array->_MaxElement = array->_MaxElement;
-   vbo_array->BufferObj = uploaded_array->BufferObj;
-}
-
 GLboolean brw_upload_vertices( struct brw_context *brw,
                                GLuint min_index,
                                GLuint max_index )
@@ -371,9 +311,8 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
    struct intel_context *intel = intel_context(ctx);
    GLuint tmp = brw->vs.prog_data->inputs_read;
    GLuint i;
-   const void *ptr = NULL;
+   const unsigned char *ptr = NULL;
    GLuint interleave = 0;
-   struct gl_client_array vbo_array_temp[VERT_ATTRIB_MAX];
    struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
    GLuint nr_enabled = 0;
@@ -385,18 +324,45 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
     */
    if (0)
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

+   /* Accumulate the list of enabled arrays. */
    while (tmp) {
      GLuint i = _mesa_ffsll(tmp)-1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      tmp &= ~(1<<i);
      enabled[nr_enabled++] = input;
+   }
+
+   /* XXX: In the rare cases where this happens we fallback all
+    * the way to software rasterization, although a tnl fallback
+    * would be sufficient.  I don't know of *any* real world
+    * cases with > 17 vertex attributes enabled, so it probably
+    * isn't an issue at this point.
+    */
+   if (nr_enabled >= BRW_VEP_MAX)
+      return GL_FALSE;
+
+   for (i = 0; i < nr_enabled; i++) {
+      struct brw_vertex_element *input = enabled[i];

      input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
      input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;

-      if (!input->glarray->BufferObj->Name) {
+      if (input->glarray->BufferObj->Name != 0) {
+         struct intel_buffer_object *intel_buffer =
+            intel_buffer_object(input->glarray->BufferObj);
+
+         /* Named buffer object: Just reference its contents directly. */
+         input->bo = intel_bufferobj_buffer(intel, intel_buffer,
+                                            INTEL_READ);
+         dri_bo_reference(input->bo);
+         input->offset = (unsigned long)input->glarray->Ptr;
+         input->stride = input->glarray->StrideB;
+      } else {
+         /* Queue the buffer object up to be uploaded in the next pass,
+          * when we've decided if we're doing interleaved or not.
+          */
         if (i == 0) {
            /* Position array not properly enabled:
             */
@@ -407,8 +373,9 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
            ptr = input->glarray->Ptr;
         }
         else if (interleave != input->glarray->StrideB ||
-                 (const char *)input->glarray->Ptr - (const char *)ptr < 0 ||
-                 (const char *)input->glarray->Ptr - (const char *)ptr > interleave) {
+                 (const unsigned char *)input->glarray->Ptr - ptr < 0 ||
+                 (const unsigned char *)input->glarray->Ptr - ptr > interleave)
+         {
            interleave = 0;
         }
@@ -425,43 +392,29 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
      }
    }

-   /* Upload interleaved arrays if all uploads are interleaved
-    */
+   /* Handle any arrays to be uploaded. */
    if (nr_uploads > 1 && interleave && interleave <= 256) {
-      interleaved_copy_array_to_vbo_array(brw, &vbo_array_temp[0],
-                                          upload[0]->glarray,
-                                          interleave,
-                                          upload[0]->count);
-      upload[0]->glarray = &vbo_array_temp[0];
+      /* All uploads are interleaved, so upload the arrays together as
+       * interleaved.  First, upload the contents and set up upload[0].
+       */
+      copy_array_to_vbo_array(brw, upload[0], interleave);

      for (i = 1; i < nr_uploads; i++) {
-         interleaved_vbo_array(brw,
-                               &vbo_array_temp[i],
-                               upload[0]->glarray,
-                               upload[i]->glarray,
-                               ptr);
-         upload[i]->glarray = &vbo_array_temp[i];
+         /* Then, just point upload[i] at upload[0]'s buffer. */
+         upload[i]->stride = interleave;
+         upload[i]->offset = upload[0]->offset +
+            ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
+         upload[i]->bo = upload[0]->bo;
+         dri_bo_reference(upload[i]->bo);
      }
    }
    else {
+      /* Upload non-interleaved arrays */
      for (i = 0; i < nr_uploads; i++) {
-         copy_array_to_vbo_array(brw, &vbo_array_temp[i],
-                                 upload[i]->glarray,
-                                 upload[i]->element_size,
-                                 upload[i]->count);
-         upload[i]->glarray = &vbo_array_temp[i];
+         copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
      }
    }

-   /* XXX: In the rare cases where this happens we fallback all
-    * the way to software rasterization, although a tnl fallback
-    * would be sufficient.  I don't know of *any* real world
-    * cases with > 17 vertex attributes enabled, so it probably
-    * isn't an issue at this point.
-    */
-   if (nr_enabled >= BRW_VEP_MAX)
-      return GL_FALSE;
-
    /* Now emit VB and VEP state packets.
    *
    * This still defines a hardware VB for each input, even if they
@@ -477,12 +430,18 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
      OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
                BRW_VB0_ACCESS_VERTEXDATA |
-               (input->glarray->StrideB << BRW_VB0_PITCH_SHIFT));
-      OUT_RELOC(array_buffer(intel, input->glarray),
+               (input->stride << BRW_VB0_PITCH_SHIFT));
+      OUT_RELOC(input->bo,
                DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
-               (GLuint)input->glarray->Ptr);
+               input->offset);
      OUT_BATCH(max_index);
      OUT_BATCH(0); /* Instance data step rate */
+
+      /* Unreference the buffer so it can get freed, now that we won't
+       * touch it any more.
+       */
+      dri_bo_unreference(input->bo);
+      input->bo = NULL;
    }
    ADVANCE_BATCH();
@@ -527,6 +486,7 @@ void brw_upload_indices( struct brw_context *brw,
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
+  dri_bo *bo;
   struct gl_buffer_object *bufferobj = index_buffer->obj;
   GLuint offset = (GLuint)index_buffer->ptr;
@@ -536,40 +496,31 @@ void brw_upload_indices( struct brw_context *brw,
      /* Get new bufferobj, offset:
       */
-      get_space(brw, ib_size, &bufferobj, &offset);
+      get_space(brw, ib_size, &bo, &offset);

      /* Straight upload
       */
-      ctx->Driver.BufferSubData( ctx,
-                                 GL_ELEMENT_ARRAY_BUFFER_ARB,
-                                 offset,
-                                 ib_size,
-                                 index_buffer->ptr,
-                                 bufferobj);
+      dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
   } else {
      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
-         struct gl_buffer_object *vbo;
-         GLuint voffset;
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

-         get_space(brw, ib_size, &vbo, &voffset);
-
-         ctx->Driver.BufferSubData(ctx,
-                                   GL_ELEMENT_ARRAY_BUFFER_ARB,
-                                   voffset,
-                                   ib_size,
-                                   map,
-                                   vbo);
-         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
-
-         bufferobj = vbo;
-         offset = voffset;
+         get_space(brw, ib_size, &bo, &offset);
+
+         dri_bo_subdata(bo, offset, ib_size, map);
+
+         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
+      } else {
+         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
+                                     INTEL_READ);
+         dri_bo_reference(bo);
      }
   }
@@ -577,9 +528,6 @@ void brw_upload_indices( struct brw_context *brw,
    */
   {
      struct brw_indexbuffer ib;
-      dri_bo *buffer = intel_bufferobj_buffer(intel,
-                                              intel_buffer_object(bufferobj),
-                                              INTEL_READ);

      memset(&ib, 0, sizeof(ib));
@@ -591,10 +539,12 @@ void brw_upload_indices( struct brw_context *brw,
      BEGIN_BATCH(4, IGNORE_CLIPRECTS);
      OUT_BATCH( ib.header.dword );
-      OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, offset);
-      OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+      OUT_RELOC( bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, offset);
+      OUT_RELOC( bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                 offset + ib_size);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();
+
+      dri_bo_unreference(bo);
   }
 }


@@ -110,7 +110,11 @@ static void brw_new_batch( struct intel_context *intel )
   /* Move to the end of the current upload buffer so that we'll force choosing
    * a new buffer next time.
    */
-   brw->vb.upload.offset = brw->vb.upload.vbo[brw->vb.upload.buf]->Size;
+   if (brw->vb.upload.bo != NULL) {
+      dri_bo_unreference(brw->vb.upload.bo);
+      brw->vb.upload.bo = NULL;
+      brw->vb.upload.offset = 0;
+   }
 }

 static void brw_note_fence( struct intel_context *intel,
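
For reference, the buffer ownership this diff establishes: get_space() takes a reference for each caller, brw_upload_vertices() drops each element's reference once its relocation has been emitted, and brw_new_batch()/brw_draw_destroy() drop the upload buffer's own reference so it can eventually be freed. A toy refcount model of that lifecycle (plain C; the real refcounting lives inside the dri buffer manager, so this is only an illustration):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for dri_bo: just a refcount around an allocation. */
struct bo {
   int refcount;
   size_t size;
};

static struct bo *bo_alloc(size_t size)
{
   struct bo *b = calloc(1, sizeof(*b));
   b->refcount = 1;
   b->size = size;
   return b;
}

static void bo_reference(struct bo *b)
{
   b->refcount++;
}

static void bo_unreference(struct bo *b)
{
   if (b && --b->refcount == 0)
      free(b);
}

int main(void)
{
   /* wrap_buffers(): the upload state owns one reference. */
   struct bo *upload = bo_alloc(128 * 1024);

   /* get_space(): each vertex element takes its own reference... */
   struct bo *element_bo = upload;
   bo_reference(element_bo);

   /* ...which brw_upload_vertices() drops after emitting the relocation. */
   bo_unreference(element_bo);

   /* brw_new_batch() / brw_draw_destroy(): drop the upload reference too. */
   bo_unreference(upload);
   printf("all references released\n");
   return 0;
}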