draw: try to prevent overflows on index buffers

Pass in the size of the index buffer, when available, and use it
to handle out-of-bounds conditions. The behavior in the case of
an overflow needs to be the same as with other overflows in the
vertex processing pipeline, meaning that a vertex should still
be generated, but with all of its attributes set to zero.

Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: José Fonseca <jfonseca@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
This commit is contained in:
Zack Rusin 2013-05-13 23:07:14 -04:00
parent d5250da818
commit 5104ed3dbf
15 changed files with 139 additions and 70 deletions

View file

@ -627,7 +627,8 @@ void draw_set_render( struct draw_context *draw,
*/
void
draw_set_indexes(struct draw_context *draw,
const void *elements, unsigned elem_size)
const void *elements, unsigned elem_size,
unsigned elem_buffer_space)
{
assert(elem_size == 0 ||
elem_size == 1 ||
@ -635,6 +636,10 @@ draw_set_indexes(struct draw_context *draw,
elem_size == 4);
draw->pt.user.elts = elements;
draw->pt.user.eltSizeIB = elem_size;
if (elem_size)
draw->pt.user.eltMax = elem_buffer_space / elem_size;
else
draw->pt.user.eltMax = 0;
}

View file

@ -212,7 +212,8 @@ void draw_set_vertex_elements(struct draw_context *draw,
const struct pipe_vertex_element *elements);
void draw_set_indexes(struct draw_context *draw,
const void *elements, unsigned elem_size);
const void *elements, unsigned elem_size,
unsigned available_space);
void draw_set_mapped_vertex_buffer(struct draw_context *draw,
unsigned attr, const void *buffer,

View file

@ -1455,14 +1455,16 @@ draw_llvm_generate(struct draw_llvm *llvm, struct draw_llvm_variant *variant,
struct gallivm_state *gallivm = variant->gallivm;
LLVMContextRef context = gallivm->context;
LLVMTypeRef int32_type = LLVMInt32TypeInContext(context);
LLVMTypeRef arg_types[8];
LLVMTypeRef arg_types[9];
unsigned num_arg_types =
elts ? Elements(arg_types) : Elements(arg_types) - 1;
LLVMTypeRef func_type;
LLVMValueRef context_ptr;
LLVMBasicBlockRef block;
LLVMBuilderRef builder;
struct lp_type vs_type;
LLVMValueRef end, start;
LLVMValueRef count, fetch_elts, fetch_count;
LLVMValueRef count, fetch_elts, fetch_elt_max, fetch_count;
LLVMValueRef stride, step, io_itr;
LLVMValueRef io_ptr, vbuffers_ptr, vb_ptr;
LLVMValueRef zero = lp_build_const_int32(gallivm, 0);
@ -1495,19 +1497,21 @@ draw_llvm_generate(struct draw_llvm *llvm, struct draw_llvm_variant *variant,
memset(&system_values, 0, sizeof(system_values));
arg_types[0] = get_context_ptr_type(variant); /* context */
arg_types[1] = get_vertex_header_ptr_type(variant); /* vertex_header */
arg_types[2] = get_buffer_ptr_type(variant); /* vbuffers */
if (elts)
arg_types[3] = LLVMPointerType(int32_type, 0);/* fetch_elts * */
else
arg_types[3] = int32_type; /* start */
arg_types[4] = int32_type; /* fetch_count / count */
arg_types[5] = int32_type; /* stride */
arg_types[6] = get_vb_ptr_type(variant); /* pipe_vertex_buffer's */
arg_types[7] = int32_type; /* instance_id */
i = 0;
arg_types[i++] = get_context_ptr_type(variant); /* context */
arg_types[i++] = get_vertex_header_ptr_type(variant); /* vertex_header */
arg_types[i++] = get_buffer_ptr_type(variant); /* vbuffers */
if (elts) {
arg_types[i++] = LLVMPointerType(int32_type, 0);/* fetch_elts */
arg_types[i++] = int32_type; /* fetch_elt_max */
} else
arg_types[i++] = int32_type; /* start */
arg_types[i++] = int32_type; /* fetch_count / count */
arg_types[i++] = int32_type; /* stride */
arg_types[i++] = get_vb_ptr_type(variant); /* pipe_vertex_buffer's */
arg_types[i++] = int32_type; /* instance_id */
func_type = LLVMFunctionType(int32_type, arg_types, Elements(arg_types), 0);
func_type = LLVMFunctionType(int32_type, arg_types, num_arg_types, 0);
variant_func = LLVMAddFunction(gallivm->module,
elts ? "draw_llvm_shader_elts" : "draw_llvm_shader",
@ -1519,7 +1523,7 @@ draw_llvm_generate(struct draw_llvm *llvm, struct draw_llvm_variant *variant,
variant->function = variant_func;
LLVMSetFunctionCallConv(variant_func, LLVMCCallConv);
for (i = 0; i < Elements(arg_types); ++i)
for (i = 0; i < num_arg_types; ++i)
if (LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
LLVMAddAttribute(LLVMGetParam(variant_func, i),
LLVMNoAliasAttribute);
@ -1527,9 +1531,9 @@ draw_llvm_generate(struct draw_llvm *llvm, struct draw_llvm_variant *variant,
context_ptr = LLVMGetParam(variant_func, 0);
io_ptr = LLVMGetParam(variant_func, 1);
vbuffers_ptr = LLVMGetParam(variant_func, 2);
stride = LLVMGetParam(variant_func, 5);
vb_ptr = LLVMGetParam(variant_func, 6);
system_values.instance_id = LLVMGetParam(variant_func, 7);
stride = LLVMGetParam(variant_func, 5 + (elts ? 1 : 0));
vb_ptr = LLVMGetParam(variant_func, 6 + (elts ? 1 : 0));
system_values.instance_id = LLVMGetParam(variant_func, 7 + (elts ? 1 : 0));
lp_build_name(context_ptr, "context");
lp_build_name(io_ptr, "io");
@ -1539,9 +1543,11 @@ draw_llvm_generate(struct draw_llvm *llvm, struct draw_llvm_variant *variant,
lp_build_name(system_values.instance_id, "instance_id");
if (elts) {
fetch_elts = LLVMGetParam(variant_func, 3);
fetch_count = LLVMGetParam(variant_func, 4);
fetch_elts = LLVMGetParam(variant_func, 3);
fetch_elt_max = LLVMGetParam(variant_func, 4);
fetch_count = LLVMGetParam(variant_func, 5);
lp_build_name(fetch_elts, "fetch_elts");
lp_build_name(fetch_elt_max, "fetch_elt_max");
lp_build_name(fetch_count, "fetch_count");
start = count = NULL;
}
@ -1621,16 +1627,48 @@ draw_llvm_generate(struct draw_llvm *llvm, struct draw_llvm_variant *variant,
* a few of the 4 vertex fetches will be out of bounds */
true_index = lp_build_min(&bld, true_index, fetch_max);
system_values.vertex_id = LLVMBuildInsertElement(
gallivm->builder,
system_values.vertex_id, true_index,
lp_build_const_int32(gallivm, i), "");
if (elts) {
LLVMValueRef fetch_ptr;
fetch_ptr = LLVMBuildGEP(builder, fetch_elts,
&true_index, 1, "");
true_index = LLVMBuildLoad(builder, fetch_ptr, "fetch_elt");
LLVMValueRef index_overflowed;
LLVMValueRef index_ptr =
lp_build_alloca(
gallivm,
lp_build_vec_type(gallivm, lp_type_int(32)), "");
struct lp_build_if_state if_ctx;
index_overflowed = LLVMBuildICmp(builder, LLVMIntUGE,
true_index, fetch_elt_max,
"index_overflowed");
lp_build_if(&if_ctx, gallivm, index_overflowed);
{
/* Generate maximum possible index so that
* generate_fetch can treat it just like
* any other overflow and return zeros.
* We don't have to worry about the restart
* primitive index because it has already been
* handled
*/
LLVMValueRef val =
lp_build_const_int32(gallivm, 0xffffffff);
LLVMBuildStore(builder, val, index_ptr);
}
lp_build_else(&if_ctx);
{
LLVMValueRef val;
fetch_ptr = LLVMBuildGEP(builder, fetch_elts,
&true_index, 1, "");
val = LLVMBuildLoad(builder, fetch_ptr, "");
LLVMBuildStore(builder, val, index_ptr);
}
lp_build_endif(&if_ctx);
true_index = LLVMBuildLoad(builder, index_ptr, "true_index");
}
system_values.vertex_id = LLVMBuildInsertElement(gallivm->builder,
system_values.vertex_id, true_index,
lp_build_const_int32(gallivm, i), "");
for (j = 0; j < draw->pt.nr_vertex_elements; ++j) {
struct pipe_vertex_element *velem = &draw->pt.vertex_element[j];
LLVMValueRef vb_index =

View file

@ -271,6 +271,7 @@ typedef int
struct vertex_header *io,
const struct draw_vertex_buffer vbuffers[PIPE_MAX_ATTRIBS],
const unsigned *fetch_elts,
unsigned fetch_max_elt,
unsigned fetch_count,
unsigned stride,
struct pipe_vertex_buffer *vertex_buffers,

View file

@ -186,7 +186,8 @@ struct draw_context
/** bytes per index (0, 1, 2 or 4) */
unsigned eltSizeIB;
unsigned eltSize;
int eltBias;
unsigned eltMax;
int eltBias;
unsigned min_index;
unsigned max_index;
@ -460,4 +461,12 @@ draw_get_rasterizer_no_cull( struct draw_context *draw,
boolean flatshade );
/**
 * Return index i from the index buffer.
 * If the index buffer would overflow, we return the
 * index of the first element in the vertex buffer.
 */
#define DRAW_GET_IDX(elts, i) \
((i) >= draw->pt.user.eltMax) ? 0 : elts[i]
#endif /* DRAW_PRIVATE_H */

View file

@ -339,7 +339,7 @@ draw_print_arrays(struct draw_context *draw, uint prim, int start, uint count)
#define PRIM_RESTART_LOOP(elements) \
do { \
for (i = start; i < end; i++) { \
if (elements[i] == info->restart_index) { \
if (i < elt_max && elements[i] == info->restart_index) { \
if (cur_count > 0) { \
/* draw elts up to prev pos */ \
draw_pt_arrays(draw, prim, cur_start, cur_count); \
@ -371,6 +371,7 @@ draw_pt_arrays_restart(struct draw_context *draw,
const unsigned start = info->start;
const unsigned count = info->count;
const unsigned end = start + count;
const unsigned elt_max = draw->pt.user.eltMax;
unsigned i, cur_start, cur_count;
assert(info->primitive_restart);
@ -496,7 +497,7 @@ draw_vbo(struct draw_context *draw,
assert(info->instance_count > 0);
if (info->indexed)
assert(draw->pt.user.elts);
count = info->count;
draw->pt.user.eltBias = info->index_bias;
@ -506,7 +507,7 @@ draw_vbo(struct draw_context *draw,
if (0)
debug_printf("draw_vbo(mode=%u start=%u count=%u):\n",
info->mode, info->start, info->count);
info->mode, info->start, count);
if (0)
tgsi_dump(draw->vs.vertex_shader->state.tokens, 0);
@ -534,7 +535,7 @@ draw_vbo(struct draw_context *draw,
}
if (0)
draw_print_arrays(draw, info->mode, info->start, MIN2(info->count, 20));
draw_print_arrays(draw, info->mode, info->start, MIN2(count, 20));
index_limit = util_draw_max_index(draw->pt.vertex_buffer,
draw->pt.vertex_element,

View file

@ -359,6 +359,7 @@ llvm_pipeline_generic( struct draw_pt_middle_end *middle,
llvm_vert_info.verts,
draw->pt.user.vbuffer,
fetch_info->elts,
draw->pt.user.eltMax,
fetch_info->count,
fpme->vertex_size,
draw->pt.vertex_buffer,

View file

@ -45,16 +45,16 @@ CONCAT(vsplit_primitive_, ELT_TYPE)(struct vsplit_frontend *vsplit,
unsigned fetch_start, fetch_count;
const ushort *draw_elts = NULL;
unsigned i;
ib += istart;
const unsigned start = istart;
const unsigned end = istart + icount;
/* use the ib directly */
if (min_index == 0 && sizeof(ib[0]) == sizeof(draw_elts[0])) {
if (icount > vsplit->max_vertices)
return FALSE;
for (i = 0; i < icount; i++) {
ELT_TYPE idx = ib[i];
for (i = start; i < end; i++) {
ELT_TYPE idx = DRAW_GET_IDX(ib, i);
if (idx < min_index || idx > max_index) {
debug_printf("warning: index out of range\n");
}
@ -85,23 +85,23 @@ CONCAT(vsplit_primitive_, ELT_TYPE)(struct vsplit_frontend *vsplit,
if (!draw_elts) {
if (min_index == 0) {
for (i = 0; i < icount; i++) {
ELT_TYPE idx = ib[i];
for (i = start; i < end; i++) {
ELT_TYPE idx = DRAW_GET_IDX(ib, i);
if (idx < min_index || idx > max_index) {
debug_printf("warning: index out of range\n");
}
vsplit->draw_elts[i] = (ushort) idx;
vsplit->draw_elts[i - start] = (ushort) idx;
}
}
else {
for (i = 0; i < icount; i++) {
ELT_TYPE idx = ib[i];
for (i = start; i < end; i++) {
ELT_TYPE idx = DRAW_GET_IDX(ib, i);
if (idx < min_index || idx > max_index) {
debug_printf("warning: index out of range\n");
}
vsplit->draw_elts[i] = (ushort) (idx - min_index);
vsplit->draw_elts[i - start] = (ushort) (idx - min_index);
}
}
@ -138,41 +138,41 @@ CONCAT(vsplit_segment_cache_, ELT_TYPE)(struct vsplit_frontend *vsplit,
spoken = !!spoken;
if (ibias == 0) {
if (spoken)
ADD_CACHE(vsplit, ib[ispoken]);
ADD_CACHE(vsplit, DRAW_GET_IDX(ib, ispoken));
for (i = spoken; i < icount; i++)
ADD_CACHE(vsplit, ib[istart + i]);
ADD_CACHE(vsplit, DRAW_GET_IDX(ib, istart + i));
if (close)
ADD_CACHE(vsplit, ib[iclose]);
ADD_CACHE(vsplit, DRAW_GET_IDX(ib, iclose));
}
else if (ibias > 0) {
if (spoken)
ADD_CACHE(vsplit, (uint) ib[ispoken] + ibias);
ADD_CACHE(vsplit, (uint) DRAW_GET_IDX(ib, ispoken) + ibias);
for (i = spoken; i < icount; i++)
ADD_CACHE(vsplit, (uint) ib[istart + i] + ibias);
ADD_CACHE(vsplit, (uint) DRAW_GET_IDX(ib, istart + i) + ibias);
if (close)
ADD_CACHE(vsplit, (uint) ib[iclose] + ibias);
ADD_CACHE(vsplit, (uint) DRAW_GET_IDX(ib, iclose) + ibias);
}
else {
if (spoken) {
if ((int) ib[ispoken] < -ibias)
return;
ADD_CACHE(vsplit, ib[ispoken] + ibias);
ADD_CACHE(vsplit, DRAW_GET_IDX(ib, ispoken) + ibias);
}
for (i = spoken; i < icount; i++) {
if ((int) ib[istart + i] < -ibias)
if ((int) DRAW_GET_IDX(ib, istart + i) < -ibias)
return;
ADD_CACHE(vsplit, ib[istart + i] + ibias);
ADD_CACHE(vsplit, DRAW_GET_IDX(ib, istart + i) + ibias);
}
if (close) {
if ((int) ib[iclose] < -ibias)
if ((int) DRAW_GET_IDX(ib, iclose) < -ibias)
return;
ADD_CACHE(vsplit, ib[iclose] + ibias);
ADD_CACHE(vsplit, DRAW_GET_IDX(ib, iclose) + ibias);
}
}

View file

@ -83,7 +83,7 @@ i915_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
mapped_indices = i915_buffer(i915->index_buffer.buffer)->data;
draw_set_indexes(draw,
(ubyte *) mapped_indices + i915->index_buffer.offset,
i915->index_buffer.index_size);
i915->index_buffer.index_size, ~0);
}
if (i915->constants[PIPE_SHADER_VERTEX])
@ -109,7 +109,7 @@ i915_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
draw_set_mapped_vertex_buffer(i915->draw, i, NULL, 0);
}
if (mapped_indices)
draw_set_indexes(draw, NULL, 0);
draw_set_indexes(draw, NULL, 0, 0);
if (i915->num_vertex_sampler_views > 0)
i915_cleanup_vertex_sampling(i915);

View file

@ -81,13 +81,19 @@ llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
/* Map index buffer, if present */
if (info->indexed) {
unsigned available_space = ~0;
mapped_indices = lp->index_buffer.user_buffer;
if (!mapped_indices)
if (!mapped_indices) {
mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer);
if (lp->index_buffer.buffer->width0 > lp->index_buffer.offset)
available_space =
(lp->index_buffer.buffer->width0 - lp->index_buffer.offset);
else
available_space = 0;
}
draw_set_indexes(draw,
(ubyte *) mapped_indices + lp->index_buffer.offset,
lp->index_buffer.index_size);
lp->index_buffer.index_size, available_space);
}
for (i = 0; i < lp->num_so_targets; i++) {
@ -126,7 +132,7 @@ llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
draw_set_mapped_vertex_buffer(draw, i, NULL, 0);
}
if (mapped_indices) {
draw_set_indexes(draw, NULL, 0);
draw_set_indexes(draw, NULL, 0, 0);
}
draw_set_mapped_so_targets(draw, 0, NULL);

View file

@ -423,9 +423,9 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
PIPE_TRANSFER_READ, &transferi);
draw_set_indexes(draw,
(ubyte *) map + nv30->idxbuf.offset,
nv30->idxbuf.index_size);
nv30->idxbuf.index_size, ~0);
} else {
draw_set_indexes(draw, NULL, 0);
draw_set_indexes(draw, NULL, 0, 0);
}
draw_vbo(draw, info);

View file

@ -1856,7 +1856,7 @@ static void r300_set_index_buffer_swtcl(struct pipe_context* pipe,
}
draw_set_indexes(r300->draw,
(const ubyte *) buf + ib->offset,
ib->index_size);
ib->index_size, ~0);
}
}

View file

@ -89,13 +89,20 @@ softpipe_draw_vbo(struct pipe_context *pipe,
/* Map index buffer, if present */
if (info->indexed) {
unsigned available_space = ~0;
mapped_indices = sp->index_buffer.user_buffer;
if (!mapped_indices)
if (!mapped_indices) {
mapped_indices = softpipe_resource(sp->index_buffer.buffer)->data;
if (sp->index_buffer.buffer->width0 > sp->index_buffer.offset)
available_space =
(sp->index_buffer.buffer->width0 - sp->index_buffer.offset);
else
available_space = 0;
}
draw_set_indexes(draw,
(ubyte *) mapped_indices + sp->index_buffer.offset,
sp->index_buffer.index_size);
sp->index_buffer.index_size, available_space);
}
@ -125,7 +132,7 @@ softpipe_draw_vbo(struct pipe_context *pipe,
draw_set_mapped_vertex_buffer(draw, i, NULL, 0);
}
if (mapped_indices) {
draw_set_indexes(draw, NULL, 0);
draw_set_indexes(draw, NULL, 0, 0);
}
draw_set_mapped_so_targets(draw, 0, NULL);

View file

@ -84,7 +84,7 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
&ib_transfer);
draw_set_indexes(draw,
(const ubyte *) map + svga->curr.ib.offset,
svga->curr.ib.index_size);
svga->curr.ib.index_size, ~0);
}
if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
@ -118,7 +118,7 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
if (ib_transfer) {
pipe_buffer_unmap(&svga->pipe, ib_transfer);
draw_set_indexes(draw, NULL, 0);
draw_set_indexes(draw, NULL, 0, 0);
}
if (svga->curr.cb[PIPE_SHADER_VERTEX]) {

View file

@ -233,7 +233,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
draw_set_indexes(draw,
(ubyte *) mapped_indices + ibuffer.offset,
ibuffer.index_size);
ibuffer.index_size, ~0);
}
/* set the constant buffer */
@ -252,7 +252,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
* unmap vertex/index buffers
*/
if (ib) {
draw_set_indexes(draw, NULL, 0);
draw_set_indexes(draw, NULL, 0, 0);
if (ib_transfer)
pipe_buffer_unmap(pipe, ib_transfer);
pipe_resource_reference(&ibuffer.buffer, NULL);