gallium: decrease the size of pipe_vertex_buffer - 24 -> 16 bytes

This commit is contained in:
Marek Olšák 2017-04-02 14:30:16 +02:00
parent fe437882ea
commit c24c3b94ed
80 changed files with 400 additions and 368 deletions

View file

@ -408,8 +408,8 @@ void cso_destroy_context( struct cso_context *ctx )
util_unreference_framebuffer_state(&ctx->fb);
util_unreference_framebuffer_state(&ctx->fb_saved);
pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer, NULL);
pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);
pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
@ -1150,15 +1150,9 @@ void cso_set_vertex_buffers(struct cso_context *ctx,
const struct pipe_vertex_buffer *vb =
buffers + (ctx->aux_vertex_buffer_index - start_slot);
pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
vb->buffer);
memcpy(&ctx->aux_vertex_buffer_current, vb,
sizeof(struct pipe_vertex_buffer));
}
else {
pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
NULL);
ctx->aux_vertex_buffer_current.user_buffer = NULL;
pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_current, vb);
} else {
pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_current);
}
}
@ -1175,10 +1169,8 @@ cso_save_aux_vertex_buffer_slot(struct cso_context *ctx)
return;
}
pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer,
ctx->aux_vertex_buffer_current.buffer);
memcpy(&ctx->aux_vertex_buffer_saved, &ctx->aux_vertex_buffer_current,
sizeof(struct pipe_vertex_buffer));
pipe_vertex_buffer_reference(&ctx->aux_vertex_buffer_saved,
&ctx->aux_vertex_buffer_current);
}
static void
@ -1193,7 +1185,7 @@ cso_restore_aux_vertex_buffer_slot(struct cso_context *ctx)
cso_set_vertex_buffers(ctx, ctx->aux_vertex_buffer_index, 1,
&ctx->aux_vertex_buffer_saved);
pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);
pipe_vertex_buffer_unreference(&ctx->aux_vertex_buffer_saved);
}
unsigned cso_get_aux_vertex_buffer_slot(struct cso_context *ctx)

View file

@ -206,9 +206,8 @@ void draw_destroy( struct draw_context *draw )
}
}
for (i = 0; i < draw->pt.nr_vertex_buffers; i++) {
pipe_resource_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
}
for (i = 0; i < draw->pt.nr_vertex_buffers; i++)
pipe_vertex_buffer_unreference(&draw->pt.vertex_buffer[i]);
/* Not so fast -- we're just borrowing this at the moment.
*

View file

@ -352,9 +352,9 @@ create_jit_vertex_buffer_type(struct gallivm_state *gallivm,
LLVMTypeRef elem_types[4];
LLVMTypeRef vb_type;
elem_types[0] =
elem_types[1] = LLVMInt32TypeInContext(gallivm->context);
elem_types[2] =
elem_types[0] = LLVMInt16TypeInContext(gallivm->context);
elem_types[1] = LLVMInt8TypeInContext(gallivm->context);
elem_types[2] = LLVMInt32TypeInContext(gallivm->context);
elem_types[3] = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0);
vb_type = LLVMStructTypeInContext(gallivm->context, elem_types,
@ -363,8 +363,12 @@ create_jit_vertex_buffer_type(struct gallivm_state *gallivm,
(void) target; /* silence unused var warning for non-debug build */
LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, stride,
target, vb_type, 0);
LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, buffer_offset,
LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, is_user_buffer,
target, vb_type, 1);
LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, buffer_offset,
target, vb_type, 2);
LP_CHECK_MEMBER_OFFSET(struct pipe_vertex_buffer, buffer.resource,
target, vb_type, 3);
LP_CHECK_STRUCT_SIZE(struct pipe_vertex_buffer, target, vb_type);
@ -1699,6 +1703,8 @@ draw_llvm_generate(struct draw_llvm *llvm, struct draw_llvm_variant *variant)
vbuffer_ptr = LLVMBuildGEP(builder, vbuffers_ptr, &vb_index, 1, "");
vb_info = LLVMBuildGEP(builder, vb_ptr, &vb_index, 1, "");
vb_stride[j] = draw_jit_vbuffer_stride(gallivm, vb_info);
vb_stride[j] = LLVMBuildZExt(gallivm->builder, vb_stride[j],
LLVMInt32TypeInContext(context), "");
vb_buffer_offset = draw_jit_vbuffer_offset(gallivm, vb_info);
map_ptr[j] = draw_jit_dvbuffer_map(gallivm, vbuffer_ptr);
buffer_size = draw_jit_dvbuffer_size(gallivm, vbuffer_ptr);

View file

@ -172,7 +172,7 @@ enum {
lp_build_struct_get(_gallivm, _ptr, 0, "stride")
#define draw_jit_vbuffer_offset(_gallivm, _ptr) \
lp_build_struct_get(_gallivm, _ptr, 1, "buffer_offset")
lp_build_struct_get(_gallivm, _ptr, 2, "buffer_offset")
enum {
DRAW_JIT_DVBUFFER_MAP = 0,

View file

@ -579,15 +579,15 @@ hud_draw(struct hud_context *hud, struct pipe_resource *tex)
hud->whitelines.buffer_size +
hud->text.buffer_size +
hud->color_prims.buffer_size,
16, &hud->bg.vbuf.buffer_offset, &hud->bg.vbuf.buffer,
16, &hud->bg.vbuf.buffer_offset, &hud->bg.vbuf.buffer.resource,
(void**)&hud->bg.vertices);
if (!hud->bg.vertices) {
goto out;
}
pipe_resource_reference(&hud->whitelines.vbuf.buffer, hud->bg.vbuf.buffer);
pipe_resource_reference(&hud->text.vbuf.buffer, hud->bg.vbuf.buffer);
pipe_resource_reference(&hud->color_prims.vbuf.buffer, hud->bg.vbuf.buffer);
pipe_resource_reference(&hud->whitelines.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
pipe_resource_reference(&hud->text.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
pipe_resource_reference(&hud->color_prims.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
hud->whitelines.vbuf.buffer_offset = hud->bg.vbuf.buffer_offset +
hud->bg.buffer_size;
@ -654,7 +654,7 @@ hud_draw(struct hud_context *hud, struct pipe_resource *tex)
&hud->bg.vbuf);
cso_draw_arrays(cso, PIPE_PRIM_QUADS, 0, hud->bg.num_vertices);
}
pipe_resource_reference(&hud->bg.vbuf.buffer, NULL);
pipe_resource_reference(&hud->bg.vbuf.buffer.resource, NULL);
/* draw accumulated vertices for white lines */
cso_set_blend(cso, &hud->no_blend);
@ -675,7 +675,7 @@ hud_draw(struct hud_context *hud, struct pipe_resource *tex)
cso_set_fragment_shader_handle(hud->cso, hud->fs_color);
cso_draw_arrays(cso, PIPE_PRIM_LINES, 0, hud->whitelines.num_vertices);
}
pipe_resource_reference(&hud->whitelines.vbuf.buffer, NULL);
pipe_resource_reference(&hud->whitelines.vbuf.buffer.resource, NULL);
/* draw accumulated vertices for text */
cso_set_blend(cso, &hud->alpha_blend);
@ -685,7 +685,7 @@ hud_draw(struct hud_context *hud, struct pipe_resource *tex)
cso_set_fragment_shader_handle(hud->cso, hud->fs_text);
cso_draw_arrays(cso, PIPE_PRIM_QUADS, 0, hud->text.num_vertices);
}
pipe_resource_reference(&hud->text.vbuf.buffer, NULL);
pipe_resource_reference(&hud->text.vbuf.buffer.resource, NULL);
/* draw the rest */
cso_set_rasterizer(cso, &hud->rasterizer_aa_lines);

View file

@ -539,7 +539,7 @@ void util_blitter_restore_vertex_states(struct blitter_context *blitter)
/* Vertex buffer. */
pipe->set_vertex_buffers(pipe, ctx->base.vb_slot, 1,
&ctx->base.saved_vertex_buffer);
pipe_resource_reference(&ctx->base.saved_vertex_buffer.buffer, NULL);
pipe_vertex_buffer_unreference(&ctx->base.saved_vertex_buffer);
/* Vertex elements. */
pipe->bind_vertex_elements_state(pipe, ctx->base.saved_velem_state);
@ -1209,15 +1209,15 @@ static void blitter_draw(struct blitter_context_priv *ctx,
vb.stride = 8 * sizeof(float);
u_upload_data(pipe->stream_uploader, 0, sizeof(ctx->vertices), 4, ctx->vertices,
&vb.buffer_offset, &vb.buffer);
if (!vb.buffer)
&vb.buffer_offset, &vb.buffer.resource);
if (!vb.buffer.resource)
return;
u_upload_unmap(pipe->stream_uploader);
pipe->set_vertex_buffers(pipe, ctx->base.vb_slot, 1, &vb);
util_draw_arrays_instanced(pipe, PIPE_PRIM_TRIANGLE_FAN, 0, 4,
0, num_instances);
pipe_resource_reference(&vb.buffer, NULL);
pipe_resource_reference(&vb.buffer.resource, NULL);
}
void util_blitter_draw_rectangle(struct blitter_context *blitter,
@ -2199,7 +2199,8 @@ void util_blitter_copy_buffer(struct blitter_context *blitter,
blitter_check_saved_vertex_states(ctx);
blitter_disable_render_cond(ctx);
vb.buffer = src;
vb.is_user_buffer = false;
vb.buffer.resource = src;
vb.buffer_offset = srcx;
vb.stride = 4;
@ -2259,8 +2260,8 @@ void util_blitter_clear_buffer(struct blitter_context *blitter,
}
u_upload_data(pipe->stream_uploader, 0, num_channels*4, 4, clear_value,
&vb.buffer_offset, &vb.buffer);
if (!vb.buffer)
&vb.buffer_offset, &vb.buffer.resource);
if (!vb.buffer.resource)
goto out;
vb.stride = 0;
@ -2291,7 +2292,7 @@ out:
util_blitter_restore_render_cond(blitter);
util_blitter_unset_running_flag(blitter);
pipe_so_target_reference(&so_target, NULL);
pipe_resource_reference(&vb.buffer, NULL);
pipe_resource_reference(&vb.buffer.resource, NULL);
}
/* probably radeon specific */

View file

@ -503,10 +503,8 @@ static inline void
util_blitter_save_vertex_buffer_slot(struct blitter_context *blitter,
struct pipe_vertex_buffer *vertex_buffers)
{
pipe_resource_reference(&blitter->saved_vertex_buffer.buffer,
vertex_buffers[blitter->vb_slot].buffer);
memcpy(&blitter->saved_vertex_buffer, &vertex_buffers[blitter->vb_slot],
sizeof(struct pipe_vertex_buffer));
pipe_vertex_buffer_reference(&blitter->saved_vertex_buffer,
&vertex_buffers[blitter->vb_slot]);
}
static inline void

View file

@ -62,13 +62,13 @@ util_draw_max_index(
const struct util_format_description *format_desc;
unsigned format_size;
if (!buffer->buffer) {
if (buffer->is_user_buffer || !buffer->buffer.resource) {
continue;
}
assert(buffer->buffer->height0 == 1);
assert(buffer->buffer->depth0 == 1);
buffer_size = buffer->buffer->width0;
assert(buffer->buffer.resource->height0 == 1);
assert(buffer->buffer.resource->depth0 == 1);
buffer_size = buffer->buffer.resource->width0;
format_desc = util_format_description(element->src_format);
assert(format_desc->block.width == 1);

View file

@ -54,7 +54,7 @@ util_draw_vertex_buffer(struct pipe_context *pipe,
/* tell pipe about the vertex buffer */
memset(&vbuffer, 0, sizeof(vbuffer));
vbuffer.buffer = vbuf;
vbuffer.buffer.resource = vbuf;
vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
vbuffer.buffer_offset = offset;
@ -82,7 +82,8 @@ util_draw_user_vertex_buffer(struct cso_context *cso, void *buffer,
assert(num_attribs <= PIPE_MAX_ATTRIBS);
vbuffer.user_buffer = buffer;
vbuffer.is_user_buffer = true;
vbuffer.buffer.user = buffer;
vbuffer.stride = num_attribs * 4 * sizeof(float); /* vertex size */
/* note: vertex elements already set by caller */

View file

@ -863,9 +863,9 @@ util_dump_vertex_buffer(FILE *stream, const struct pipe_vertex_buffer *state)
util_dump_struct_begin(stream, "pipe_vertex_buffer");
util_dump_member(stream, uint, state, stride);
util_dump_member(stream, bool, state, is_user_buffer);
util_dump_member(stream, uint, state, buffer_offset);
util_dump_member(stream, ptr, state, buffer);
util_dump_member(stream, ptr, state, user_buffer);
util_dump_member(stream, ptr, state, buffer.resource);
util_dump_struct_end(stream);
}

View file

@ -51,10 +51,13 @@ void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
if (src) {
for (i = 0; i < count; i++) {
if (src[i].buffer || src[i].user_buffer) {
if (src[i].buffer.resource)
bitmask |= 1 << i;
}
pipe_resource_reference(&dst[i].buffer, src[i].buffer);
pipe_vertex_buffer_unreference(&dst[i]);
if (!src[i].is_user_buffer)
pipe_resource_reference(&dst[i].buffer.resource, src[i].buffer.resource);
}
/* Copy over the other members of pipe_vertex_buffer. */
@ -65,10 +68,8 @@ void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
}
else {
/* Unreference the buffers. */
for (i = 0; i < count; i++) {
pipe_resource_reference(&dst[i].buffer, NULL);
dst[i].user_buffer = NULL;
}
for (i = 0; i < count; i++)
pipe_vertex_buffer_unreference(&dst[i]);
*enabled_buffers &= ~(((1ull << count) - 1) << start_slot);
}
@ -87,7 +88,7 @@ void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
uint32_t enabled_buffers = 0;
for (i = 0; i < *dst_count; i++) {
if (dst[i].buffer || dst[i].user_buffer)
if (dst[i].buffer.resource)
enabled_buffers |= (1ull << i);
}

View file

@ -187,6 +187,25 @@ pipe_so_target_reference(struct pipe_stream_output_target **ptr,
*ptr = target;
}
static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   /* Release whatever the vertex buffer currently points at.
    *
    * Only real GPU resources are reference-counted; a user (CPU) pointer
    * is not owned by this struct, so it is simply cleared.
    */
   if (!dst->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, NULL);
   else
      dst->buffer.user = NULL;
}
static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   /* Copy *src into *dst, transferring ownership of the resource
    * reference correctly.
    *
    * Aliasing guard: if both already point at the same buffer, do not
    * touch the reference count at all. Unreferencing dst first (as the
    * unguarded version did) could drop the count to zero and destroy
    * the resource before the new reference is taken, leaving src (and
    * dst) dangling.
    */
   if (dst->buffer.resource == src->buffer.resource) {
      dst->stride = src->stride;
      dst->is_user_buffer = src->is_user_buffer;
      dst->buffer_offset = src->buffer_offset;
      return;
   }

   pipe_vertex_buffer_unreference(dst);

   /* Copy members individually instead of memcpy: a whole-struct copy
    * would also duplicate padding bytes, which is unsafe if the struct
    * is ever hashed or compared bytewise.
    */
   dst->stride = src->stride;
   dst->is_user_buffer = src->is_user_buffer;
   dst->buffer_offset = src->buffer_offset;

   if (src->is_user_buffer) {
      /* User pointers are not reference-counted; plain assignment. */
      dst->buffer.user = src->buffer.user;
   } else {
      /* dst->buffer.resource is NULL after the unreference above, so
       * this just takes a new reference on src's resource.
       */
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   }
}
static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
struct pipe_resource *pt, unsigned level, unsigned layer)

View file

@ -377,13 +377,12 @@ void u_vbuf_destroy(struct u_vbuf *mgr)
mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
pipe_resource_reference(&mgr->vertex_buffer[i].buffer, NULL);
}
for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
pipe_resource_reference(&mgr->real_vertex_buffer[i].buffer, NULL);
}
pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);
pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved);
translate_cache_destroy(mgr->translate_cache);
cso_cache_delete(mgr->cso_cache);
@ -417,17 +416,17 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
vb = &mgr->vertex_buffer[i];
offset = vb->buffer_offset + vb->stride * start_vertex;
if (vb->user_buffer) {
map = (uint8_t*)vb->user_buffer + offset;
if (vb->is_user_buffer) {
map = (uint8_t*)vb->buffer.user + offset;
} else {
unsigned size = vb->stride ? num_vertices * vb->stride
: sizeof(double)*4;
if (offset+size > vb->buffer->width0) {
size = vb->buffer->width0 - offset;
if (offset+size > vb->buffer.resource->width0) {
size = vb->buffer.resource->width0 - offset;
}
map = pipe_buffer_map_range(mgr->pipe, vb->buffer, offset, size,
map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size,
PIPE_TRANSFER_READ, &vb_transfer[i]);
}
@ -510,8 +509,8 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
/* Move the buffer reference. */
pipe_resource_reference(
&mgr->real_vertex_buffer[out_vb].buffer, NULL);
mgr->real_vertex_buffer[out_vb].buffer = out_buffer;
&mgr->real_vertex_buffer[out_vb].buffer.resource, NULL);
mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer;
return PIPE_OK;
}
@ -721,7 +720,7 @@ static void u_vbuf_translate_end(struct u_vbuf *mgr)
for (i = 0; i < VB_NUM; i++) {
unsigned vb = mgr->fallback_vbs[i];
if (vb != ~0u) {
pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer, NULL);
pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource, NULL);
mgr->fallback_vbs[i] = ~0;
/* This will cause the buffer to be unbound in the driver later. */
@ -830,8 +829,8 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
for (i = 0; i < count; i++) {
unsigned dst_index = start_slot + i;
pipe_resource_reference(&mgr->vertex_buffer[dst_index].buffer, NULL);
pipe_resource_reference(&mgr->real_vertex_buffer[dst_index].buffer,
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
pipe_resource_reference(&mgr->real_vertex_buffer[dst_index].buffer.resource,
NULL);
}
@ -845,18 +844,13 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index];
struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index];
if (!vb->buffer && !vb->user_buffer) {
pipe_resource_reference(&orig_vb->buffer, NULL);
pipe_resource_reference(&real_vb->buffer, NULL);
real_vb->user_buffer = NULL;
if (!vb->buffer.resource) {
pipe_vertex_buffer_unreference(orig_vb);
pipe_vertex_buffer_unreference(real_vb);
continue;
}
pipe_resource_reference(&orig_vb->buffer, vb->buffer);
orig_vb->user_buffer = vb->user_buffer;
real_vb->buffer_offset = orig_vb->buffer_offset = vb->buffer_offset;
real_vb->stride = orig_vb->stride = vb->stride;
pipe_vertex_buffer_reference(orig_vb, vb);
if (vb->stride) {
nonzero_stride_vb_mask |= 1 << dst_index;
@ -866,18 +860,23 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
(!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
incompatible_vb_mask |= 1 << dst_index;
pipe_resource_reference(&real_vb->buffer, NULL);
real_vb->buffer_offset = vb->buffer_offset;
real_vb->stride = vb->stride;
pipe_vertex_buffer_unreference(real_vb);
real_vb->is_user_buffer = false;
continue;
}
if (!mgr->caps.user_vertex_buffers && vb->user_buffer) {
if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) {
user_vb_mask |= 1 << dst_index;
pipe_resource_reference(&real_vb->buffer, NULL);
real_vb->buffer_offset = vb->buffer_offset;
real_vb->stride = vb->stride;
pipe_vertex_buffer_unreference(real_vb);
real_vb->is_user_buffer = false;
continue;
}
pipe_resource_reference(&real_vb->buffer, vb->buffer);
real_vb->user_buffer = vb->user_buffer;
pipe_vertex_buffer_reference(real_vb, vb);
}
mgr->user_vb_mask |= user_vb_mask;
@ -933,7 +932,7 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr,
continue;
}
if (!vb->user_buffer) {
if (!vb->is_user_buffer) {
continue;
}
@ -983,11 +982,11 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr,
assert(start < end);
real_vb = &mgr->real_vertex_buffer[i];
ptr = mgr->vertex_buffer[i].user_buffer;
ptr = mgr->vertex_buffer[i].buffer.user;
u_upload_data(mgr->pipe->stream_uploader, start, end - start, 4, ptr + start,
&real_vb->buffer_offset, &real_vb->buffer);
if (!real_vb->buffer)
u_upload_data(mgr->pipe->stream_uploader, start, end - start, 4,
ptr + start, &real_vb->buffer_offset, &real_vb->buffer.resource);
if (!real_vb->buffer.resource)
return PIPE_ERROR_OUT_OF_MEMORY;
real_vb->buffer_offset -= start;
@ -1320,16 +1319,13 @@ void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
void u_vbuf_save_aux_vertex_buffer_slot(struct u_vbuf *mgr)
{
struct pipe_vertex_buffer *vb =
&mgr->vertex_buffer[mgr->aux_vertex_buffer_slot];
pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, vb->buffer);
memcpy(&mgr->aux_vertex_buffer_saved, vb, sizeof(*vb));
pipe_vertex_buffer_reference(&mgr->aux_vertex_buffer_saved,
&mgr->vertex_buffer[mgr->aux_vertex_buffer_slot]);
}
void u_vbuf_restore_aux_vertex_buffer_slot(struct u_vbuf *mgr)
{
u_vbuf_set_vertex_buffers(mgr, mgr->aux_vertex_buffer_slot, 1,
&mgr->aux_vertex_buffer_saved);
pipe_resource_reference(&mgr->aux_vertex_buffer_saved.buffer, NULL);
pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved);
}

View file

@ -295,7 +295,7 @@ vl_bicubic_filter_init(struct vl_bicubic_filter *filter, struct pipe_context *pi
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
if(!filter->quad.buffer)
if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
@ -349,7 +349,7 @@ error_vs:
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
@ -373,7 +373,7 @@ vl_bicubic_filter_cleanup(struct vl_bicubic_filter *filter)
filter->pipe->delete_blend_state(filter->pipe, filter->blend);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs);

View file

@ -605,7 +605,8 @@ init_buffers(struct vl_compositor *c)
*/
c->vertex_buf.stride = sizeof(struct vertex2f) + sizeof(struct vertex4f) * 2;
c->vertex_buf.buffer_offset = 0;
c->vertex_buf.buffer = NULL;
c->vertex_buf.buffer.resource = NULL;
c->vertex_buf.is_user_buffer = false;
vertex_elems[0].src_offset = 0;
vertex_elems[0].instance_divisor = 0;
@ -630,7 +631,7 @@ cleanup_buffers(struct vl_compositor *c)
assert(c);
c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
pipe_resource_reference(&c->vertex_buf.buffer, NULL);
pipe_resource_reference(&c->vertex_buf.buffer.resource, NULL);
}
static inline struct u_rect
@ -812,7 +813,7 @@ gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u
u_upload_alloc(c->pipe->stream_uploader, 0,
c->vertex_buf.stride * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
4, /* alignment */
&c->vertex_buf.buffer_offset, &c->vertex_buf.buffer,
&c->vertex_buf.buffer_offset, &c->vertex_buf.buffer.resource,
(void**)&vb);
for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {

View file

@ -308,7 +308,7 @@ vl_deint_filter_init(struct vl_deint_filter *filter, struct pipe_context *pipe,
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
if(!filter->quad.buffer)
if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
@ -361,7 +361,7 @@ error_vs:
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
@ -396,7 +396,7 @@ vl_deint_filter_cleanup(struct vl_deint_filter *filter)
filter->pipe->delete_blend_state(filter->pipe, filter->blend[2]);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs_copy_top);

View file

@ -184,7 +184,7 @@ vl_matrix_filter_init(struct vl_matrix_filter *filter, struct pipe_context *pipe
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
if(!filter->quad.buffer)
if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
@ -233,7 +233,7 @@ error_offsets:
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
@ -257,7 +257,7 @@ vl_matrix_filter_cleanup(struct vl_matrix_filter *filter)
filter->pipe->delete_blend_state(filter->pipe, filter->blend);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs);

View file

@ -295,7 +295,7 @@ vl_median_filter_init(struct vl_median_filter *filter, struct pipe_context *pipe
goto error_sampler;
filter->quad = vl_vb_upload_quads(pipe);
if(!filter->quad.buffer)
if(!filter->quad.buffer.resource)
goto error_quad;
memset(&ve, 0, sizeof(ve));
@ -337,7 +337,7 @@ error_offsets:
pipe->delete_vertex_elements_state(pipe, filter->ves);
error_ves:
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
error_quad:
pipe->delete_sampler_state(pipe, filter->sampler);
@ -361,7 +361,7 @@ vl_median_filter_cleanup(struct vl_median_filter *filter)
filter->pipe->delete_blend_state(filter->pipe, filter->blend);
filter->pipe->delete_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->delete_vertex_elements_state(filter->pipe, filter->ves);
pipe_resource_reference(&filter->quad.buffer, NULL);
pipe_resource_reference(&filter->quad.buffer.resource, NULL);
filter->pipe->delete_vs_state(filter->pipe, filter->vs);
filter->pipe->delete_fs_state(filter->pipe, filter->fs);

View file

@ -509,8 +509,8 @@ vl_mpeg12_destroy(struct pipe_video_codec *decoder)
dec->context->delete_vertex_elements_state(dec->context, dec->ves_ycbcr);
dec->context->delete_vertex_elements_state(dec->context, dec->ves_mv);
pipe_resource_reference(&dec->quads.buffer, NULL);
pipe_resource_reference(&dec->pos.buffer, NULL);
pipe_resource_reference(&dec->quads.buffer.resource, NULL);
pipe_resource_reference(&dec->pos.buffer.resource, NULL);
pipe_sampler_view_reference(&dec->zscan_linear, NULL);
pipe_sampler_view_reference(&dec->zscan_normal, NULL);

View file

@ -49,23 +49,23 @@ vl_vb_upload_quads(struct pipe_context *pipe)
/* create buffer */
quad.stride = sizeof(struct vertex2f);
quad.buffer_offset = 0;
quad.buffer = pipe_buffer_create
quad.buffer.resource = pipe_buffer_create
(
pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
PIPE_USAGE_DEFAULT,
sizeof(struct vertex2f) * 4
);
quad.user_buffer = NULL;
quad.is_user_buffer = false;
if(!quad.buffer)
if(!quad.buffer.resource)
return quad;
/* and fill it */
v = pipe_buffer_map
(
pipe,
quad.buffer,
quad.buffer.resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
&buf_transfer
);
@ -94,23 +94,23 @@ vl_vb_upload_pos(struct pipe_context *pipe, unsigned width, unsigned height)
/* create buffer */
pos.stride = sizeof(struct vertex2s);
pos.buffer_offset = 0;
pos.buffer = pipe_buffer_create
pos.buffer.resource = pipe_buffer_create
(
pipe->screen,
PIPE_BIND_VERTEX_BUFFER,
PIPE_USAGE_DEFAULT,
sizeof(struct vertex2s) * width * height
);
pos.user_buffer = NULL;
pos.is_user_buffer = false;
if(!pos.buffer)
if(!pos.buffer.resource)
return pos;
/* and fill it */
v = pipe_buffer_map
(
pipe,
pos.buffer,
pos.buffer.resource,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
&buf_transfer
);
@ -268,8 +268,8 @@ vl_vb_get_ycbcr(struct vl_vertex_buffer *buffer, int component)
buf.stride = sizeof(struct vl_ycbcr_block);
buf.buffer_offset = 0;
buf.buffer = buffer->ycbcr[component].resource;
buf.user_buffer = NULL;
buf.buffer.resource = buffer->ycbcr[component].resource;
buf.is_user_buffer = false;
return buf;
}
@ -283,8 +283,8 @@ vl_vb_get_mv(struct vl_vertex_buffer *buffer, int motionvector)
buf.stride = sizeof(struct vl_motionvector);
buf.buffer_offset = 0;
buf.buffer = buffer->mv[motionvector].resource;
buf.user_buffer = NULL;
buf.buffer.resource = buffer->mv[motionvector].resource;
buf.is_user_buffer = false;
return buf;
}

View file

@ -308,11 +308,10 @@ dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE
dd_dump_render_condition(dstate, f);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
if (dstate->vertex_buffers[i].buffer ||
dstate->vertex_buffers[i].user_buffer) {
if (dstate->vertex_buffers[i].buffer.resource) {
DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
if (dstate->vertex_buffers[i].buffer)
DUMP_M(resource, &dstate->vertex_buffers[i], buffer);
if (!dstate->vertex_buffers[i].is_user_buffer)
DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
}
if (dstate->velems) {
@ -765,7 +764,7 @@ dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
util_set_index_buffer(&dst->index_buffer, NULL);
for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
pipe_resource_reference(&dst->vertex_buffers[i].buffer, NULL);
pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
pipe_so_target_reference(&dst->so_targets[i], NULL);
@ -802,10 +801,8 @@ dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
pipe_resource_reference(&dst->vertex_buffers[i].buffer,
src->vertex_buffers[i].buffer);
memcpy(&dst->vertex_buffers[i], &src->vertex_buffers[i],
sizeof(src->vertex_buffers[i]));
pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
&src->vertex_buffers[i]);
}
dst->num_so_targets = src->num_so_targets;

View file

@ -234,8 +234,8 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
/* Mark VBOs as being read */
for (i = 0; i < ctx->vertex_buffer.count; i++) {
assert(!ctx->vertex_buffer.vb[i].user_buffer);
resource_read(ctx, ctx->vertex_buffer.vb[i].buffer);
assert(!ctx->vertex_buffer.vb[i].is_user_buffer);
resource_read(ctx, ctx->vertex_buffer.vb[i].buffer.resource);
}
/* Mark index buffer as being read */

View file

@ -428,11 +428,11 @@ etna_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
struct compiled_set_vertex_buffer *cs = &so->cvb[idx];
struct pipe_vertex_buffer *vbi = &so->vb[idx];
assert(!vbi->user_buffer); /* XXX support user_buffer using
etna_usermem_map */
assert(!vbi->is_user_buffer); /* XXX support user_buffer using
etna_usermem_map */
if (vbi->buffer) { /* GPU buffer */
cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_resource(vbi->buffer)->bo;
if (vbi->buffer.resource) { /* GPU buffer */
cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_resource(vbi->buffer.resource)->bo;
cs->FE_VERTEX_STREAM_BASE_ADDR.offset = vbi->buffer_offset;
cs->FE_VERTEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
cs->FE_VERTEX_STREAM_CONTROL =

View file

@ -69,8 +69,8 @@ emit_vertexbufs(struct fd_context *ctx)
struct pipe_vertex_buffer *vb =
&vertexbuf->vb[elem->vertex_buffer_index];
bufs[i].offset = vb->buffer_offset;
bufs[i].size = fd_bo_size(fd_resource(vb->buffer)->bo);
bufs[i].prsc = vb->buffer;
bufs[i].size = fd_bo_size(fd_resource(vb->buffer.resource)->bo);
bufs[i].prsc = vb->buffer.resource;
}
// NOTE I believe the 0x78 (or 0x9c in solid_vp) relates to the

View file

@ -401,7 +401,7 @@ fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit)
struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
const struct pipe_vertex_buffer *vb =
&vtx->vertexbuf.vb[elem->vertex_buffer_index];
struct fd_resource *rsc = fd_resource(vb->buffer);
struct fd_resource *rsc = fd_resource(vb->buffer.resource);
enum pipe_format pfmt = elem->src_format;
enum a3xx_vtx_fmt fmt = fd3_pipe2vtx(pfmt);
bool switchnext = (i != last) ||

View file

@ -403,7 +403,7 @@ fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit)
struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
const struct pipe_vertex_buffer *vb =
&vtx->vertexbuf.vb[elem->vertex_buffer_index];
struct fd_resource *rsc = fd_resource(vb->buffer);
struct fd_resource *rsc = fd_resource(vb->buffer.resource);
enum pipe_format pfmt = elem->src_format;
enum a4xx_vtx_fmt fmt = fd4_pipe2vtx(pfmt);
bool switchnext = (i != last) ||

View file

@ -425,7 +425,7 @@ fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit)
struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
const struct pipe_vertex_buffer *vb =
&vtx->vertexbuf.vb[elem->vertex_buffer_index];
struct fd_resource *rsc = fd_resource(vb->buffer);
struct fd_resource *rsc = fd_resource(vb->buffer.resource);
enum pipe_format pfmt = elem->src_format;
enum a5xx_vtx_fmt fmt = fd5_pipe2vtx(pfmt);
bool isint = util_format_is_pure_integer(pfmt);

View file

@ -210,7 +210,7 @@ fd_context_setup_common_vbos(struct fd_context *ctx)
}});
ctx->solid_vbuf_state.vertexbuf.count = 1;
ctx->solid_vbuf_state.vertexbuf.vb[0].stride = 12;
ctx->solid_vbuf_state.vertexbuf.vb[0].buffer = ctx->solid_vbuf;
ctx->solid_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->solid_vbuf;
/* setup blit_vbuf_state: */
ctx->blit_vbuf_state.vtx = pctx->create_vertex_elements_state(
@ -225,9 +225,9 @@ fd_context_setup_common_vbos(struct fd_context *ctx)
}});
ctx->blit_vbuf_state.vertexbuf.count = 2;
ctx->blit_vbuf_state.vertexbuf.vb[0].stride = 8;
ctx->blit_vbuf_state.vertexbuf.vb[0].buffer = ctx->blit_texcoord_vbuf;
ctx->blit_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->blit_texcoord_vbuf;
ctx->blit_vbuf_state.vertexbuf.vb[1].stride = 12;
ctx->blit_vbuf_state.vertexbuf.vb[1].buffer = ctx->solid_vbuf;
ctx->blit_vbuf_state.vertexbuf.vb[1].buffer.resource = ctx->solid_vbuf;
}
void

View file

@ -164,8 +164,8 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
/* Mark VBOs as being read */
foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer);
assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
}
/* Mark index buffer as being read */

View file

@ -58,7 +58,7 @@ fd_invalidate_resource(struct fd_context *ctx, struct pipe_resource *prsc)
/* VBOs */
for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
if (ctx->vtx.vertexbuf.vb[i].buffer == prsc)
if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
ctx->dirty |= FD_DIRTY_VTXBUF;
}

View file

@ -259,8 +259,8 @@ fd_set_vertex_buffers(struct pipe_context *pctx,
*/
if (ctx->screen->gpu_id < 300) {
for (i = 0; i < count; i++) {
bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
bool new_enabled = vb && vb[i].buffer.resource;
bool old_enabled = so->vb[i].buffer.resource != NULL;
uint32_t new_stride = vb ? vb[i].stride : 0;
uint32_t old_stride = so->vb[i].stride;
if ((new_enabled != old_enabled) || (new_stride != old_stride)) {

View file

@ -73,9 +73,10 @@ i915_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
* Map vertex buffers
*/
for (i = 0; i < i915->nr_vertex_buffers; i++) {
const void *buf = i915->vertex_buffers[i].user_buffer;
const void *buf = i915->vertex_buffers[i].is_user_buffer ?
i915->vertex_buffers[i].buffer.user : NULL;
if (!buf)
buf = i915_buffer(i915->vertex_buffers[i].buffer)->data;
buf = i915_buffer(i915->vertex_buffers[i].buffer.resource)->data;
draw_set_mapped_vertex_buffer(draw, i, buf, ~0);
}

View file

@ -96,7 +96,7 @@ static void llvmpipe_destroy( struct pipe_context *pipe )
}
for (i = 0; i < llvmpipe->num_vertex_buffers; i++) {
pipe_resource_reference(&llvmpipe->vertex_buffer[i].buffer, NULL);
pipe_vertex_buffer_unreference(&llvmpipe->vertex_buffer[i]);
}
lp_delete_setup_variants(llvmpipe);

View file

@ -73,14 +73,15 @@ llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
* Map vertex buffers
*/
for (i = 0; i < lp->num_vertex_buffers; i++) {
const void *buf = lp->vertex_buffer[i].user_buffer;
const void *buf = lp->vertex_buffer[i].is_user_buffer ?
lp->vertex_buffer[i].buffer.user : NULL;
size_t size = ~0;
if (!buf) {
if (!lp->vertex_buffer[i].buffer) {
if (!lp->vertex_buffer[i].buffer.resource) {
continue;
}
buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer);
size = lp->vertex_buffer[i].buffer->width0;
buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer.resource);
size = lp->vertex_buffer[i].buffer.resource->width0;
}
draw_set_mapped_vertex_buffer(draw, i, buf, size);
}

View file

@ -115,7 +115,7 @@ nv30_invalidate_resource_storage(struct nouveau_context *nv,
if (res->bind & PIPE_BIND_VERTEX_BUFFER) {
for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (nv30->vtxbuf[i].buffer == res) {
if (nv30->vtxbuf[i].buffer.resource == res) {
nv30->dirty |= NV30_NEW_ARRAYS;
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
if (!--ref)

View file

@ -419,10 +419,11 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
}
for (i = 0; i < nv30->num_vtxbufs; i++) {
const void *map = nv30->vtxbuf[i].user_buffer;
const void *map = nv30->vtxbuf[i].is_user_buffer ?
nv30->vtxbuf[i].buffer.user : NULL;
if (!map) {
if (nv30->vtxbuf[i].buffer)
map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer,
if (nv30->vtxbuf[i].buffer.resource)
map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer.resource,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transfer[i]);
}

View file

@ -209,9 +209,9 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
for (i = 0; i < nv30->num_vtxbufs; ++i) {
uint8_t *data;
struct pipe_vertex_buffer *vb = &nv30->vtxbuf[i];
struct nv04_resource *res = nv04_resource(vb->buffer);
struct nv04_resource *res = nv04_resource(vb->buffer.resource);
if (!vb->buffer && !vb->user_buffer) {
if (!vb->buffer.resource) {
continue;
}
@ -281,8 +281,8 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
nouveau_resource_unmap(nv04_resource(nv30->idxbuf.buffer));
for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (nv30->vtxbuf[i].buffer) {
nouveau_resource_unmap(nv04_resource(nv30->vtxbuf[i].buffer));
if (nv30->vtxbuf[i].buffer.resource) {
nouveau_resource_unmap(nv04_resource(nv30->vtxbuf[i].buffer.resource));
}
}

View file

@ -39,9 +39,9 @@ nv30_memory_barrier(struct pipe_context *pipe, unsigned flags)
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (!nv30->vtxbuf[i].buffer)
if (!nv30->vtxbuf[i].buffer.resource)
continue;
if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv30->base.vbo_dirty = true;
}

View file

@ -40,7 +40,7 @@ nv30_emit_vtxattr(struct nv30_context *nv30, struct pipe_vertex_buffer *vb,
{
const unsigned nc = util_format_get_nr_components(ve->src_format);
struct nouveau_pushbuf *push = nv30->base.pushbuf;
struct nv04_resource *res = nv04_resource(vb->buffer);
struct nv04_resource *res = nv04_resource(vb->buffer.resource);
const struct util_format_description *desc =
util_format_description(ve->src_format);
const void *data;
@ -102,12 +102,12 @@ nv30_prevalidate_vbufs(struct nv30_context *nv30)
for (i = 0; i < nv30->num_vtxbufs; i++) {
vb = &nv30->vtxbuf[i];
if (!vb->stride || !vb->buffer) /* NOTE: user_buffer not implemented */
if (!vb->stride || !vb->buffer.resource) /* NOTE: user_buffer not implemented */
continue;
buf = nv04_resource(vb->buffer);
buf = nv04_resource(vb->buffer.resource);
/* NOTE: user buffers with temporary storage count as mapped by GPU */
if (!nouveau_resource_mapped_by_gpu(vb->buffer)) {
if (!nouveau_resource_mapped_by_gpu(vb->buffer.resource)) {
if (nv30->vbo_push_hint) {
nv30->vbo_fifo = ~0;
continue;
@ -138,7 +138,7 @@ nv30_update_user_vbufs(struct nv30_context *nv30)
struct pipe_vertex_element *ve = &nv30->vertex->pipe[i];
const int b = ve->vertex_buffer_index;
struct pipe_vertex_buffer *vb = &nv30->vtxbuf[b];
struct nv04_resource *buf = nv04_resource(vb->buffer);
struct nv04_resource *buf = nv04_resource(vb->buffer.resource);
if (!(nv30->vbo_user & (1 << b)))
continue;
@ -173,7 +173,7 @@ nv30_release_user_vbufs(struct nv30_context *nv30)
int i = ffs(vbo_user) - 1;
vbo_user &= ~(1 << i);
nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer));
nouveau_buffer_release_gpu_storage(nv04_resource(nv30->vtxbuf[i].buffer.resource));
}
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXTMP);
@ -235,7 +235,7 @@ nv30_vbo_validate(struct nv30_context *nv30)
vb = &nv30->vtxbuf[ve->vertex_buffer_index];
user = (nv30->vbo_user & (1 << ve->vertex_buffer_index));
res = nv04_resource(vb->buffer);
res = nv04_resource(vb->buffer.resource);
if (nv30->vbo_fifo || unlikely(vb->stride == 0)) {
if (!nv30->vbo_fifo)
@ -583,9 +583,9 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
}
for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
if (!nv30->vtxbuf[i].buffer)
if (!nv30->vtxbuf[i].buffer.resource)
continue;
if (nv30->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv30->base.vbo_dirty = true;
}

View file

@ -62,9 +62,9 @@ nv50_memory_barrier(struct pipe_context *pipe, unsigned flags)
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nv50->num_vtxbufs; ++i) {
if (!nv50->vtxbuf[i].buffer)
if (!nv50->vtxbuf[i].buffer.resource && !nv50->vtxbuf[i].is_user_buffer)
continue;
if (nv50->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
if (nv50->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv50->base.vbo_dirty = true;
}
@ -144,7 +144,7 @@ nv50_context_unreference_resources(struct nv50_context *nv50)
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i)
pipe_resource_reference(&nv50->vtxbuf[i].buffer, NULL);
pipe_resource_reference(&nv50->vtxbuf[i].buffer.resource, NULL);
pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
@ -230,7 +230,7 @@ nv50_invalidate_resource_storage(struct nouveau_context *ctx,
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i) {
if (nv50->vtxbuf[i].buffer == res) {
if (nv50->vtxbuf[i].buffer.resource == res) {
nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
if (!--ref)

View file

@ -264,11 +264,11 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
const struct pipe_vertex_buffer *vb = &nv50->vtxbuf[i];
const uint8_t *data;
if (unlikely(vb->buffer))
if (unlikely(!vb->is_user_buffer))
data = nouveau_resource_map_offset(&nv50->base,
nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);
nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
else
data = vb->user_buffer;
data = vb->buffer.user;
if (apply_bias && likely(!(nv50->vertex->instance_bufs & (1 << i))))
data += (ptrdiff_t)info->index_bias * vb->stride;

View file

@ -1060,7 +1060,7 @@ nv50_set_vertex_buffers(struct pipe_context *pipe,
for (i = 0; i < count; ++i) {
unsigned dst_index = start_slot + i;
if (!vb[i].buffer && vb[i].user_buffer) {
if (vb[i].is_user_buffer) {
nv50->vbo_user |= 1 << dst_index;
if (!vb[i].stride)
nv50->vbo_constant |= 1 << dst_index;
@ -1071,8 +1071,8 @@ nv50_set_vertex_buffers(struct pipe_context *pipe,
nv50->vbo_user &= ~(1 << dst_index);
nv50->vbo_constant &= ~(1 << dst_index);
if (vb[i].buffer &&
vb[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
if (vb[i].buffer.resource &&
vb[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv50->vtxbufs_coherent |= (1 << dst_index);
else
nv50->vtxbufs_coherent &= ~(1 << dst_index);

View file

@ -141,13 +141,13 @@ nv50_emit_vtxattr(struct nv50_context *nv50, struct pipe_vertex_buffer *vb,
struct pipe_vertex_element *ve, unsigned attr)
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
const void *data = (const uint8_t *)vb->user_buffer + ve->src_offset;
const void *data = (const uint8_t *)vb->buffer.user + ve->src_offset;
float v[4];
const unsigned nc = util_format_get_nr_components(ve->src_format);
const struct util_format_description *desc =
util_format_description(ve->src_format);
assert(vb->user_buffer);
assert(vb->is_user_buffer);
if (desc->channel[0].pure_integer) {
if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED) {
@ -200,7 +200,7 @@ nv50_user_vbuf_range(struct nv50_context *nv50, unsigned vbi,
if (unlikely(nv50->vertex->instance_bufs & (1 << vbi))) {
/* TODO: use min and max instance divisor to get a proper range */
*base = 0;
*size = nv50->vtxbuf[vbi].buffer->width0;
*size = nv50->vtxbuf[vbi].buffer.resource->width0;
} else {
/* NOTE: if there are user buffers, we *must* have index bounds */
assert(nv50->vb_elt_limit != ~0);
@ -227,7 +227,7 @@ nv50_upload_user_buffers(struct nv50_context *nv50,
nv50_user_vbuf_range(nv50, b, &base, &size);
limits[b] = base + size - 1;
addrs[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer, base, size,
addrs[b] = nouveau_scratch_data(&nv50->base, vb->buffer.user, base, size,
&bo);
if (addrs[b])
BCTX_REFN_bo(nv50->bufctx_3d, 3D_VERTEX_TMP, NOUVEAU_BO_GART |
@ -266,7 +266,7 @@ nv50_update_user_vbufs(struct nv50_context *nv50)
struct nouveau_bo *bo;
const uint32_t bo_flags = NOUVEAU_BO_GART | NOUVEAU_BO_RD;
written |= 1 << b;
address[b] = nouveau_scratch_data(&nv50->base, vb->user_buffer,
address[b] = nouveau_scratch_data(&nv50->base, vb->buffer.user,
base, size, &bo);
if (address[b])
BCTX_REFN_bo(nv50->bufctx_3d, 3D_VERTEX_TMP, bo_flags, bo);
@ -317,8 +317,9 @@ nv50_vertex_arrays_validate(struct nv50_context *nv50)
/* if vertex buffer was written by GPU - flush VBO cache */
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i) {
struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer);
if (buf && buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
struct nv04_resource *buf = nv04_resource(nv50->vtxbuf[i].buffer.resource);
if (!nv50->vtxbuf[i].is_user_buffer &&
buf && buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
nv50->base.vbo_dirty = true;
}
@ -386,12 +387,12 @@ nv50_vertex_arrays_validate(struct nv50_context *nv50)
address = addrs[b] + ve->pipe.src_offset;
limit = addrs[b] + limits[b];
} else
if (!vb->buffer) {
if (!vb->buffer.resource) {
BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_FETCH(i)), 1);
PUSH_DATA (push, 0);
continue;
} else {
struct nv04_resource *buf = nv04_resource(vb->buffer);
struct nv04_resource *buf = nv04_resource(vb->buffer.resource);
if (!(refd & (1 << b))) {
refd |= 1 << b;
BCTX_REFN(nv50->bufctx_3d, 3D_VERTEX, buf, RD);

View file

@ -62,9 +62,9 @@ nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
if (!nvc0->vtxbuf[i].buffer)
if (!nvc0->vtxbuf[i].buffer.resource && !nvc0->vtxbuf[i].is_user_buffer)
continue;
if (nvc0->vtxbuf[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
if (nvc0->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nvc0->base.vbo_dirty = true;
}
@ -147,7 +147,7 @@ nvc0_context_unreference_resources(struct nvc0_context *nvc0)
util_unreference_framebuffer_state(&nvc0->framebuffer);
for (i = 0; i < nvc0->num_vtxbufs; ++i)
pipe_resource_reference(&nvc0->vtxbuf[i].buffer, NULL);
pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);
pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
@ -260,7 +260,7 @@ nvc0_invalidate_resource_storage(struct nouveau_context *ctx,
if (res->target == PIPE_BUFFER) {
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
if (nvc0->vtxbuf[i].buffer == res) {
if (nvc0->vtxbuf[i].buffer.resource == res) {
nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);
if (!--ref)

View file

@ -941,7 +941,7 @@ nvc0_set_vertex_buffers(struct pipe_context *pipe,
for (i = 0; i < count; ++i) {
unsigned dst_index = start_slot + i;
if (vb[i].user_buffer) {
if (vb[i].is_user_buffer) {
nvc0->vbo_user |= 1 << dst_index;
if (!vb[i].stride && nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
nvc0->constant_vbos |= 1 << dst_index;
@ -952,8 +952,8 @@ nvc0_set_vertex_buffers(struct pipe_context *pipe,
nvc0->vbo_user &= ~(1 << dst_index);
nvc0->constant_vbos &= ~(1 << dst_index);
if (vb[i].buffer &&
vb[i].buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
if (vb[i].buffer.resource &&
vb[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nvc0->vtxbufs_coherent |= (1 << dst_index);
else
nvc0->vtxbufs_coherent &= ~(1 << dst_index);

View file

@ -176,8 +176,8 @@ nvc0_set_constant_vertex_attrib(struct nvc0_context *nvc0, const unsigned a)
uint32_t mode;
const struct util_format_description *desc;
void *dst;
const void *src = (const uint8_t *)vb->user_buffer + ve->src_offset;
assert(!vb->buffer);
const void *src = (const uint8_t *)vb->buffer.user + ve->src_offset;
assert(vb->is_user_buffer);
desc = util_format_description(ve->src_format);
@ -254,7 +254,7 @@ nvc0_update_user_vbufs(struct nvc0_context *nvc0)
struct nouveau_bo *bo;
const uint32_t bo_flags = NOUVEAU_BO_RD | NOUVEAU_BO_GART;
written |= 1 << b;
address[b] = nouveau_scratch_data(&nvc0->base, vb->user_buffer,
address[b] = nouveau_scratch_data(&nvc0->base, vb->buffer.user,
base, size, &bo);
if (bo)
BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);
@ -289,7 +289,7 @@ nvc0_update_user_vbufs_shared(struct nvc0_context *nvc0)
nvc0_user_vbuf_range(nvc0, b, &base, &size);
address = nouveau_scratch_data(&nvc0->base, nvc0->vtxbuf[b].user_buffer,
address = nouveau_scratch_data(&nvc0->base, nvc0->vtxbuf[b].buffer.user,
base, size, &bo);
if (bo)
BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, bo_flags, bo);
@ -346,9 +346,9 @@ nvc0_validate_vertex_buffers(struct nvc0_context *nvc0)
/* address/value set in nvc0_update_user_vbufs */
continue;
}
res = nv04_resource(vb->buffer);
res = nv04_resource(vb->buffer.resource);
offset = ve->pipe.src_offset + vb->buffer_offset;
limit = vb->buffer->width0 - 1;
limit = vb->buffer.resource->width0 - 1;
if (unlikely(ve->pipe.instance_divisor)) {
BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(i)), 4);
@ -395,12 +395,12 @@ nvc0_validate_vertex_buffers_shared(struct nvc0_context *nvc0)
}
/* address/value set in nvc0_update_user_vbufs_shared */
continue;
} else if (!vb->buffer) {
} else if (!vb->buffer.resource) {
/* there can be holes in the vertex buffer lists */
IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(b)), 0);
continue;
}
buf = nv04_resource(vb->buffer);
buf = nv04_resource(vb->buffer.resource);
offset = vb->buffer_offset;
limit = buf->base.width0 - 1;

View file

@ -69,11 +69,11 @@ nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
const uint8_t *map;
const struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[i];
if (likely(!vb->buffer))
map = (const uint8_t *)vb->user_buffer;
if (likely(vb->is_user_buffer))
map = (const uint8_t *)vb->buffer.user;
else
map = nouveau_resource_map_offset(&nvc0->base,
nv04_resource(vb->buffer), vb->buffer_offset, NOUVEAU_BO_RD);
nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
map += (intptr_t)index_bias * vb->stride;
@ -101,16 +101,16 @@ nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
unsigned attr = nvc0->vertprog->vp.edgeflag;
struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
struct nv04_resource *buf = nv04_resource(vb->buffer);
struct nv04_resource *buf = nv04_resource(vb->buffer.resource);
ctx->edgeflag.stride = vb->stride;
ctx->edgeflag.width = util_format_get_blocksize(ve->src_format);
if (buf) {
if (!vb->is_user_buffer) {
unsigned offset = vb->buffer_offset + ve->src_offset;
ctx->edgeflag.data = nouveau_resource_map_offset(&nvc0->base,
buf, offset, NOUVEAU_BO_RD);
} else {
ctx->edgeflag.data = (const uint8_t *)vb->user_buffer + ve->src_offset;
ctx->edgeflag.data = (const uint8_t *)vb->buffer.user + ve->src_offset;
}
if (index_bias)
@ -586,7 +586,7 @@ nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
if (info->indexed)
nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
for (i = 0; i < nvc0->num_vtxbufs; ++i)
nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer));
nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));
NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}

View file

@ -63,7 +63,7 @@ static void r300_release_referenced_objects(struct r300_context *r300)
}
/* Manually-created vertex buffers. */
pipe_resource_reference(&r300->dummy_vb.buffer, NULL);
pipe_vertex_buffer_unreference(&r300->dummy_vb);
pb_reference(&r300->vbo, NULL);
r300->context.delete_depth_stencil_alpha_state(&r300->context,
@ -468,7 +468,7 @@ struct pipe_context* r300_create_context(struct pipe_screen* screen,
vb.height0 = 1;
vb.depth0 = 1;
r300->dummy_vb.buffer = screen->resource_create(screen, &vb);
r300->dummy_vb.buffer.resource = screen->resource_create(screen, &vb);
r300->context.set_vertex_buffers(&r300->context, 0, 1, &r300->dummy_vb);
}

View file

@ -966,7 +966,7 @@ void r300_emit_vertex_arrays(struct r300_context* r300, int offset,
}
for (i = 0; i < vertex_array_count; i++) {
buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer.resource);
OUT_CS_RELOC(buf);
}
} else {
@ -1018,7 +1018,7 @@ void r300_emit_vertex_arrays(struct r300_context* r300, int offset,
}
for (i = 0; i < vertex_array_count; i++) {
buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer.resource);
OUT_CS_RELOC(buf);
}
}
@ -1381,7 +1381,7 @@ validate:
struct pipe_resource *buf;
for (; vbuf != last; vbuf++) {
buf = vbuf->buffer;
buf = vbuf->buffer.resource;
if (!buf)
continue;

View file

@ -373,7 +373,7 @@ static void r300_draw_arrays_immediate(struct r300_context *r300,
/* Map the buffer. */
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
r300_resource(vbuf->buffer)->buf,
r300_resource(vbuf->buffer.resource)->buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
@ -741,13 +741,13 @@ static unsigned r300_max_vertex_count(struct r300_context *r300)
unsigned size, max_count, value;
/* We're not interested in constant and per-instance attribs. */
if (!vb->buffer ||
if (!vb->buffer.resource ||
!vb->stride ||
velems[i].instance_divisor) {
continue;
}
size = vb->buffer->width0;
size = vb->buffer.resource->width0;
/* Subtract buffer_offset. */
value = vb->buffer_offset;

View file

@ -111,7 +111,7 @@ r300_buffer_transfer_map( struct pipe_context *context,
/* We changed the buffer, now we need to bind it where the old one was bound. */
for (i = 0; i < r300->nr_vertex_buffers; i++) {
if (r300->vertex_buffer[i].buffer == &rbuf->b.b) {
if (r300->vertex_buffer[i].buffer.resource == &rbuf->b.b) {
r300->vertex_arrays_dirty = TRUE;
break;
}

View file

@ -1773,12 +1773,12 @@ static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
return;
for (i = 0; i < count; i++) {
if (buffers[i].user_buffer) {
if (buffers[i].is_user_buffer) {
draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
buffers[i].user_buffer, ~0);
} else if (buffers[i].buffer) {
buffers[i].buffer.user, ~0);
} else if (buffers[i].buffer.resource) {
draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
r300_resource(buffers[i].buffer)->malloced_buffer, ~0);
r300_resource(buffers[i].buffer.resource)->malloced_buffer, ~0);
}
}
}

View file

@ -146,8 +146,8 @@ static void evergreen_cs_set_vertex_buffer(struct r600_context *rctx,
struct pipe_vertex_buffer *vb = &state->vb[vb_index];
vb->stride = 1;
vb->buffer_offset = offset;
vb->buffer = buffer;
vb->user_buffer = NULL;
vb->buffer.resource = buffer;
vb->is_user_buffer = false;
/* The vertex instructions in the compute shaders use the texture cache,
* so we need to invalidate it. */

View file

@ -1953,7 +1953,7 @@ static void evergreen_emit_vertex_buffers(struct r600_context *rctx,
unsigned buffer_index = u_bit_scan(&dirty_mask);
vb = &state->vb[buffer_index];
rbuffer = (struct r600_resource*)vb->buffer;
rbuffer = (struct r600_resource*)vb->buffer.resource;
assert(rbuffer);
va = rbuffer->gpu_address + vb->buffer_offset;

View file

@ -1658,7 +1658,7 @@ static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom
unsigned buffer_index = u_bit_scan(&dirty_mask);
vb = &rctx->vertex_buffer_state.vb[buffer_index];
rbuffer = (struct r600_resource*)vb->buffer;
rbuffer = (struct r600_resource*)vb->buffer.resource;
assert(rbuffer);
offset = vb->buffer_offset;

View file

@ -562,21 +562,21 @@ static void r600_set_vertex_buffers(struct pipe_context *ctx,
if (input) {
for (i = 0; i < count; i++) {
if (memcmp(&input[i], &vb[i], sizeof(struct pipe_vertex_buffer))) {
if (input[i].buffer) {
if (input[i].buffer.resource) {
vb[i].stride = input[i].stride;
vb[i].buffer_offset = input[i].buffer_offset;
pipe_resource_reference(&vb[i].buffer, input[i].buffer);
pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
new_buffer_mask |= 1 << i;
r600_context_add_resource_size(ctx, input[i].buffer);
r600_context_add_resource_size(ctx, input[i].buffer.resource);
} else {
pipe_resource_reference(&vb[i].buffer, NULL);
pipe_resource_reference(&vb[i].buffer.resource, NULL);
disable_mask |= 1 << i;
}
}
}
} else {
for (i = 0; i < count; i++) {
pipe_resource_reference(&vb[i].buffer, NULL);
pipe_resource_reference(&vb[i].buffer.resource, NULL);
}
disable_mask = ((1ull << count) - 1);
}
@ -2838,7 +2838,7 @@ static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resourc
mask = rctx->vertex_buffer_state.enabled_mask;
while (mask) {
i = u_bit_scan(&mask);
if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
if (rctx->vertex_buffer_state.vb[i].buffer.resource == &rbuffer->b.b) {
rctx->vertex_buffer_state.dirty_mask |= 1 << i;
r600_vertex_buffers_dirty(rctx);
}

View file

@ -1008,11 +1008,11 @@ static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
if (!sctx->vertex_buffer[vb].buffer)
if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)sctx->vertex_buffer[vb].buffer,
(struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
@ -1071,7 +1071,7 @@ bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
uint32_t *desc = &ptr[i*4];
vb = &sctx->vertex_buffer[vbo_index];
rbuffer = (struct r600_resource*)vb->buffer;
rbuffer = (struct r600_resource*)vb->buffer.resource;
if (!rbuffer) {
memset(desc, 0, 16);
continue;
@ -1087,18 +1087,18 @@ bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
if (sctx->b.chip_class != VI && vb->stride) {
/* Round up by rounding down and adding 1 */
desc[2] = (vb->buffer->width0 - offset -
desc[2] = (vb->buffer.resource->width0 - offset -
velems->format_size[i]) /
vb->stride + 1;
} else {
desc[2] = vb->buffer->width0 - offset;
desc[2] = vb->buffer.resource->width0 - offset;
}
desc[3] = velems->rsrc_word3[i];
if (first_vb_use_mask & (1 << i)) {
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)vb->buffer,
(struct r600_resource*)vb->buffer.resource,
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
}
@ -1658,10 +1658,10 @@ static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource
if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
if (!sctx->vertex_buffer[vb].buffer)
if (!sctx->vertex_buffer[vb].buffer.resource)
continue;
if (sctx->vertex_buffer[vb].buffer == buf) {
if (sctx->vertex_buffer[vb].buffer.resource == buf) {
sctx->vertex_buffers_dirty = true;
break;
}

View file

@ -3845,7 +3845,7 @@ static void si_set_vertex_buffers(struct pipe_context *ctx,
const struct pipe_vertex_buffer *src = buffers + i;
struct pipe_vertex_buffer *dsti = dst + i;
if (unlikely(src->user_buffer)) {
if (unlikely(src->is_user_buffer)) {
/* Zero-stride attribs only. */
assert(src->stride == 0);
@ -3856,14 +3856,14 @@ static void si_set_vertex_buffers(struct pipe_context *ctx,
* Use const_uploader to upload into VRAM directly.
*/
u_upload_data(sctx->b.b.const_uploader, 0, 32, 32,
src->user_buffer,
src->buffer.user,
&dsti->buffer_offset,
&dsti->buffer);
&dsti->buffer.resource);
dsti->stride = 0;
} else {
struct pipe_resource *buf = src->buffer;
struct pipe_resource *buf = src->buffer.resource;
pipe_resource_reference(&dsti->buffer, buf);
pipe_resource_reference(&dsti->buffer.resource, buf);
dsti->buffer_offset = src->buffer_offset;
dsti->stride = src->stride;
r600_context_add_resource_size(ctx, buf);
@ -3873,7 +3873,7 @@ static void si_set_vertex_buffers(struct pipe_context *ctx,
}
} else {
for (i = 0; i < count; i++) {
pipe_resource_reference(&dst[i].buffer, NULL);
pipe_resource_reference(&dst[i].buffer.resource, NULL);
}
}
sctx->vertex_buffers_dirty = true;

View file

@ -778,8 +778,11 @@ rbug_set_vertex_buffers(struct pipe_context *_pipe,
if (num_buffers && _buffers) {
memcpy(unwrapped_buffers, _buffers, num_buffers * sizeof(*_buffers));
for (i = 0; i < num_buffers; i++)
unwrapped_buffers[i].buffer = rbug_resource_unwrap(_buffers[i].buffer);
for (i = 0; i < num_buffers; i++) {
if (!_buffers[i].is_user_buffer)
unwrapped_buffers[i].buffer.resource =
rbug_resource_unwrap(_buffers[i].buffer.resource);
}
buffers = unwrapped_buffers;
}

View file

@ -114,7 +114,7 @@ softpipe_destroy( struct pipe_context *pipe )
}
for (i = 0; i < softpipe->num_vertex_buffers; i++) {
pipe_resource_reference(&softpipe->vertex_buffer[i].buffer, NULL);
pipe_vertex_buffer_unreference(&softpipe->vertex_buffer[i]);
}
tgsi_exec_machine_destroy(softpipe->fs_machine);

View file

@ -82,14 +82,15 @@ softpipe_draw_vbo(struct pipe_context *pipe,
/* Map vertex buffers */
for (i = 0; i < sp->num_vertex_buffers; i++) {
const void *buf = sp->vertex_buffer[i].user_buffer;
const void *buf = sp->vertex_buffer[i].is_user_buffer ?
sp->vertex_buffer[i].buffer.user : NULL;
size_t size = ~0;
if (!buf) {
if (!sp->vertex_buffer[i].buffer) {
if (!sp->vertex_buffer[i].buffer.resource) {
continue;
}
buf = softpipe_resource_data(sp->vertex_buffer[i].buffer);
size = sp->vertex_buffer[i].buffer->width0;
buf = softpipe_resource_data(sp->vertex_buffer[i].buffer.resource);
size = sp->vertex_buffer[i].buffer.resource->width0;
}
draw_set_mapped_vertex_buffer(draw, i, buf, size);
}

View file

@ -74,7 +74,7 @@ svga_hwtnl_destroy(struct svga_hwtnl *hwtnl)
}
for (i = 0; i < hwtnl->cmd.vbuf_count; i++)
pipe_resource_reference(&hwtnl->cmd.vbufs[i].buffer, NULL);
pipe_vertex_buffer_unreference(&hwtnl->cmd.vbufs[i]);
for (i = 0; i < hwtnl->cmd.prim_count; i++)
pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);
@ -139,16 +139,12 @@ svga_hwtnl_vertex_buffers(struct svga_hwtnl *hwtnl,
unsigned i;
for (i = 0; i < count; i++) {
pipe_resource_reference(&dst[i].buffer, src[i].buffer);
dst[i].user_buffer = src[i].user_buffer;
dst[i].stride = src[i].stride;
dst[i].buffer_offset = src[i].buffer_offset;
pipe_vertex_buffer_reference(&dst[i], &src[i]);
}
/* release old buffer references */
for ( ; i < hwtnl->cmd.vbuf_count; i++) {
pipe_resource_reference(&dst[i].buffer, NULL);
dst[i].user_buffer = NULL; /* just to be safe */
pipe_vertex_buffer_unreference(&dst[i]);
/* don't bother zeroing stride/offset fields */
}
@ -175,7 +171,7 @@ svga_hwtnl_is_buffer_referred(struct svga_hwtnl *hwtnl,
}
for (i = 0; i < hwtnl->cmd.vbuf_count; ++i) {
if (hwtnl->cmd.vbufs[i].buffer == buffer) {
if (hwtnl->cmd.vbufs[i].buffer.resource == buffer) {
return TRUE;
}
}
@ -205,7 +201,7 @@ draw_vgpu9(struct svga_hwtnl *hwtnl)
for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
handle = svga_buffer_handle(svga, hwtnl->cmd.vbufs[j].buffer);
handle = svga_buffer_handle(svga, hwtnl->cmd.vbufs[j].buffer.resource);
if (!handle)
return PIPE_ERROR_OUT_OF_MEMORY;
@ -526,7 +522,7 @@ draw_vgpu10(struct svga_hwtnl *hwtnl,
/* Get handle for each referenced vertex buffer */
for (i = 0; i < vbuf_count; i++) {
struct svga_buffer *sbuf = svga_buffer(hwtnl->cmd.vbufs[i].buffer);
struct svga_buffer *sbuf = svga_buffer(hwtnl->cmd.vbufs[i].buffer.resource);
if (sbuf) {
assert(sbuf->key.flags & SVGA3D_SURFACE_BIND_VERTEX_BUFFER);
@ -800,7 +796,7 @@ check_draw_params(struct svga_hwtnl *hwtnl,
for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
const struct pipe_vertex_buffer *vb = &hwtnl->cmd.vbufs[j];
unsigned size = vb->buffer ? vb->buffer->width0 : 0;
unsigned size = vb->buffer.resource ? vb->buffer.resource->width0 : 0;
unsigned offset = hwtnl->cmd.vdecl[i].array.offset;
unsigned stride = hwtnl->cmd.vdecl[i].array.stride;
int index_bias = (int) range->indexBias + hwtnl->index_bias;

View file

@ -328,7 +328,7 @@ void svga_cleanup_vertex_state( struct svga_context *svga )
unsigned i;
for (i = 0 ; i < svga->curr.num_vertex_buffers; i++)
pipe_resource_reference(&svga->curr.vb[i].buffer, NULL);
pipe_vertex_buffer_unreference(&svga->curr.vb[i]);
pipe_resource_reference(&svga->state.hw_draw.ib, NULL);

View file

@ -73,10 +73,10 @@ emit_hw_vs_vdecl(struct svga_context *svga, unsigned dirty)
unsigned int offset = vb->buffer_offset + ve[i].src_offset;
unsigned tmp_neg_bias = 0;
if (!vb->buffer)
if (!vb->buffer.resource)
continue;
buffer = svga_buffer(vb->buffer);
buffer = svga_buffer(vb->buffer.resource);
if (buffer->uploaded.start > offset) {
tmp_neg_bias = buffer->uploaded.start - offset;
if (vb->stride)
@ -91,10 +91,10 @@ emit_hw_vs_vdecl(struct svga_context *svga, unsigned dirty)
unsigned usage, index;
struct svga_buffer *buffer;
if (!vb->buffer)
if (!vb->buffer.resource)
continue;
buffer = svga_buffer(vb->buffer);
buffer = svga_buffer(vb->buffer.resource);
svga_generate_vdecl_semantics( i, &usage, &index );
/* SVGA_NEW_VELEMENT

View file

@ -245,10 +245,10 @@ svga_vbuf_submit_state( struct svga_vbuf_render *svga_render )
/* Specify the vertex buffer (there's only ever one) */
{
struct pipe_vertex_buffer vb;
vb.buffer = svga_render->vbuf;
vb.is_user_buffer = false;
vb.buffer.resource = svga_render->vbuf;
vb.buffer_offset = svga_render->vdecl_offset;
vb.stride = vdecl[0].array.stride;
vb.user_buffer = NULL;
svga_hwtnl_vertex_buffers(svga->hwtnl, 1, &vb);
}

View file

@ -70,9 +70,9 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
* Map vertex buffers
*/
for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
if (svga->curr.vb[i].buffer) {
if (svga->curr.vb[i].buffer.resource) {
map = pipe_buffer_map(&svga->pipe,
svga->curr.vb[i].buffer,
svga->curr.vb[i].buffer.resource,
PIPE_TRANSFER_READ,
&vb_transfer[i]);
@ -120,7 +120,7 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
* unmap vertex/index buffers
*/
for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
if (svga->curr.vb[i].buffer) {
if (svga->curr.vb[i].buffer.resource) {
pipe_buffer_unmap(&svga->pipe, vb_transfer[i]);
draw_set_mapped_vertex_buffer(draw, i, NULL, 0);
}

View file

@ -744,8 +744,8 @@ swr_update_resource_status(struct pipe_context *pipe,
/* VBO vertex buffers */
for (uint32_t i = 0; i < ctx->num_vertex_buffers; i++) {
struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
if (!vb->user_buffer)
swr_resource_read(vb->buffer);
if (!vb->is_user_buffer)
swr_resource_read(vb->buffer.resource);
}
/* VBO index buffer */
@ -1236,7 +1236,7 @@ swr_update_derived(struct pipe_context *pipe,
for (UINT i = 0; i < ctx->num_vertex_buffers; i++) {
struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
if (!vb->user_buffer)
if (!vb->is_user_buffer)
continue;
uint32_t elems, base, size;
@ -1258,16 +1258,16 @@ swr_update_derived(struct pipe_context *pipe,
struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
pitch = vb->stride;
if (!vb->user_buffer) {
if (!vb->is_user_buffer) {
/* VBO
* size is based on buffer->width0 rather than info.max_index
* to prevent having to validate VBO on each draw */
size = vb->buffer->width0;
size = vb->buffer.resource->width0;
elems = size / pitch;
partial_inbounds = size % pitch;
min_vertex_index = 0;
p_data = swr_resource_data(vb->buffer) + vb->buffer_offset;
p_data = swr_resource_data(vb->buffer.resource) + vb->buffer_offset;
} else {
/* Client buffer
* client memory is one-time use, re-trigger SWR_NEW_VERTEX to
@ -1281,7 +1281,7 @@ swr_update_derived(struct pipe_context *pipe,
/* Copy only needed vertices to scratch space */
size = AlignUp(size, 4);
const void *ptr = (const uint8_t *) vb->user_buffer + base;
const void *ptr = (const uint8_t *) vb->buffer.user + base;
memcpy(scratch, ptr, size);
ptr = scratch;
scratch += size;

View file

@ -653,9 +653,9 @@ void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state)
trace_dump_struct_begin("pipe_vertex_buffer");
trace_dump_member(uint, state, stride);
trace_dump_member(bool, state, is_user_buffer);
trace_dump_member(uint, state, buffer_offset);
trace_dump_member(ptr, state, buffer);
trace_dump_member(ptr, state, user_buffer);
trace_dump_member(ptr, state, buffer.resource);
trace_dump_struct_end();
}

View file

@ -186,7 +186,7 @@ vc4_emit_gl_shader_state(struct vc4_context *vc4,
struct pipe_vertex_element *elem = &vtx->pipe[i];
struct pipe_vertex_buffer *vb =
&vertexbuf->vb[elem->vertex_buffer_index];
struct vc4_resource *rsc = vc4_resource(vb->buffer);
struct vc4_resource *rsc = vc4_resource(vb->buffer.resource);
/* not vc4->dirty tracked: vc4->last_index_bias */
uint32_t offset = (vb->buffer_offset +
elem->src_offset +

View file

@ -124,7 +124,7 @@ static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
unsigned i;
for (i = 0; i < vctx->num_vertex_buffers; i++) {
res = virgl_resource(vctx->vertex_buffer[i].buffer);
res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
if (res)
vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}

View file

@ -389,7 +389,7 @@ int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
int i;
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
for (i = 0; i < num_buffers; i++) {
struct virgl_resource *res = virgl_resource(buffers[i].buffer);
struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
virgl_encoder_write_res(ctx, res);

View file

@ -534,7 +534,6 @@ struct pipe_transfer
};
/**
* A vertex buffer. Typically, all the vertex data/attributes for
* drawing something will be in one buffer. But it's also possible, for
@ -542,10 +541,14 @@ struct pipe_transfer
*/
struct pipe_vertex_buffer
{
unsigned stride; /**< stride to same attrib in next vertex, in bytes */
uint16_t stride; /**< stride to same attrib in next vertex, in bytes */
bool is_user_buffer;
unsigned buffer_offset; /**< offset to start of data in buffer, in bytes */
struct pipe_resource *buffer; /**< the actual buffer */
const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
union {
struct pipe_resource *resource; /**< the actual buffer */
const void *user; /**< pointer to a user buffer */
} buffer;
};

View file

@ -2815,26 +2815,27 @@ NineDevice9_DrawPrimitiveUP( struct NineDevice9 *This,
vtxbuf.stride = VertexStreamZeroStride;
vtxbuf.buffer_offset = 0;
vtxbuf.buffer = NULL;
vtxbuf.user_buffer = pVertexStreamZeroData;
vtxbuf.is_user_buffer = true;
vtxbuf.buffer.user = pVertexStreamZeroData;
if (!This->driver_caps.user_vbufs) {
vtxbuf.is_user_buffer = false;
vtxbuf.buffer.resource = NULL;
u_upload_data(This->vertex_uploader,
0,
(prim_count_to_vertex_count(PrimitiveType, PrimitiveCount)) * VertexStreamZeroStride, /* XXX */
4,
vtxbuf.user_buffer,
pVertexStreamZeroData,
&vtxbuf.buffer_offset,
&vtxbuf.buffer);
&vtxbuf.buffer.resource);
u_upload_unmap(This->vertex_uploader);
vtxbuf.user_buffer = NULL;
}
NineBeforeDraw(This);
nine_context_draw_primitive_from_vtxbuf(This, PrimitiveType, PrimitiveCount, &vtxbuf);
NineAfterDraw(This);
pipe_resource_reference(&vtxbuf.buffer, NULL);
pipe_vertex_buffer_unreference(&vtxbuf);
NineDevice9_PauseRecording(This);
NineDevice9_SetStreamSource(This, 0, NULL, 0, 0);
@ -2872,8 +2873,8 @@ NineDevice9_DrawIndexedPrimitiveUP( struct NineDevice9 *This,
vbuf.stride = VertexStreamZeroStride;
vbuf.buffer_offset = 0;
vbuf.buffer = NULL;
vbuf.user_buffer = pVertexStreamZeroData;
vbuf.is_user_buffer = true;
vbuf.buffer.user = pVertexStreamZeroData;
ibuf.index_size = (IndexDataFormat == D3DFMT_INDEX16) ? 2 : 4;
ibuf.offset = 0;
@ -2882,17 +2883,18 @@ NineDevice9_DrawIndexedPrimitiveUP( struct NineDevice9 *This,
if (!This->driver_caps.user_vbufs) {
const unsigned base = MinVertexIndex * VertexStreamZeroStride;
vbuf.is_user_buffer = false;
vbuf.buffer.resource = NULL;
u_upload_data(This->vertex_uploader,
base,
NumVertices * VertexStreamZeroStride, /* XXX */
4,
(const uint8_t *)vbuf.user_buffer + base,
(const uint8_t *)pVertexStreamZeroData + base,
&vbuf.buffer_offset,
&vbuf.buffer);
&vbuf.buffer.resource);
u_upload_unmap(This->vertex_uploader);
/* Won't be used: */
vbuf.buffer_offset -= base;
vbuf.user_buffer = NULL;
}
if (This->csmt_active) {
u_upload_data(This->pipe_secondary->stream_uploader,
@ -2915,7 +2917,7 @@ NineDevice9_DrawIndexedPrimitiveUP( struct NineDevice9 *This,
&ibuf);
NineAfterDraw(This);
pipe_resource_reference(&vbuf.buffer, NULL);
pipe_vertex_buffer_unreference(&vbuf);
pipe_resource_reference(&ibuf.buffer, NULL);
NineDevice9_PauseRecording(This);

View file

@ -402,7 +402,18 @@ name##_priv( struct NineDevice9 *device ARGS_FOR_DECLARATION( __VA_ARGS__ ) )
,\
y
#define ARG_BIND_BUF(x, y) \
#define ARG_BIND_VBUF(x, y) \
x _##y ,\
memcpy(&args->_##y , y, sizeof(x)); \
args->_##y.buffer.resource = NULL; \
pipe_resource_reference(&args->_##y.buffer.resource, y->buffer.resource); ,\
x *y ,\
&args->_##y ,\
pipe_resource_reference(&args->_##y.buffer.resource, NULL); ,\
,\
y
#define ARG_BIND_IBUF(x, y) \
x _##y ,\
memcpy(&args->_##y , y, sizeof(x)); \
args->_##y.buffer = NULL; \

View file

@ -899,9 +899,9 @@ update_vertex_buffers(struct NineDevice9 *device)
if (context->dummy_vbo_bound_at >= 0) {
if (!context->vbo_bound_done) {
dummy_vtxbuf.buffer = device->dummy_vbo;
dummy_vtxbuf.buffer.resource = device->dummy_vbo;
dummy_vtxbuf.stride = 0;
dummy_vtxbuf.user_buffer = NULL;
dummy_vtxbuf.is_user_buffer = false;
dummy_vtxbuf.buffer_offset = 0;
pipe->set_vertex_buffers(pipe, context->dummy_vbo_bound_at,
1, &dummy_vtxbuf);
@ -912,7 +912,7 @@ update_vertex_buffers(struct NineDevice9 *device)
for (i = 0; mask; mask >>= 1, ++i) {
if (mask & 1) {
if (context->vtxbuf[i].buffer)
if (context->vtxbuf[i].buffer.resource)
pipe->set_vertex_buffers(pipe, i, 1, &context->vtxbuf[i]);
else
pipe->set_vertex_buffers(pipe, i, 1, NULL);
@ -1526,7 +1526,7 @@ CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_apply,
context->vtxbuf[i].stride = Stride;
context->vtxbuf[i].buffer_offset = OffsetInBytes;
pipe_resource_reference(&context->vtxbuf[i].buffer, res);
pipe_resource_reference(&context->vtxbuf[i].buffer.resource, res);
context->changed.vtxbuf |= 1 << StreamNumber;
}
@ -2609,7 +2609,7 @@ CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive,
CSMT_ITEM_NO_WAIT(nine_context_draw_primitive_from_vtxbuf,
ARG_VAL(D3DPRIMITIVETYPE, PrimitiveType),
ARG_VAL(UINT, PrimitiveCount),
ARG_BIND_BUF(struct pipe_vertex_buffer, vtxbuf))
ARG_BIND_VBUF(struct pipe_vertex_buffer, vtxbuf))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
@ -2633,8 +2633,8 @@ CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive_from_vtxbuf_idxbuf,
ARG_VAL(UINT, MinVertexIndex),
ARG_VAL(UINT, NumVertices),
ARG_VAL(UINT, PrimitiveCount),
ARG_BIND_BUF(struct pipe_vertex_buffer, vbuf),
ARG_BIND_BUF(struct pipe_index_buffer, ibuf))
ARG_BIND_VBUF(struct pipe_vertex_buffer, vbuf),
ARG_BIND_IBUF(struct pipe_index_buffer, ibuf))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
@ -3145,7 +3145,7 @@ nine_context_clear(struct NineDevice9 *device)
nine_bind(&context->ps, NULL);
nine_bind(&context->vdecl, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; ++i)
pipe_resource_reference(&context->vtxbuf[i].buffer, NULL);
pipe_vertex_buffer_unreference(&context->vtxbuf[i]);
pipe_resource_reference(&context->idxbuf.buffer, NULL);
for (i = 0; i < NINE_MAX_SAMPLERS; ++i) {
@ -3283,33 +3283,36 @@ update_vertex_buffers_sw(struct NineDevice9 *device, int start_vertice, int num_
unsigned offset;
struct pipe_resource *buf;
struct pipe_box box;
void *userbuf;
vtxbuf = state->vtxbuf[i];
vtxbuf.buffer = NineVertexBuffer9_GetResource(state->stream[i], &offset);
buf = NineVertexBuffer9_GetResource(state->stream[i], &offset);
DBG("Locking %p (offset %d, length %d)\n", vtxbuf.buffer,
DBG("Locking %p (offset %d, length %d)\n", buf,
vtxbuf.buffer_offset, num_vertices * vtxbuf.stride);
u_box_1d(vtxbuf.buffer_offset + offset + start_vertice * vtxbuf.stride,
num_vertices * vtxbuf.stride, &box);
buf = vtxbuf.buffer;
vtxbuf.user_buffer = pipe->transfer_map(pipe, buf, 0, PIPE_TRANSFER_READ, &box,
&(sw_internal->transfers_so[i]));
vtxbuf.buffer = NULL;
userbuf = pipe->transfer_map(pipe, buf, 0, PIPE_TRANSFER_READ, &box,
&(sw_internal->transfers_so[i]));
vtxbuf.is_user_buffer = true;
vtxbuf.buffer.user = userbuf;
if (!device->driver_caps.user_sw_vbufs) {
vtxbuf.buffer.resource = NULL;
vtxbuf.is_user_buffer = false;
u_upload_data(device->pipe_sw->stream_uploader,
0,
box.width,
16,
vtxbuf.user_buffer,
userbuf,
&(vtxbuf.buffer_offset),
&(vtxbuf.buffer));
&(vtxbuf.buffer.resource));
u_upload_unmap(device->pipe_sw->stream_uploader);
vtxbuf.user_buffer = NULL;
}
pipe_sw->set_vertex_buffers(pipe_sw, i, 1, &vtxbuf);
if (vtxbuf.buffer)
pipe_resource_reference(&vtxbuf.buffer, NULL);
pipe_vertex_buffer_unreference(&vtxbuf);
} else
pipe_sw->set_vertex_buffers(pipe_sw, i, 1, NULL);
}

View file

@ -507,8 +507,8 @@ setup_interleaved_attribs(struct st_context *st,
*/
if (vpv->num_inputs == 0) {
/* just defensive coding here */
vbuffer->buffer = NULL;
vbuffer->user_buffer = NULL;
vbuffer->buffer.resource = NULL;
vbuffer->is_user_buffer = false;
vbuffer->buffer_offset = 0;
vbuffer->stride = 0;
}
@ -520,15 +520,15 @@ setup_interleaved_attribs(struct st_context *st,
return FALSE; /* out-of-memory error probably */
}
vbuffer->buffer = stobj->buffer;
vbuffer->user_buffer = NULL;
vbuffer->buffer.resource = stobj->buffer;
vbuffer->is_user_buffer = false;
vbuffer->buffer_offset = pointer_to_offset(low_addr);
vbuffer->stride = stride;
}
else {
/* all interleaved arrays in user memory */
vbuffer->buffer = NULL;
vbuffer->user_buffer = low_addr;
vbuffer->buffer.user = low_addr;
vbuffer->is_user_buffer = !!low_addr; /* if NULL, then unbind */
vbuffer->buffer_offset = 0;
vbuffer->stride = stride;
}
@ -584,8 +584,8 @@ setup_non_interleaved_attribs(struct st_context *st,
return FALSE; /* out-of-memory error probably */
}
vbuffer[bufidx].buffer = stobj->buffer;
vbuffer[bufidx].user_buffer = NULL;
vbuffer[bufidx].buffer.resource = stobj->buffer;
vbuffer[bufidx].is_user_buffer = false;
vbuffer[bufidx].buffer_offset = pointer_to_offset(array->Ptr);
}
else {
@ -603,8 +603,8 @@ setup_non_interleaved_attribs(struct st_context *st,
assert(ptr);
vbuffer[bufidx].buffer = NULL;
vbuffer[bufidx].user_buffer = ptr;
vbuffer[bufidx].buffer.user = ptr;
vbuffer[bufidx].is_user_buffer = !!ptr; /* if NULL, then unbind */
vbuffer[bufidx].buffer_offset = 0;
}

View file

@ -694,7 +694,7 @@ st_DrawAtlasBitmaps(struct gl_context *ctx,
vb.stride = sizeof(struct st_util_vertex);
u_upload_alloc(pipe->stream_uploader, 0, num_vert_bytes, 4,
&vb.buffer_offset, &vb.buffer, (void **) &verts);
&vb.buffer_offset, &vb.buffer.resource, (void **) &verts);
if (unlikely(!verts)) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glCallLists(bitmap text)");
@ -781,7 +781,7 @@ st_DrawAtlasBitmaps(struct gl_context *ctx,
out:
restore_render_state(ctx);
pipe_resource_reference(&vb.buffer, NULL);
pipe_resource_reference(&vb.buffer.resource, NULL);
pipe_sampler_view_reference(&sv, NULL);

View file

@ -385,8 +385,8 @@ st_draw_quad(struct st_context *st,
u_upload_alloc(st->pipe->stream_uploader, 0,
4 * sizeof(struct st_util_vertex), 4,
&vb.buffer_offset, &vb.buffer, (void **) &verts);
if (!vb.buffer) {
&vb.buffer_offset, &vb.buffer.resource, (void **) &verts);
if (!vb.buffer.resource) {
return false;
}
@ -453,7 +453,7 @@ st_draw_quad(struct st_context *st,
cso_draw_arrays(st->cso_context, PIPE_PRIM_TRIANGLE_FAN, 0, 4);
}
pipe_resource_reference(&vb.buffer, NULL);
pipe_resource_reference(&vb.buffer.resource, NULL);
return true;
}

View file

@ -194,27 +194,27 @@ st_feedback_draw_vbo(struct gl_context *ctx,
struct st_buffer_object *stobj = st_buffer_object(bufobj);
assert(stobj->buffer);
vbuffers[attr].buffer = NULL;
vbuffers[attr].user_buffer = NULL;
pipe_resource_reference(&vbuffers[attr].buffer, stobj->buffer);
vbuffers[attr].buffer.resource = NULL;
vbuffers[attr].is_user_buffer = false;
pipe_resource_reference(&vbuffers[attr].buffer.resource, stobj->buffer);
vbuffers[attr].buffer_offset = pointer_to_offset(low_addr);
velements[attr].src_offset = arrays[mesaAttr]->Ptr - low_addr;
/* map the attrib buffer */
map = pipe_buffer_map(pipe, vbuffers[attr].buffer,
map = pipe_buffer_map(pipe, vbuffers[attr].buffer.resource,
PIPE_TRANSFER_READ,
&vb_transfer[attr]);
draw_set_mapped_vertex_buffer(draw, attr, map,
vbuffers[attr].buffer->width0);
vbuffers[attr].buffer.resource->width0);
}
else {
vbuffers[attr].buffer = NULL;
vbuffers[attr].user_buffer = arrays[mesaAttr]->Ptr;
vbuffers[attr].buffer.user = arrays[mesaAttr]->Ptr;
vbuffers[attr].is_user_buffer = true;
vbuffers[attr].buffer_offset = 0;
velements[attr].src_offset = 0;
draw_set_mapped_vertex_buffer(draw, attr, vbuffers[attr].user_buffer,
~0);
draw_set_mapped_vertex_buffer(draw, attr,
vbuffers[attr].buffer.user, ~0);
}
/* common-case setup */
@ -292,7 +292,7 @@ st_feedback_draw_vbo(struct gl_context *ctx,
if (vb_transfer[attr])
pipe_buffer_unmap(pipe, vb_transfer[attr]);
draw_set_mapped_vertex_buffer(draw, attr, NULL, 0);
pipe_resource_reference(&vbuffers[attr].buffer, NULL);
pipe_vertex_buffer_unreference(&vbuffers[attr]);
}
draw_set_vertex_buffers(draw, 0, vp->num_inputs, NULL);
}

View file

@ -215,7 +215,7 @@ st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
/* Upload vertices */
{
struct pipe_vertex_buffer vbo;
struct pipe_vertex_buffer vbo = {0};
struct pipe_vertex_element velem;
float x0 = (float) addr->xoffset / surface_width * 2.0f - 1.0f;
@ -225,12 +225,10 @@ st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
float *verts = NULL;
vbo.user_buffer = NULL;
vbo.buffer = NULL;
vbo.stride = 2 * sizeof(float);
u_upload_alloc(st->pipe->stream_uploader, 0, 8 * sizeof(float), 4,
&vbo.buffer_offset, &vbo.buffer, (void **) &verts);
&vbo.buffer_offset, &vbo.buffer.resource, (void **) &verts);
if (!verts)
return false;
@ -254,7 +252,7 @@ st_pbo_draw(struct st_context *st, const struct st_pbo_addresses *addr,
cso_set_vertex_buffers(cso, velem.vertex_buffer_index, 1, &vbo);
pipe_resource_reference(&vbo.buffer, NULL);
pipe_resource_reference(&vbo.buffer.resource, NULL);
}
/* Upload constants */