gallium: add pipe_context::resource_release to eliminate buffer refcounting

Refcounting uses atomics, which are a significant source of CPU overhead
in many applications. By adding a method to inform the driver that
the frontend has released ownership of a buffer, all other refcounting
for the buffer can be eliminated.

See the MR for more details.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36296>
Commit: b3133e250e
Parent: 7c1c2f8fce
Author: Mike Blumenkrantz, 2025-07-18 07:34:20 -04:00 (committed by Marge Bot)
148 changed files with 730 additions and 770 deletions
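
For orientation, the caller-side pattern this series introduces looks roughly like the sketch below (compare the hud, blitter and primconvert hunks further down). my_draw_quad and its arguments are hypothetical and assume vertex elements, shaders and framebuffer state are already bound; the u_upload_data, set_vertex_buffers and pipe_resource_release calls follow the updated signatures in this diff.

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"

/* Hypothetical helper sketching the new upload/release flow. */
static void
my_draw_quad(struct pipe_context *pipe, const float *verts, unsigned size)
{
   struct pipe_vertex_buffer vb = {0};
   struct pipe_resource *releasebuf = NULL;

   /* The uploader now also hands back the upload buffer it retired, if any. */
   u_upload_data(pipe->stream_uploader, 0, size, 16, verts,
                 &vb.buffer_offset, &vb.buffer.resource, &releasebuf);
   u_upload_unmap(pipe->stream_uploader);

   pipe->set_vertex_buffers(pipe, 1, &vb);
   util_draw_arrays(pipe, MESA_PRIM_QUADS, 0, 4);

   /* One notification instead of per-reference atomics: the frontend has
    * released ownership of the retired upload buffer. */
   pipe_resource_release(pipe, releasebuf);
}

The frontend never takes or drops a long-lived reference on the upload buffer; it only tells the driver, once, that ownership has been released.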

View file

@ -413,7 +413,7 @@ cso_unbind_context(struct cso_context *cso)
ctx->base.pipe->set_shader_images(ctx->base.pipe, sh, 0, 0, maximg, NULL); ctx->base.pipe->set_shader_images(ctx->base.pipe, sh, 0, 0, maximg, NULL);
} }
for (int i = 0; i < maxcb; i++) { for (int i = 0; i < maxcb; i++) {
ctx->base.pipe->set_constant_buffer(ctx->base.pipe, sh, i, false, NULL); ctx->base.pipe->set_constant_buffer(ctx->base.pipe, sh, i, NULL);
} }
} }
} }
@ -422,9 +422,9 @@ cso_unbind_context(struct cso_context *cso)
struct pipe_stencil_ref sr = {0}; struct pipe_stencil_ref sr = {0};
ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr); ctx->base.pipe->set_stencil_ref(ctx->base.pipe, sr);
ctx->base.pipe->bind_fs_state(ctx->base.pipe, NULL); ctx->base.pipe->bind_fs_state(ctx->base.pipe, NULL);
ctx->base.pipe->set_constant_buffer(ctx->base.pipe, MESA_SHADER_FRAGMENT, 0, false, NULL); ctx->base.pipe->set_constant_buffer(ctx->base.pipe, MESA_SHADER_FRAGMENT, 0, NULL);
ctx->base.pipe->bind_vs_state(ctx->base.pipe, NULL); ctx->base.pipe->bind_vs_state(ctx->base.pipe, NULL);
ctx->base.pipe->set_constant_buffer(ctx->base.pipe, MESA_SHADER_VERTEX, 0, false, NULL); ctx->base.pipe->set_constant_buffer(ctx->base.pipe, MESA_SHADER_VERTEX, 0, NULL);
if (ctx->has_geometry_shader) { if (ctx->has_geometry_shader) {
ctx->base.pipe->bind_gs_state(ctx->base.pipe, NULL); ctx->base.pipe->bind_gs_state(ctx->base.pipe, NULL);
} }
@ -1378,18 +1378,17 @@ cso_restore_vertex_elements(struct cso_context_priv *ctx)
void void
cso_set_vertex_buffers(struct cso_context *cso, cso_set_vertex_buffers(struct cso_context *cso,
unsigned count, unsigned count,
bool take_ownership,
const struct pipe_vertex_buffer *buffers) const struct pipe_vertex_buffer *buffers)
{ {
struct cso_context_priv *ctx = (struct cso_context_priv *)cso; struct cso_context_priv *ctx = (struct cso_context_priv *)cso;
struct u_vbuf *vbuf = ctx->vbuf_current; struct u_vbuf *vbuf = ctx->vbuf_current;
if (vbuf) { if (vbuf) {
u_vbuf_set_vertex_buffers(vbuf, count, take_ownership, buffers); u_vbuf_set_vertex_buffers(vbuf, count, buffers);
return; return;
} }
util_set_vertex_buffers(ctx->base.pipe, count, take_ownership, buffers); ctx->base.pipe->set_vertex_buffers(ctx->base.pipe, count, buffers);
} }
@ -1426,7 +1425,7 @@ cso_set_vertex_buffers_and_elements(struct cso_context *cso,
} }
u_vbuf_set_vertex_elements(vbuf, velems); u_vbuf_set_vertex_elements(vbuf, velems);
u_vbuf_set_vertex_buffers(vbuf, vb_count, true, vbuffers); u_vbuf_set_vertex_buffers(vbuf, vb_count, vbuffers);
return; return;
} }
@ -1832,9 +1831,9 @@ cso_restore_state(struct cso_context *ctx, unsigned unbind)
if (state_mask & CSO_BIT_VIEWPORT) if (state_mask & CSO_BIT_VIEWPORT)
cso_restore_viewport(cso); cso_restore_viewport(cso);
if (unbind & CSO_UNBIND_VS_CONSTANTS) if (unbind & CSO_UNBIND_VS_CONSTANTS)
cso->base.pipe->set_constant_buffer(cso->base.pipe, MESA_SHADER_VERTEX, 0, false, NULL); cso->base.pipe->set_constant_buffer(cso->base.pipe, MESA_SHADER_VERTEX, 0, NULL);
if (unbind & CSO_UNBIND_FS_CONSTANTS) if (unbind & CSO_UNBIND_FS_CONSTANTS)
cso->base.pipe->set_constant_buffer(cso->base.pipe, MESA_SHADER_FRAGMENT, 0, false, NULL); cso->base.pipe->set_constant_buffer(cso->base.pipe, MESA_SHADER_FRAGMENT, 0, NULL);
if (state_mask & CSO_BIT_VERTEX_ELEMENTS) if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
cso_restore_vertex_elements(cso); cso_restore_vertex_elements(cso);
if (state_mask & CSO_BIT_STREAM_OUTPUTS) if (state_mask & CSO_BIT_STREAM_OUTPUTS)

View file

@ -98,7 +98,6 @@ cso_set_vertex_elements(struct cso_context *ctx,
void cso_set_vertex_buffers(struct cso_context *ctx, void cso_set_vertex_buffers(struct cso_context *ctx,
unsigned count, unsigned count,
bool take_ownership,
const struct pipe_vertex_buffer *buffers); const struct pipe_vertex_buffer *buffers);
void cso_set_stream_outputs(struct cso_context *ctx, void cso_set_stream_outputs(struct cso_context *ctx,

View file

@ -417,7 +417,7 @@ draw_set_vertex_buffers(struct draw_context *draw,
util_set_vertex_buffers_count(draw->pt.vertex_buffer, util_set_vertex_buffers_count(draw->pt.vertex_buffer,
&draw->pt.nr_vertex_buffers, &draw->pt.nr_vertex_buffers,
buffers, count, false); buffers, count);
} }

View file

@ -362,7 +362,6 @@ DD_IMM_STATE(polygon_stipple, const struct pipe_poly_stipple, *state, state)
static void static void
dd_context_set_constant_buffer(struct pipe_context *_pipe, dd_context_set_constant_buffer(struct pipe_context *_pipe,
mesa_shader_stage shader, uint index, mesa_shader_stage shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *constant_buffer) const struct pipe_constant_buffer *constant_buffer)
{ {
struct dd_context *dctx = dd_context(_pipe); struct dd_context *dctx = dd_context(_pipe);
@ -370,7 +369,7 @@ dd_context_set_constant_buffer(struct pipe_context *_pipe,
safe_memcpy(&dctx->draw_state.constant_buffers[shader][index], safe_memcpy(&dctx->draw_state.constant_buffers[shader][index],
constant_buffer, sizeof(*constant_buffer)); constant_buffer, sizeof(*constant_buffer));
pipe->set_constant_buffer(pipe, shader, index, take_ownership, constant_buffer); pipe->set_constant_buffer(pipe, shader, index, constant_buffer);
} }
static void static void

View file

@ -414,6 +414,12 @@ static bool noop_is_resource_busy(struct pipe_screen *screen,
return false; return false;
} }
static void
noop_resource_release(struct pipe_context *ctx, struct pipe_resource *resource)
{
pipe_resource_reference(&resource, NULL);
}
static struct pipe_context *noop_create_context(struct pipe_screen *screen, static struct pipe_context *noop_create_context(struct pipe_screen *screen,
void *priv, unsigned flags) void *priv, unsigned flags)
{ {
@ -458,6 +464,7 @@ static struct pipe_context *noop_create_context(struct pipe_screen *screen,
ctx->invalidate_resource = noop_invalidate_resource; ctx->invalidate_resource = noop_invalidate_resource;
ctx->set_context_param = noop_set_context_param; ctx->set_context_param = noop_set_context_param;
ctx->set_frontend_noop = noop_set_frontend_noop; ctx->set_frontend_noop = noop_set_frontend_noop;
ctx->resource_release = noop_resource_release;
noop_init_state_functions(ctx); noop_init_state_functions(ctx);
p_atomic_inc(&screen->num_contexts); p_atomic_inc(&screen->num_contexts);

View file

@ -154,13 +154,8 @@ static void noop_set_framebuffer_state(struct pipe_context *ctx,
static void noop_set_constant_buffer(struct pipe_context *ctx, static void noop_set_constant_buffer(struct pipe_context *ctx,
mesa_shader_stage shader, uint index, mesa_shader_stage shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *cb) const struct pipe_constant_buffer *cb)
{ {
if (take_ownership && cb) {
struct pipe_resource *buf = cb->buffer;
pipe_resource_reference(&buf, NULL);
}
} }
static void noop_set_inlinable_constants(struct pipe_context *ctx, static void noop_set_inlinable_constants(struct pipe_context *ctx,

View file

@ -942,7 +942,6 @@ trace_context_set_sample_mask(struct pipe_context *_pipe,
static void static void
trace_context_set_constant_buffer(struct pipe_context *_pipe, trace_context_set_constant_buffer(struct pipe_context *_pipe,
mesa_shader_stage shader, uint index, mesa_shader_stage shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *constant_buffer) const struct pipe_constant_buffer *constant_buffer)
{ {
struct trace_context *tr_ctx = trace_context(_pipe); struct trace_context *tr_ctx = trace_context(_pipe);
@ -953,10 +952,9 @@ trace_context_set_constant_buffer(struct pipe_context *_pipe,
trace_dump_arg(ptr, pipe); trace_dump_arg(ptr, pipe);
trace_dump_arg_enum(mesa_shader_stage, shader); trace_dump_arg_enum(mesa_shader_stage, shader);
trace_dump_arg(uint, index); trace_dump_arg(uint, index);
trace_dump_arg(bool, take_ownership);
trace_dump_arg(constant_buffer, constant_buffer); trace_dump_arg(constant_buffer, constant_buffer);
pipe->set_constant_buffer(pipe, shader, index, take_ownership, constant_buffer); pipe->set_constant_buffer(pipe, shader, index, constant_buffer);
trace_dump_call_end(); trace_dump_call_end();
} }
@ -2464,6 +2462,21 @@ trace_context_get_device_reset_status(struct pipe_context *_pipe)
return status; return status;
} }
static void
trace_context_resource_release(struct pipe_context *_pipe, struct pipe_resource *resource)
{
struct trace_context *tr_ctx = trace_context(_pipe);
struct pipe_context *pipe = tr_ctx->pipe;
trace_dump_call_begin("pipe_context", "resource_release");
trace_dump_arg(ptr, pipe);
trace_dump_arg(ptr, resource);
pipe->resource_release(pipe, resource);
trace_dump_call_end();
}
struct pipe_context * struct pipe_context *
trace_context_create(struct trace_screen *tr_scr, trace_context_create(struct trace_screen *tr_scr,
struct pipe_context *pipe) struct pipe_context *pipe)
@ -2611,6 +2624,7 @@ trace_context_create(struct trace_screen *tr_scr,
TR_CTX_INIT(set_global_binding); TR_CTX_INIT(set_global_binding);
TR_CTX_INIT(set_hw_atomic_buffers); TR_CTX_INIT(set_hw_atomic_buffers);
TR_CTX_INIT(get_device_reset_status); TR_CTX_INIT(get_device_reset_status);
TR_CTX_INIT(resource_release);
#undef TR_CTX_INIT #undef TR_CTX_INIT

View file

@ -108,14 +108,16 @@ hud_draw_colored_prims(struct hud_context *hud, unsigned prim,
hud->constants.scale[1] = yscale * hud_scale; hud->constants.scale[1] = yscale * hud_scale;
pipe_upload_constant_buffer0(pipe, MESA_SHADER_VERTEX, &hud->constbuf); pipe_upload_constant_buffer0(pipe, MESA_SHADER_VERTEX, &hud->constbuf);
struct pipe_resource *releasebuf = NULL;
u_upload_data(hud->pipe->stream_uploader, 0, u_upload_data(hud->pipe->stream_uploader, 0,
num_vertices * 2 * sizeof(float), 16, buffer, num_vertices * 2 * sizeof(float), 16, buffer,
&vbuffer.buffer_offset, &vbuffer.buffer.resource); &vbuffer.buffer_offset, &vbuffer.buffer.resource, &releasebuf);
u_upload_unmap(hud->pipe->stream_uploader); u_upload_unmap(hud->pipe->stream_uploader);
cso_set_vertex_buffers(cso, 1, true, &vbuffer); cso_set_vertex_buffers(cso, 1, &vbuffer);
cso_set_fragment_shader_handle(hud->cso, hud->fs_color); cso_set_fragment_shader_handle(hud->cso, hud->fs_color);
cso_draw_arrays(cso, prim, 0, num_vertices); cso_draw_arrays(cso, prim, 0, num_vertices);
pipe_resource_release(hud->pipe, releasebuf);
} }
static void static void
@ -596,7 +598,7 @@ hud_draw_results(struct hud_context *hud, struct pipe_resource *tex)
pipe_upload_constant_buffer0(pipe, MESA_SHADER_VERTEX, &hud->constbuf); pipe_upload_constant_buffer0(pipe, MESA_SHADER_VERTEX, &hud->constbuf);
cso_set_vertex_buffers(cso, 1, true, &hud->bg.vbuf); cso_set_vertex_buffers(cso, 1, &hud->bg.vbuf);
cso_draw_arrays(cso, MESA_PRIM_QUADS, 0, hud->bg.num_vertices); cso_draw_arrays(cso, MESA_PRIM_QUADS, 0, hud->bg.num_vertices);
hud->bg.vbuf.buffer.resource = NULL; hud->bg.vbuf.buffer.resource = NULL;
} else { } else {
@ -607,7 +609,7 @@ hud_draw_results(struct hud_context *hud, struct pipe_resource *tex)
if (hud->text.num_vertices) { if (hud->text.num_vertices) {
cso_set_vertex_shader_handle(cso, hud->vs_text); cso_set_vertex_shader_handle(cso, hud->vs_text);
cso_set_vertex_elements(cso, &hud->text_velems); cso_set_vertex_elements(cso, &hud->text_velems);
cso_set_vertex_buffers(cso, 1, true, &hud->text.vbuf); cso_set_vertex_buffers(cso, 1, &hud->text.vbuf);
cso_set_fragment_shader_handle(hud->cso, hud->fs_text); cso_set_fragment_shader_handle(hud->cso, hud->fs_text);
cso_draw_arrays(cso, MESA_PRIM_QUADS, 0, hud->text.num_vertices); cso_draw_arrays(cso, MESA_PRIM_QUADS, 0, hud->text.num_vertices);
cso_set_vertex_elements(cso, &hud->velems); cso_set_vertex_elements(cso, &hud->velems);
@ -634,7 +636,7 @@ hud_draw_results(struct hud_context *hud, struct pipe_resource *tex)
if (hud->whitelines.num_vertices) { if (hud->whitelines.num_vertices) {
cso_set_vertex_shader_handle(cso, hud->vs_color); cso_set_vertex_shader_handle(cso, hud->vs_color);
cso_set_vertex_buffers(cso, 1, true, &hud->whitelines.vbuf); cso_set_vertex_buffers(cso, 1, &hud->whitelines.vbuf);
cso_set_fragment_shader_handle(hud->cso, hud->fs_color); cso_set_fragment_shader_handle(hud->cso, hud->fs_color);
cso_draw_arrays(cso, MESA_PRIM_LINES, 0, hud->whitelines.num_vertices); cso_draw_arrays(cso, MESA_PRIM_LINES, 0, hud->whitelines.num_vertices);
hud->whitelines.vbuf.buffer.resource = NULL; hud->whitelines.vbuf.buffer.resource = NULL;
@ -694,17 +696,19 @@ hud_stop_queries(struct hud_context *hud, struct pipe_context *pipe)
/* Allocate everything once and divide the storage into 3 portions /* Allocate everything once and divide the storage into 3 portions
* manually, because u_upload_alloc can unmap memory from previous calls. * manually, because u_upload_alloc can unmap memory from previous calls.
*/ */
struct pipe_resource *pres = NULL, *releasebuf = NULL;
u_upload_alloc(pipe->stream_uploader, 0, u_upload_alloc(pipe->stream_uploader, 0,
hud->bg.buffer_size + hud->bg.buffer_size +
hud->whitelines.buffer_size + hud->whitelines.buffer_size +
hud->text.buffer_size, hud->text.buffer_size,
16, &hud->bg.vbuf.buffer_offset, &hud->bg.vbuf.buffer.resource, 16, &hud->bg.vbuf.buffer_offset, &pres, &releasebuf,
(void**)&hud->bg.vertices); (void**)&hud->bg.vertices);
if (!hud->bg.vertices) if (!hud->bg.vertices)
return; return;
pipe_resource_reference(&hud->bg.vbuf.buffer.resource, pres);
pipe_resource_reference(&hud->whitelines.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource); pipe_resource_reference(&hud->whitelines.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
pipe_resource_reference(&hud->text.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource); pipe_resource_reference(&hud->text.vbuf.buffer.resource, hud->bg.vbuf.buffer.resource);
pipe_resource_release(pipe, releasebuf);
hud->whitelines.vbuf.buffer_offset = hud->bg.vbuf.buffer_offset + hud->whitelines.vbuf.buffer_offset = hud->bg.vbuf.buffer_offset +
hud->bg.buffer_size; hud->bg.buffer_size;

View file

@ -107,7 +107,8 @@ primconvert_init_draw(struct primconvert_context *pc,
const struct pipe_draw_info *info, const struct pipe_draw_info *info,
const struct pipe_draw_start_count_bias *draws, const struct pipe_draw_start_count_bias *draws,
struct pipe_draw_info *new_info, struct pipe_draw_info *new_info,
struct pipe_draw_start_count_bias *new_draw) struct pipe_draw_start_count_bias *new_draw,
struct pipe_resource **releasebuf)
{ {
struct pipe_draw_start_count_bias *direct_draws = NULL; struct pipe_draw_start_count_bias *direct_draws = NULL;
unsigned num_direct_draws = 0; unsigned num_direct_draws = 0;
@ -225,7 +226,7 @@ primconvert_init_draw(struct primconvert_context *pc,
if (new_size > UINT_MAX) if (new_size > UINT_MAX)
return false; return false;
u_upload_alloc(pc->pipe->stream_uploader, 0, new_size, 4, u_upload_alloc(pc->pipe->stream_uploader, 0, new_size, 4,
&ib_offset, &new_info->index.resource, &dst); &ib_offset, &new_info->index.resource, releasebuf, &dst);
if (!dst) if (!dst)
return false; return false;
new_draw->start = ib_offset / new_info->index_size; new_draw->start = ib_offset / new_info->index_size;
@ -288,13 +289,13 @@ util_primconvert_draw_single_vbo(struct primconvert_context *pc,
{ {
struct pipe_draw_info new_info; struct pipe_draw_info new_info;
struct pipe_draw_start_count_bias new_draw; struct pipe_draw_start_count_bias new_draw;
struct pipe_resource *releasebuf = NULL;
if (!primconvert_init_draw(pc, info, draw, &new_info, &new_draw)) if (!primconvert_init_draw(pc, info, draw, &new_info, &new_draw, &releasebuf))
return; return;
/* to the translated draw: */ /* to the translated draw: */
pc->pipe->draw_vbo(pc->pipe, &new_info, drawid_offset, NULL, &new_draw, 1); pc->pipe->draw_vbo(pc->pipe, &new_info, drawid_offset, NULL, &new_draw, 1);
pipe_resource_release(pc->pipe, releasebuf);
pipe_resource_reference(&new_info.index.resource, NULL);
} }
void void
@ -312,7 +313,7 @@ util_primconvert_draw_vbo(struct primconvert_context *pc,
unsigned draw_count = 0; unsigned draw_count = 0;
struct u_indirect_params *new_draws = util_draw_indirect_read(pc->pipe, info, indirect, &draw_count); struct u_indirect_params *new_draws = util_draw_indirect_read(pc->pipe, info, indirect, &draw_count);
if (!new_draws) if (!new_draws)
goto cleanup; return;
for (unsigned i = 0; i < draw_count; i++) for (unsigned i = 0; i < draw_count; i++)
util_primconvert_draw_single_vbo(pc, &new_draws[i].info, drawid_offset + i, &new_draws[i].draw); util_primconvert_draw_single_vbo(pc, &new_draws[i].info, drawid_offset + i, &new_draws[i].draw);
@ -326,12 +327,6 @@ util_primconvert_draw_vbo(struct primconvert_context *pc,
drawid++; drawid++;
} }
} }
cleanup:
if (info->take_index_buffer_ownership) {
struct pipe_resource *buffer = info->index.resource;
pipe_resource_reference(&buffer, NULL);
}
} }
void void
@ -344,6 +339,7 @@ util_primconvert_draw_vertex_state(struct primconvert_context *pc,
{ {
struct pipe_draw_info new_info; struct pipe_draw_info new_info;
struct pipe_draw_start_count_bias new_draw; struct pipe_draw_start_count_bias new_draw;
struct pipe_resource *releasebuf = NULL;
if (pc->cfg.primtypes_mask & BITFIELD_BIT(info.mode)) { if (pc->cfg.primtypes_mask & BITFIELD_BIT(info.mode)) {
pc->pipe->draw_vertex_state(pc->pipe, vstate, partial_velem_mask, info, draws, num_draws); pc->pipe->draw_vertex_state(pc->pipe, vstate, partial_velem_mask, info, draws, num_draws);
@ -363,7 +359,7 @@ util_primconvert_draw_vertex_state(struct primconvert_context *pc,
dinfo.index_size = 4; dinfo.index_size = 4;
dinfo.instance_count = 1; dinfo.instance_count = 1;
dinfo.index.resource = vstate->input.indexbuf; dinfo.index.resource = vstate->input.indexbuf;
if (!primconvert_init_draw(pc, &dinfo, draws, &new_info, &new_draw)) if (!primconvert_init_draw(pc, &dinfo, draws, &new_info, &new_draw, &releasebuf))
return; return;
struct pipe_vertex_state *new_state = pc->pipe->screen->create_vertex_state(pc->pipe->screen, struct pipe_vertex_state *new_state = pc->pipe->screen->create_vertex_state(pc->pipe->screen,
@ -381,6 +377,5 @@ util_primconvert_draw_vertex_state(struct primconvert_context *pc,
} }
if (info.take_vertex_state_ownership) if (info.take_vertex_state_ownership)
pipe_vertex_state_reference(&vstate, NULL); pipe_vertex_state_reference(&vstate, NULL);
pipe_resource_release(pc->pipe, releasebuf);
pipe_resource_reference(&new_info.index.resource, NULL);
} }

View file

@ -290,7 +290,7 @@ pp_filter_misc_state(struct pp_program *p)
void void
pp_filter_draw(struct pp_program *p) pp_filter_draw(struct pp_program *p)
{ {
util_draw_vertex_buffer(p->pipe, p->cso, p->vbuf, 0, false, util_draw_vertex_buffer(p->pipe, p->cso, p->vbuf, 0,
MESA_PRIM_QUADS, 4, 2); MESA_PRIM_QUADS, 4, 2);
} }

View file

@ -150,6 +150,7 @@ struct blitter_context_priv
bool cube_as_2darray; bool cube_as_2darray;
bool has_texrect; bool has_texrect;
bool cached_all_shaders; bool cached_all_shaders;
bool must_unset_vbuf;
/* The Draw module overrides these functions. /* The Draw module overrides these functions.
* Always create the blitter before Draw. */ * Always create the blitter before Draw. */
@ -580,12 +581,12 @@ void util_blitter_restore_vertex_states(struct blitter_context *blitter)
ctx->base.saved_velem_state = INVALID_PTR; ctx->base.saved_velem_state = INVALID_PTR;
} }
/* Vertex buffer. */ /* Vertex buffers: must ensure no internal vbs are left on driver. */
if (ctx->base.saved_num_vb) { if (ctx->base.saved_num_vb || ctx->must_unset_vbuf) {
pipe->set_vertex_buffers(pipe, ctx->base.saved_num_vb, pipe->set_vertex_buffers(pipe, ctx->base.saved_num_vb,
ctx->base.saved_vertex_buffers); ctx->base.saved_vertex_buffers);
memset(ctx->base.saved_vertex_buffers, 0, for (unsigned i = 0; i < ctx->base.saved_num_vb; i++)
sizeof(ctx->base.saved_vertex_buffers[0]) * ctx->base.saved_num_vb); pipe_vertex_buffer_unreference(&ctx->base.saved_vertex_buffers[i]);
ctx->base.saved_num_vb = 0; ctx->base.saved_num_vb = 0;
} }
@ -774,8 +775,8 @@ void util_blitter_restore_constant_buffer_state(struct blitter_context *blitter)
struct pipe_context *pipe = blitter->pipe; struct pipe_context *pipe = blitter->pipe;
pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot, pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot,
true, &blitter->saved_fs_constant_buffer); &blitter->saved_fs_constant_buffer);
blitter->saved_fs_constant_buffer.buffer = NULL; pipe_resource_reference(&blitter->saved_fs_constant_buffer.buffer, NULL);
} }
static void blitter_set_rectangle(struct blitter_context_priv *ctx, static void blitter_set_rectangle(struct blitter_context_priv *ctx,
@ -1373,11 +1374,12 @@ static void blitter_draw(struct blitter_context_priv *ctx,
{ {
struct pipe_context *pipe = ctx->base.pipe; struct pipe_context *pipe = ctx->base.pipe;
struct pipe_vertex_buffer vb = {0}; struct pipe_vertex_buffer vb = {0};
struct pipe_resource *releasebuf = NULL;
blitter_set_rectangle(ctx, x1, y1, x2, y2, depth); blitter_set_rectangle(ctx, x1, y1, x2, y2, depth);
u_upload_data(pipe->stream_uploader, 0, sizeof(ctx->vertices), 4, ctx->vertices, u_upload_data(pipe->stream_uploader, 0, sizeof(ctx->vertices), 4, ctx->vertices,
&vb.buffer_offset, &vb.buffer.resource); &vb.buffer_offset, &vb.buffer.resource, &releasebuf);
if (!vb.buffer.resource) if (!vb.buffer.resource)
return; return;
u_upload_unmap(pipe->stream_uploader); u_upload_unmap(pipe->stream_uploader);
@ -1385,6 +1387,7 @@ static void blitter_draw(struct blitter_context_priv *ctx,
pipe->bind_vertex_elements_state(pipe, vertex_elements_cso); pipe->bind_vertex_elements_state(pipe, vertex_elements_cso);
pipe->set_vertex_buffers(pipe, 1, &vb); pipe->set_vertex_buffers(pipe, 1, &vb);
pipe->bind_vs_state(pipe, get_vs(&ctx->base)); pipe->bind_vs_state(pipe, get_vs(&ctx->base));
ctx->must_unset_vbuf = true;
if (ctx->base.use_index_buffer) { if (ctx->base.use_index_buffer) {
/* Note that for V3D, /* Note that for V3D,
@ -1399,6 +1402,7 @@ static void blitter_draw(struct blitter_context_priv *ctx,
util_draw_arrays_instanced(pipe, MESA_PRIM_TRIANGLE_FAN, 0, 4, util_draw_arrays_instanced(pipe, MESA_PRIM_TRIANGLE_FAN, 0, 4,
0, num_instances); 0, num_instances);
} }
pipe_resource_release(pipe, releasebuf);
} }
void util_blitter_draw_rectangle(struct blitter_context *blitter, void util_blitter_draw_rectangle(struct blitter_context *blitter,
@ -1542,7 +1546,7 @@ static void util_blitter_clear_custom(struct blitter_context *blitter,
.buffer_size = 4 * sizeof(float), .buffer_size = 4 * sizeof(float),
}; };
pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot, pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot,
false, &cb); &cb);
bind_fs_clear_color(ctx, true); bind_fs_clear_color(ctx, true);
} else { } else {
bind_fs_empty(ctx); bind_fs_empty(ctx);
@ -2402,7 +2406,7 @@ void util_blitter_clear_render_target(struct blitter_context *blitter,
.buffer_size = 4 * sizeof(float), .buffer_size = 4 * sizeof(float),
}; };
pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot, pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot,
false, &cb); &cb);
num_layers = dstsurf->last_layer - dstsurf->first_layer + 1; num_layers = dstsurf->last_layer - dstsurf->first_layer + 1;
@ -2852,7 +2856,7 @@ util_blitter_stencil_fallback(struct blitter_context *blitter,
.buffer_size = sizeof(mask), .buffer_size = sizeof(mask),
}; };
pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot, pipe->set_constant_buffer(pipe, MESA_SHADER_FRAGMENT, blitter->cb_slot,
false, &cb); &cb);
pipe->bind_depth_stencil_alpha_state(pipe, pipe->bind_depth_stencil_alpha_state(pipe,
get_stencil_blit_fallback_dsa(ctx, i)); get_stencil_blit_fallback_dsa(ctx, i));

View file

@ -43,7 +43,6 @@ util_draw_vertex_buffer(struct pipe_context *pipe,
struct cso_context *cso, struct cso_context *cso,
struct pipe_resource *vbuf, struct pipe_resource *vbuf,
unsigned offset, unsigned offset,
bool vb_take_ownership,
enum mesa_prim prim_type, enum mesa_prim prim_type,
unsigned num_verts, unsigned num_verts,
unsigned num_attribs) unsigned num_attribs)
@ -60,10 +59,10 @@ util_draw_vertex_buffer(struct pipe_context *pipe,
/* note: vertex elements already set by caller */ /* note: vertex elements already set by caller */
if (cso) { if (cso) {
cso_set_vertex_buffers(cso, 1, vb_take_ownership, &vbuffer); cso_set_vertex_buffers(cso, 1, &vbuffer);
cso_draw_arrays(cso, prim_type, 0, num_verts); cso_draw_arrays(cso, prim_type, 0, num_verts);
} else { } else {
util_set_vertex_buffers(pipe, 1, vb_take_ownership, &vbuffer); pipe->set_vertex_buffers(pipe, 1, &vbuffer);
util_draw_arrays(pipe, prim_type, 0, num_verts); util_draw_arrays(pipe, prim_type, 0, num_verts);
} }
} }
@ -87,7 +86,7 @@ util_draw_user_vertex_buffer(struct cso_context *cso, void *buffer,
/* note: vertex elements already set by caller */ /* note: vertex elements already set by caller */
cso_set_vertex_buffers(cso, 1, false, &vbuffer); cso_set_vertex_buffers(cso, 1, &vbuffer);
cso_draw_arrays(cso, prim_type, 0, num_verts); cso_draw_arrays(cso, prim_type, 0, num_verts);
} }

View file

@ -45,7 +45,7 @@ struct cso_velems_state;
extern void extern void
util_draw_vertex_buffer(struct pipe_context *pipe, struct cso_context *cso, util_draw_vertex_buffer(struct pipe_context *pipe, struct cso_context *cso,
struct pipe_resource *vbuf, unsigned offset, struct pipe_resource *vbuf, unsigned offset,
bool vb_take_ownership, enum mesa_prim prim_type, enum mesa_prim prim_type,
unsigned num_attribs, unsigned num_verts); unsigned num_attribs, unsigned num_verts);
void void

View file

@ -47,8 +47,7 @@
void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst, void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
uint32_t *enabled_buffers, uint32_t *enabled_buffers,
const struct pipe_vertex_buffer *src, const struct pipe_vertex_buffer *src,
unsigned count, unsigned count)
bool take_ownership)
{ {
unsigned last_count = util_last_bit(*enabled_buffers); unsigned last_count = util_last_bit(*enabled_buffers);
uint32_t bitmask = 0; uint32_t bitmask = 0;
@ -61,10 +60,7 @@ void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
if (src[i].buffer.resource) if (src[i].buffer.resource)
bitmask |= 1 << i; bitmask |= 1 << i;
pipe_vertex_buffer_unreference(&dst[i]); pipe_vertex_buffer_reference(&dst[i], &src[i]);
if (!take_ownership && !src[i].is_user_buffer)
pipe_resource_reference(&dst[i].buffer.resource, src[i].buffer.resource);
} }
/* Copy over the other members of pipe_vertex_buffer. */ /* Copy over the other members of pipe_vertex_buffer. */
@ -84,8 +80,7 @@ void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst, void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
unsigned *dst_count, unsigned *dst_count,
const struct pipe_vertex_buffer *src, const struct pipe_vertex_buffer *src,
unsigned count, unsigned count)
bool take_ownership)
{ {
uint32_t enabled_buffers = 0; uint32_t enabled_buffers = 0;
@ -94,8 +89,7 @@ void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
enabled_buffers |= (1ull << i); enabled_buffers |= (1ull << i);
} }
util_set_vertex_buffers_mask(dst, &enabled_buffers, src, count, util_set_vertex_buffers_mask(dst, &enabled_buffers, src, count);
take_ownership);
*dst_count = util_last_bit(enabled_buffers); *dst_count = util_last_bit(enabled_buffers);
} }
@ -149,7 +143,7 @@ util_upload_index_buffer(struct pipe_context *pipe,
{ {
unsigned start_offset = draw->start * info->index_size; unsigned start_offset = draw->start * info->index_size;
u_upload_data(pipe->stream_uploader, start_offset, u_upload_data_ref(pipe->stream_uploader, start_offset,
draw->count * info->index_size, alignment, draw->count * info->index_size, alignment,
(char*)info->index.user + start_offset, (char*)info->index.user + start_offset,
out_offset, out_buffer); out_offset, out_buffer);

View file

@ -40,14 +40,12 @@ extern "C" {
void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst, void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
uint32_t *enabled_buffers, uint32_t *enabled_buffers,
const struct pipe_vertex_buffer *src, const struct pipe_vertex_buffer *src,
unsigned count, unsigned count);
bool take_ownership);
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst, void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
unsigned *dst_count, unsigned *dst_count,
const struct pipe_vertex_buffer *src, const struct pipe_vertex_buffer *src,
unsigned count, unsigned count);
bool take_ownership);
void util_set_shader_buffers_mask(struct pipe_shader_buffer *dst, void util_set_shader_buffers_mask(struct pipe_shader_buffer *dst,
uint32_t *enabled_buffers, uint32_t *enabled_buffers,

View file

@ -196,6 +196,19 @@ pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
*dst = src; *dst = src;
} }
static inline void
pipe_resource_release(struct pipe_context *pipe, struct pipe_resource *resource)
{
if (resource)
pipe->resource_release(pipe, resource);
}
static inline void
u_default_resource_release(struct pipe_context *pipe, struct pipe_resource *pres)
{
pipe_resource_reference(&pres, NULL);
}
/** /**
* Subtract the given number of references. * Subtract the given number of references.
*/ */
@ -686,9 +699,9 @@ pipe_set_constant_buffer(struct pipe_context *pipe,
cb.buffer_offset = 0; cb.buffer_offset = 0;
cb.buffer_size = buf->width0; cb.buffer_size = buf->width0;
cb.user_buffer = NULL; cb.user_buffer = NULL;
pipe->set_constant_buffer(pipe, shader, index, false, &cb); pipe->set_constant_buffer(pipe, shader, index, &cb);
} else { } else {
pipe->set_constant_buffer(pipe, shader, index, false, NULL); pipe->set_constant_buffer(pipe, shader, index, NULL);
} }
} }
@ -699,17 +712,19 @@ pipe_upload_constant_buffer0(struct pipe_context *pipe, mesa_shader_stage stage,
cbuf.buffer = NULL; cbuf.buffer = NULL;
const unsigned alignment = MAX2(pipe->screen->caps.constant_buffer_offset_alignment, 64); const unsigned alignment = MAX2(pipe->screen->caps.constant_buffer_offset_alignment, 64);
void *ptr; void *ptr;
struct pipe_resource *releasebuf = NULL;
if (pipe->screen->caps.prefer_real_buffer_in_constbuf0) { if (pipe->screen->caps.prefer_real_buffer_in_constbuf0) {
u_upload_alloc(pipe->const_uploader, 0, cbuf.buffer_size, u_upload_alloc(pipe->const_uploader, 0, cbuf.buffer_size,
alignment, &cbuf.buffer_offset, &cbuf.buffer, (void**)&ptr); alignment, &cbuf.buffer_offset, &cbuf.buffer, &releasebuf, (void**)&ptr);
memcpy(ptr, cbuf.user_buffer, cbuf.buffer_size); memcpy(ptr, cbuf.user_buffer, cbuf.buffer_size);
cbuf.user_buffer = NULL; cbuf.user_buffer = NULL;
u_upload_unmap(pipe->const_uploader); u_upload_unmap(pipe->const_uploader);
pipe->set_constant_buffer(pipe, stage, 0, true, &cbuf); pipe->set_constant_buffer(pipe, stage, 0, &cbuf);
pipe_resource_release(pipe, releasebuf);
} else { } else {
pipe->set_constant_buffer(pipe, stage, 0, false, cb); pipe->set_constant_buffer(pipe, stage, 0, cb);
} }
} }
@ -778,16 +793,10 @@ util_query_clear_result(union pipe_query_result *result, unsigned type)
static inline void static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst, util_copy_constant_buffer(struct pipe_constant_buffer *dst,
const struct pipe_constant_buffer *src, const struct pipe_constant_buffer *src)
bool take_ownership)
{ {
if (src) { if (src) {
if (take_ownership) {
pipe_resource_reference(&dst->buffer, NULL);
dst->buffer = src->buffer;
} else {
pipe_resource_reference(&dst->buffer, src->buffer); pipe_resource_reference(&dst->buffer, src->buffer);
}
dst->buffer_offset = src->buffer_offset; dst->buffer_offset = src->buffer_offset;
dst->buffer_size = src->buffer_size; dst->buffer_size = src->buffer_size;
dst->user_buffer = src->user_buffer; dst->user_buffer = src->user_buffer;
@ -985,24 +994,6 @@ static inline unsigned util_res_sample_count(const struct pipe_resource *res)
return res->nr_samples > 0 ? res->nr_samples : 1; return res->nr_samples > 0 ? res->nr_samples : 1;
} }
static inline void
util_set_vertex_buffers(struct pipe_context *pipe,
unsigned num_buffers, bool take_ownership,
const struct pipe_vertex_buffer *buffers)
{
/* set_vertex_buffers requires that reference counts are incremented
* by the caller.
*/
if (!take_ownership) {
for (unsigned i = 0; i < num_buffers; i++) {
if (!buffers[i].is_user_buffer && buffers[i].buffer.resource)
p_atomic_inc(&buffers[i].buffer.resource->reference.count);
}
}
pipe->set_vertex_buffers(pipe, num_buffers, buffers);
}
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View file

@ -1664,18 +1664,17 @@ tc_call_set_constant_buffer(struct pipe_context *pipe, void *call)
struct tc_constant_buffer *p = (struct tc_constant_buffer *)call; struct tc_constant_buffer *p = (struct tc_constant_buffer *)call;
if (unlikely(p->base.is_null)) { if (unlikely(p->base.is_null)) {
pipe->set_constant_buffer(pipe, p->base.shader, p->base.index, false, NULL); pipe->set_constant_buffer(pipe, p->base.shader, p->base.index, NULL);
return call_size(tc_constant_buffer_base); return call_size(tc_constant_buffer_base);
} }
pipe->set_constant_buffer(pipe, p->base.shader, p->base.index, true, &p->cb); pipe->set_constant_buffer(pipe, p->base.shader, p->base.index, &p->cb);
return call_size(tc_constant_buffer); return call_size(tc_constant_buffer);
} }
static void static void
tc_set_constant_buffer(struct pipe_context *_pipe, tc_set_constant_buffer(struct pipe_context *_pipe,
mesa_shader_stage shader, uint index, mesa_shader_stage shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *cb) const struct pipe_constant_buffer *cb)
{ {
struct threaded_context *tc = threaded_context(_pipe); struct threaded_context *tc = threaded_context(_pipe);
@ -1958,6 +1957,33 @@ tc_sampler_view_release(struct pipe_context *_pipe, struct pipe_sampler_view *vi
p->view = view; p->view = view;
} }
struct tc_resource_release {
struct tc_call_base base;
struct pipe_resource *resource;
};
static uint16_t ALWAYS_INLINE
tc_call_resource_release(struct pipe_context *pipe, void *call)
{
struct tc_resource_release *p = (struct tc_resource_release *)call;
pipe->resource_release(pipe, p->resource);
return call_size(tc_resource_release);
}
static void
tc_resource_release(struct pipe_context *_pipe, struct pipe_resource *resource)
{
if (!resource)
return;
struct threaded_context *tc = threaded_context(_pipe);
struct tc_resource_release *p =
tc_add_call(tc, TC_CALL_resource_release, tc_resource_release);
p->resource = resource;
}
struct tc_shader_images { struct tc_shader_images {
struct tc_call_base base; struct tc_call_base base;
uint8_t shader, start, count; uint8_t shader, start, count;
@ -2747,7 +2773,7 @@ tc_buffer_map(struct pipe_context *_pipe,
struct threaded_transfer *ttrans = slab_zalloc(&tc->pool_transfers); struct threaded_transfer *ttrans = slab_zalloc(&tc->pool_transfers);
uint8_t *map; uint8_t *map;
u_upload_alloc(tc->base.stream_uploader, 0, u_upload_alloc_ref(tc->base.stream_uploader, 0,
box->width + (box->x % tc->map_buffer_alignment), box->width + (box->x % tc->map_buffer_alignment),
tc->map_buffer_alignment, &ttrans->b.offset, tc->map_buffer_alignment, &ttrans->b.offset,
&ttrans->staging, (void**)&map); &ttrans->staging, (void**)&map);
@ -3699,11 +3725,8 @@ tc_call_draw_single_drawid(struct pipe_context *pipe, void *call)
info->info.index_bounds_valid = false; info->info.index_bounds_valid = false;
info->info.has_user_indices = false; info->info.has_user_indices = false;
info->info.take_index_buffer_ownership = false;
pipe->draw_vbo(pipe, &info->info, info_drawid->drawid_offset, NULL, &draw, 1); pipe->draw_vbo(pipe, &info->info, info_drawid->drawid_offset, NULL, &draw, 1);
if (info->info.index_size)
tc_drop_resource_reference(info->info.index.resource);
return call_size(tc_draw_single_drawid); return call_size(tc_draw_single_drawid);
} }
@ -3716,7 +3739,6 @@ simplify_draw_info(struct pipe_draw_info *info)
*/ */
info->has_user_indices = false; info->has_user_indices = false;
info->index_bounds_valid = false; info->index_bounds_valid = false;
info->take_index_buffer_ownership = false;
info->index_bias_varies = false; info->index_bias_varies = false;
info->_pad = 0; info->_pad = 0;
@ -3788,10 +3810,6 @@ tc_call_draw_single(struct pipe_context *pipe, void *call)
first->info.index_bias_varies = index_bias_varies; first->info.index_bias_varies = index_bias_varies;
pipe->draw_vbo(pipe, &first->info, 0, NULL, multi, num_draws); pipe->draw_vbo(pipe, &first->info, 0, NULL, multi, num_draws);
/* Since all draws use the same index buffer, drop all references at once. */
if (first->info.index_size)
pipe_drop_resource_references(first->info.index.resource, num_draws);
return call_size(tc_draw_single) * num_draws; return call_size(tc_draw_single) * num_draws;
} }
} }
@ -3806,11 +3824,8 @@ tc_call_draw_single(struct pipe_context *pipe, void *call)
first->info.index_bounds_valid = false; first->info.index_bounds_valid = false;
first->info.has_user_indices = false; first->info.has_user_indices = false;
first->info.take_index_buffer_ownership = false;
pipe->draw_vbo(pipe, &first->info, 0, NULL, &draw, 1); pipe->draw_vbo(pipe, &first->info, 0, NULL, &draw, 1);
if (first->info.index_size)
tc_drop_resource_reference(first->info.index.resource);
return call_size(tc_draw_single); return call_size(tc_draw_single);
} }
@ -3828,11 +3843,8 @@ tc_call_draw_indirect(struct pipe_context *pipe, void *call)
struct tc_draw_indirect *info = to_call(call, tc_draw_indirect); struct tc_draw_indirect *info = to_call(call, tc_draw_indirect);
info->info.index_bounds_valid = false; info->info.index_bounds_valid = false;
info->info.take_index_buffer_ownership = false;
pipe->draw_vbo(pipe, &info->info, 0, &info->indirect, &info->draw, 1); pipe->draw_vbo(pipe, &info->info, 0, &info->indirect, &info->draw, 1);
if (info->info.index_size)
tc_drop_resource_reference(info->info.index.resource);
tc_drop_resource_reference(info->indirect.buffer); tc_drop_resource_reference(info->indirect.buffer);
tc_drop_resource_reference(info->indirect.indirect_draw_count); tc_drop_resource_reference(info->indirect.indirect_draw_count);
@ -3854,11 +3866,8 @@ tc_call_draw_multi(struct pipe_context *pipe, void *call)
info->info.has_user_indices = false; info->info.has_user_indices = false;
info->info.index_bounds_valid = false; info->info.index_bounds_valid = false;
info->info.take_index_buffer_ownership = false;
pipe->draw_vbo(pipe, &info->info, 0, NULL, info->slot, info->num_draws); pipe->draw_vbo(pipe, &info->info, 0, NULL, info->slot, info->num_draws);
if (info->info.index_size)
tc_drop_resource_reference(info->info.index.resource);
return info->base.num_slots; return info->base.num_slots;
} }
@ -3879,10 +3888,6 @@ tc_draw_single(struct pipe_context *_pipe, const struct pipe_draw_info *info,
tc_add_call(tc, TC_CALL_draw_single, tc_draw_single); tc_add_call(tc, TC_CALL_draw_single, tc_draw_single);
if (info->index_size) { if (info->index_size) {
if (!info->take_index_buffer_ownership) {
tc_set_resource_reference(&p->info.index.resource,
info->index.resource);
}
tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource); tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource);
} }
memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX); memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX);
@ -3907,10 +3912,6 @@ tc_draw_single_draw_id(struct pipe_context *_pipe,
&tc_add_call(tc, TC_CALL_draw_single_drawid, tc_draw_single_drawid)->base; &tc_add_call(tc, TC_CALL_draw_single_drawid, tc_draw_single_drawid)->base;
if (info->index_size) { if (info->index_size) {
if (!info->take_index_buffer_ownership) {
tc_set_resource_reference(&p->info.index.resource,
info->index.resource);
}
tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource); tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource);
} }
((struct tc_draw_single_drawid*)p)->drawid_offset = drawid_offset; ((struct tc_draw_single_drawid*)p)->drawid_offset = drawid_offset;
@ -3935,6 +3936,7 @@ tc_draw_user_indices_single(struct pipe_context *_pipe,
unsigned index_size = info->index_size; unsigned index_size = info->index_size;
unsigned size = draws[0].count * index_size; unsigned size = draws[0].count * index_size;
struct pipe_resource *buffer = NULL; struct pipe_resource *buffer = NULL;
struct pipe_resource *releasebuf = NULL;
unsigned offset; unsigned offset;
if (!size) if (!size)
@ -3946,7 +3948,7 @@ tc_draw_user_indices_single(struct pipe_context *_pipe,
*/ */
u_upload_data(tc->base.stream_uploader, 0, size, 4, u_upload_data(tc->base.stream_uploader, 0, size, 4,
(uint8_t*)info->index.user + draws[0].start * index_size, (uint8_t*)info->index.user + draws[0].start * index_size,
&offset, &buffer); &offset, &buffer, &releasebuf);
if (unlikely(!buffer)) if (unlikely(!buffer))
return; return;
@ -3959,6 +3961,7 @@ tc_draw_user_indices_single(struct pipe_context *_pipe,
p->info.max_index = draws[0].count; p->info.max_index = draws[0].count;
p->index_bias = draws[0].index_bias; p->index_bias = draws[0].index_bias;
simplify_draw_info(&p->info); simplify_draw_info(&p->info);
pipe_resource_release(_pipe, releasebuf);
} }
/* Single draw with user indices and drawid_offset > 0. */ /* Single draw with user indices and drawid_offset > 0. */
@ -3974,6 +3977,7 @@ tc_draw_user_indices_single_draw_id(struct pipe_context *_pipe,
unsigned index_size = info->index_size; unsigned index_size = info->index_size;
unsigned size = draws[0].count * index_size; unsigned size = draws[0].count * index_size;
struct pipe_resource *buffer = NULL; struct pipe_resource *buffer = NULL;
struct pipe_resource *releasebuf = NULL;
unsigned offset; unsigned offset;
if (!size) if (!size)
@ -3985,7 +3989,7 @@ tc_draw_user_indices_single_draw_id(struct pipe_context *_pipe,
*/ */
u_upload_data(tc->base.stream_uploader, 0, size, 4, u_upload_data(tc->base.stream_uploader, 0, size, 4,
(uint8_t*)info->index.user + draws[0].start * index_size, (uint8_t*)info->index.user + draws[0].start * index_size,
&offset, &buffer); &offset, &buffer, &releasebuf);
if (unlikely(!buffer)) if (unlikely(!buffer))
return; return;
@ -3999,6 +4003,7 @@ tc_draw_user_indices_single_draw_id(struct pipe_context *_pipe,
p->info.max_index = draws[0].count; p->info.max_index = draws[0].count;
p->index_bias = draws[0].index_bias; p->index_bias = draws[0].index_bias;
simplify_draw_info(&p->info); simplify_draw_info(&p->info);
pipe_resource_release(_pipe, releasebuf);
} }
#define DRAW_OVERHEAD_BYTES sizeof(struct tc_draw_multi) #define DRAW_OVERHEAD_BYTES sizeof(struct tc_draw_multi)
@ -4017,7 +4022,6 @@ tc_draw_multi(struct pipe_context *_pipe, const struct pipe_draw_info *info,
{ {
struct threaded_context *tc = threaded_context(_pipe); struct threaded_context *tc = threaded_context(_pipe);
int total_offset = 0; int total_offset = 0;
bool take_index_buffer_ownership = info->take_index_buffer_ownership;
while (num_draws) { while (num_draws) {
struct tc_batch *next = &tc->batch_slots[tc->next]; struct tc_batch *next = &tc->batch_slots[tc->next];
@ -4037,13 +4041,8 @@ tc_draw_multi(struct pipe_context *_pipe, const struct pipe_draw_info *info,
tc_add_slot_based_call(tc, TC_CALL_draw_multi, tc_draw_multi, tc_add_slot_based_call(tc, TC_CALL_draw_multi, tc_draw_multi,
dr); dr);
if (info->index_size) { if (info->index_size) {
if (!take_index_buffer_ownership) {
tc_set_resource_reference(&p->info.index.resource,
info->index.resource);
}
tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource); tc_add_to_buffer_list(&tc->buffer_lists[tc->next_buf_list], info->index.resource);
} }
take_index_buffer_ownership = false;
memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX); memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX);
p->num_draws = dr; p->num_draws = dr;
memcpy(p->slot, &draws[total_offset], sizeof(draws[0]) * dr); memcpy(p->slot, &draws[total_offset], sizeof(draws[0]) * dr);
@ -4080,7 +4079,7 @@ tc_draw_user_indices_multi(struct pipe_context *_pipe,
* e.g. transfer_unmap and flush partially-uninitialized draw_vbo * e.g. transfer_unmap and flush partially-uninitialized draw_vbo
* to the driver if it was done afterwards. * to the driver if it was done afterwards.
*/ */
u_upload_alloc(tc->base.stream_uploader, 0, u_upload_alloc_ref(tc->base.stream_uploader, 0,
total_count << index_size_shift, 4, total_count << index_size_shift, 4,
&buffer_offset, &buffer, (void**)&ptr); &buffer_offset, &buffer, (void**)&ptr);
if (unlikely(!buffer)) if (unlikely(!buffer))
@ -4106,12 +4105,7 @@ tc_draw_user_indices_multi(struct pipe_context *_pipe,
dr); dr);
memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_INDEXBUF_AND_MIN_MAX_INDEX); memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_INDEXBUF_AND_MIN_MAX_INDEX);
if (total_offset == 0)
/* the first slot inherits the reference from u_upload_alloc() */
p->info.index.resource = buffer; p->info.index.resource = buffer;
else
/* all following slots need a new reference */
tc_set_resource_reference(&p->info.index.resource, buffer);
p->num_draws = dr; p->num_draws = dr;
@ -4157,10 +4151,6 @@ tc_draw_indirect(struct pipe_context *_pipe, const struct pipe_draw_info *info,
struct tc_buffer_list *next = &tc->buffer_lists[tc->next_buf_list]; struct tc_buffer_list *next = &tc->buffer_lists[tc->next_buf_list];
if (info->index_size) { if (info->index_size) {
if (!info->take_index_buffer_ownership) {
tc_set_resource_reference(&p->info.index.resource,
info->index.resource);
}
tc_add_to_buffer_list(next, info->index.resource); tc_add_to_buffer_list(next, info->index.resource);
} }
memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX); memcpy(&p->info, info, DRAW_INFO_SIZE_WITHOUT_MIN_MAX_INDEX);
@ -5676,6 +5666,7 @@ threaded_context_create(struct pipe_context *pipe,
CTX_INIT(set_global_binding); CTX_INIT(set_global_binding);
CTX_INIT(get_sample_position); CTX_INIT(get_sample_position);
CTX_INIT(invalidate_resource); CTX_INIT(invalidate_resource);
CTX_INIT(resource_release);
CTX_INIT(get_device_reset_status); CTX_INIT(get_device_reset_status);
CTX_INIT(set_device_reset_callback); CTX_INIT(set_device_reset_callback);
CTX_INIT(dump_debug_state); CTX_INIT(dump_debug_state);

View file

@ -78,6 +78,7 @@ CALL(blit)
CALL(resolve) CALL(resolve)
CALL(generate_mipmap) CALL(generate_mipmap)
CALL(invalidate_resource) CALL(invalidate_resource)
CALL(resource_release)
CALL(clear_render_target) CALL(clear_render_target)
CALL(clear_depth_stencil) CALL(clear_depth_stencil)
CALL(clear_buffer) CALL(clear_buffer)

View file

@ -54,7 +54,6 @@ struct u_upload_mgr {
unsigned buffer_size; /* Same as buffer->width0. */ unsigned buffer_size; /* Same as buffer->width0. */
unsigned offset; /* Aligned offset to the upload buffer, pointing unsigned offset; /* Aligned offset to the upload buffer, pointing
* at the first unused byte. */ * at the first unused byte. */
int buffer_private_refcount;
}; };
@ -151,16 +150,6 @@ u_upload_release_buffer(struct u_upload_mgr *upload)
{ {
/* Unmap and unreference the upload buffer. */ /* Unmap and unreference the upload buffer. */
upload_unmap_internal(upload, true); upload_unmap_internal(upload, true);
if (upload->buffer_private_refcount) {
/* Subtract the remaining private references before unreferencing
* the buffer. The mega comment below explains it.
*/
assert(upload->buffer_private_refcount > 0);
p_atomic_add(&upload->buffer->reference.count,
-upload->buffer_private_refcount);
upload->buffer_private_refcount = 0;
}
pipe_resource_reference(&upload->buffer, NULL);
upload->buffer_size = 0; upload->buffer_size = 0;
} }
@ -169,12 +158,13 @@ void
u_upload_destroy(struct u_upload_mgr *upload) u_upload_destroy(struct u_upload_mgr *upload)
{ {
u_upload_release_buffer(upload); u_upload_release_buffer(upload);
pipe_resource_release(upload->pipe, upload->buffer);
FREE(upload); FREE(upload);
} }
/* Return the allocated buffer size or 0 if it failed. */ /* Return the allocated buffer size or 0 if it failed. */
static unsigned static unsigned
u_upload_alloc_buffer(struct u_upload_mgr *upload, unsigned min_size) u_upload_alloc_buffer(struct u_upload_mgr *upload, unsigned min_size, struct pipe_resource **releasebuf)
{ {
struct pipe_screen *screen = upload->pipe->screen; struct pipe_screen *screen = upload->pipe->screen;
struct pipe_resource buffer; struct pipe_resource buffer;
@ -183,6 +173,8 @@ u_upload_alloc_buffer(struct u_upload_mgr *upload, unsigned min_size)
/* Release the old buffer, if present: /* Release the old buffer, if present:
*/ */
u_upload_release_buffer(upload); u_upload_release_buffer(upload);
*releasebuf = upload->buffer;
upload->buffer = NULL;
/* Allocate a new one: /* Allocate a new one:
*/ */
@ -208,39 +200,13 @@ u_upload_alloc_buffer(struct u_upload_mgr *upload, unsigned min_size)
if (upload->buffer == NULL) if (upload->buffer == NULL)
return 0; return 0;
/* Since atomic operations are very very slow when 2 threads are not
* sharing the same L3 cache (which happens on AMD Zen), eliminate all
* atomics in u_upload_alloc as follows:
*
* u_upload_alloc has to return a buffer reference to the caller.
* Instead of atomic_inc for every call, it does all possible future
* increments in advance here. The maximum number of times u_upload_alloc
* can be called per upload buffer is "size", because the minimum
* allocation size is 1, thus u_upload_alloc can only return "size" number
* of suballocations at most, so we will never need more. This is
* the number that is added to reference.count here.
*
* buffer_private_refcount tracks how many buffer references we can return
* without using atomics. If the buffer is full and there are still
* references left, they are atomically subtracted from reference.count
* before the buffer is unreferenced.
*
* This technique can increase CPU performance by 10%.
*
* The caller of u_upload_alloc_buffer will consume min_size bytes,
* so init the buffer_private_refcount to 1 + size - min_size, instead
* of size to avoid overflowing reference.count when size is huge.
*/
upload->buffer_private_refcount = 1 + (size - min_size);
assert(upload->buffer_private_refcount < INT32_MAX / 2);
p_atomic_add(&upload->buffer->reference.count, upload->buffer_private_refcount);
/* Map the new buffer. */ /* Map the new buffer. */
upload->map = pipe_buffer_map_range(upload->pipe, upload->buffer, upload->map = pipe_buffer_map_range(upload->pipe, upload->buffer,
0, size, upload->map_flags, 0, size, upload->map_flags,
&upload->transfer); &upload->transfer);
if (upload->map == NULL) { if (upload->map == NULL) {
u_upload_release_buffer(upload); u_upload_release_buffer(upload);
pipe_resource_release(upload->pipe, upload->buffer);
return 0; return 0;
} }
@ -256,6 +222,7 @@ u_upload_alloc(struct u_upload_mgr *upload,
unsigned alignment, unsigned alignment,
unsigned *out_offset, unsigned *out_offset,
struct pipe_resource **outbuf, struct pipe_resource **outbuf,
struct pipe_resource **releasebuf,
void **ptr) void **ptr)
{ {
unsigned buffer_size = upload->buffer_size; unsigned buffer_size = upload->buffer_size;
@ -269,14 +236,16 @@ u_upload_alloc(struct u_upload_mgr *upload,
if (unlikely(offset + size > buffer_size)) { if (unlikely(offset + size > buffer_size)) {
/* Allocate a new buffer and set the offset to the smallest one. */ /* Allocate a new buffer and set the offset to the smallest one. */
offset = align(min_out_offset, alignment); offset = align(min_out_offset, alignment);
buffer_size = u_upload_alloc_buffer(upload, offset + size); buffer_size = u_upload_alloc_buffer(upload, offset + size, releasebuf);
if (unlikely(!buffer_size)) { if (unlikely(!buffer_size)) {
*out_offset = ~0; *out_offset = ~0;
pipe_resource_reference(outbuf, NULL);
*ptr = NULL; *ptr = NULL;
*releasebuf = NULL;
return; return;
} }
} else {
*releasebuf = NULL;
} }
if (unlikely(!upload->map)) { if (unlikely(!upload->map)) {
@ -288,8 +257,8 @@ u_upload_alloc(struct u_upload_mgr *upload,
if (unlikely(!upload->map)) { if (unlikely(!upload->map)) {
upload->transfer = NULL; upload->transfer = NULL;
*out_offset = ~0; *out_offset = ~0;
pipe_resource_reference(outbuf, NULL);
*ptr = NULL; *ptr = NULL;
*releasebuf = NULL;
return; return;
} }
@ -305,15 +274,29 @@ u_upload_alloc(struct u_upload_mgr *upload,
*out_offset = offset; *out_offset = offset;
if (*outbuf != upload->buffer) { if (*outbuf != upload->buffer) {
pipe_resource_reference(outbuf, NULL);
*outbuf = upload->buffer; *outbuf = upload->buffer;
assert (upload->buffer_private_refcount > 0);
upload->buffer_private_refcount--;
} }
upload->offset = offset + size; upload->offset = offset + size;
} }
void
u_upload_alloc_ref(struct u_upload_mgr *upload,
unsigned min_out_offset,
unsigned size,
unsigned alignment,
unsigned *out_offset,
struct pipe_resource **outbuf,
void **ptr)
{
struct pipe_resource *pres = NULL;
struct pipe_resource *releasebuf = NULL;
u_upload_alloc(upload, min_out_offset, size, alignment, out_offset, &pres, &releasebuf, ptr);
pipe_resource_release(upload->pipe, releasebuf);
pipe_resource_reference(outbuf, pres);
}
void void
u_upload_data(struct u_upload_mgr *upload, u_upload_data(struct u_upload_mgr *upload,
unsigned min_out_offset, unsigned min_out_offset,
@ -321,13 +304,31 @@ u_upload_data(struct u_upload_mgr *upload,
unsigned alignment, unsigned alignment,
const void *data, const void *data,
unsigned *out_offset, unsigned *out_offset,
struct pipe_resource **outbuf) struct pipe_resource **outbuf,
struct pipe_resource **releasebuf)
{ {
uint8_t *ptr; uint8_t *ptr;
u_upload_alloc(upload, min_out_offset, size, alignment, u_upload_alloc(upload, min_out_offset, size, alignment,
out_offset, outbuf, out_offset, outbuf, releasebuf,
(void**)&ptr); (void**)&ptr);
if (ptr) if (ptr)
memcpy(ptr, data, size); memcpy(ptr, data, size);
} }
void
u_upload_data_ref(struct u_upload_mgr *upload,
unsigned min_out_offset,
unsigned size,
unsigned alignment,
const void *data,
unsigned *out_offset,
struct pipe_resource **outbuf)
{
struct pipe_resource *pres = NULL;
struct pipe_resource *releasebuf = NULL;
u_upload_data(upload, min_out_offset, size, alignment, data, out_offset, &pres, &releasebuf);
pipe_resource_release(upload->pipe, releasebuf);
pipe_resource_reference(outbuf, pres);
}

View file

@ -99,6 +99,7 @@ void u_upload_unmap( struct u_upload_mgr *upload );
* \param alignment Alignment of the suballocation within the buffer * \param alignment Alignment of the suballocation within the buffer
* \param out_offset Pointer to where the new buffer offset will be returned. * \param out_offset Pointer to where the new buffer offset will be returned.
* \param outbuf Pointer to where the upload buffer will be returned. * \param outbuf Pointer to where the upload buffer will be returned.
* \param releasebuf If non-null, this buffer must be released by the caller
* \param ptr Pointer to the allocated memory that is returned. * \param ptr Pointer to the allocated memory that is returned.
*/ */
void u_upload_alloc(struct u_upload_mgr *upload, void u_upload_alloc(struct u_upload_mgr *upload,
@ -107,9 +108,20 @@ void u_upload_alloc(struct u_upload_mgr *upload,
unsigned alignment, unsigned alignment,
unsigned *out_offset, unsigned *out_offset,
struct pipe_resource **outbuf, struct pipe_resource **outbuf,
struct pipe_resource **releasebuf,
void **ptr); void **ptr);
/* same as above, but outbuf gains a ref */
void
u_upload_alloc_ref(struct u_upload_mgr *upload,
unsigned min_out_offset,
unsigned size,
unsigned alignment,
unsigned *out_offset,
struct pipe_resource **outbuf,
void **ptr);
/** /**
* Allocate and write data to the upload buffer. * Allocate and write data to the upload buffer.
* *
@ -117,6 +129,16 @@ void u_upload_alloc(struct u_upload_mgr *upload,
* to the pointer returned from u_upload_alloc. * to the pointer returned from u_upload_alloc.
*/ */
void u_upload_data(struct u_upload_mgr *upload, void u_upload_data(struct u_upload_mgr *upload,
unsigned min_out_offset,
unsigned size,
unsigned alignment,
const void *data,
unsigned *out_offset,
struct pipe_resource **outbuf,
struct pipe_resource **releasebuf);
/* same as above, but outbuf gains a ref */
void u_upload_data_ref(struct u_upload_mgr *upload,
unsigned min_out_offset, unsigned min_out_offset,
unsigned size, unsigned size,
unsigned alignment, unsigned alignment,
View file
@ -440,15 +440,8 @@ void u_vbuf_unset_vertex_elements(struct u_vbuf *mgr)
void u_vbuf_destroy(struct u_vbuf *mgr) void u_vbuf_destroy(struct u_vbuf *mgr)
{ {
unsigned i;
mgr->pipe->set_vertex_buffers(mgr->pipe, 0, NULL); mgr->pipe->set_vertex_buffers(mgr->pipe, 0, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);
if (mgr->pc) if (mgr->pc)
util_primconvert_destroy(mgr->pc); util_primconvert_destroy(mgr->pc);
@ -463,7 +456,8 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
const struct pipe_draw_start_count_bias *draw, const struct pipe_draw_start_count_bias *draw,
unsigned vb_mask, unsigned out_vb, unsigned vb_mask, unsigned out_vb,
int start_vertex, unsigned num_vertices, int start_vertex, unsigned num_vertices,
int min_index, bool unroll_indices) int min_index, bool unroll_indices,
struct pipe_resource **releasebuf)
{ {
struct translate *tr; struct translate *tr;
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0}; struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
@ -550,7 +544,7 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
/* Create and map the output buffer. */ /* Create and map the output buffer. */
u_upload_alloc(mgr->pipe->stream_uploader, 0, u_upload_alloc(mgr->pipe->stream_uploader, 0,
key->output_stride * draw->count, 4, key->output_stride * draw->count, 4,
&out_offset, &out_buffer, &out_offset, &out_buffer, releasebuf,
(void**)&out_map); (void**)&out_map);
if (!out_buffer) if (!out_buffer)
return PIPE_ERROR_OUT_OF_MEMORY; return PIPE_ERROR_OUT_OF_MEMORY;
@ -584,7 +578,7 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
mgr->has_signed_vb_offset ? mgr->has_signed_vb_offset ?
0 : key->output_stride * start_vertex, 0 : key->output_stride * start_vertex,
key->output_stride * num_vertices, 4, key->output_stride * num_vertices, 4,
&out_offset, &out_buffer, &out_offset, &out_buffer, releasebuf,
(void**)&out_map); (void**)&out_map);
if (!out_buffer) if (!out_buffer)
return PIPE_ERROR_OUT_OF_MEMORY; return PIPE_ERROR_OUT_OF_MEMORY;
@ -608,7 +602,6 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset; mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
/* Move the buffer reference. */ /* Move the buffer reference. */
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[out_vb]);
mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer; mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer;
mgr->real_vertex_buffer[out_vb].is_user_buffer = false; mgr->real_vertex_buffer[out_vb].is_user_buffer = false;
@ -627,6 +620,7 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
~mgr->enabled_vb_mask; ~mgr->enabled_vb_mask;
uint32_t unused_vb_mask_orig; uint32_t unused_vb_mask_orig;
bool insufficient_buffers = false; bool insufficient_buffers = false;
uint32_t prev_mask = mgr->fallback_vbs_mask;
/* No vertex buffers available at all */ /* No vertex buffers available at all */
if (!unused_vb_mask) if (!unused_vb_mask)
@ -648,6 +642,9 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
index = ffs(unused_vb_mask) - 1; index = ffs(unused_vb_mask) - 1;
fallback_vbs[type] = index; fallback_vbs[type] = index;
if (prev_mask & BITFIELD_BIT(index)) {
memset(&mgr->real_vertex_buffer[index], 0, sizeof(mgr->real_vertex_buffer[index]));
}
mgr->fallback_vbs_mask |= 1 << index; mgr->fallback_vbs_mask |= 1 << index;
unused_vb_mask &= ~(1 << index); unused_vb_mask &= ~(1 << index);
/*printf("found slot=%i for type=%i\n", index, type);*/ /*printf("found slot=%i for type=%i\n", index, type);*/
@ -660,6 +657,9 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
uint32_t index = ffs(unused_vb_mask_orig) - 1; uint32_t index = ffs(unused_vb_mask_orig) - 1;
/* When sharing one vertex buffer use per-vertex frequency for everything. */ /* When sharing one vertex buffer use per-vertex frequency for everything. */
fallback_vbs[VB_VERTEX] = index; fallback_vbs[VB_VERTEX] = index;
if (prev_mask & BITFIELD_BIT(index)) {
memset(&mgr->real_vertex_buffer[index], 0, sizeof(mgr->real_vertex_buffer[index]));
}
mgr->fallback_vbs_mask = 1 << index; mgr->fallback_vbs_mask = 1 << index;
mask[VB_VERTEX] = mask[VB_VERTEX] | mask[VB_CONST] | mask[VB_INSTANCE]; mask[VB_VERTEX] = mask[VB_VERTEX] | mask[VB_CONST] | mask[VB_INSTANCE];
mask[VB_CONST] = 0; mask[VB_CONST] = 0;
@ -684,7 +684,7 @@ u_vbuf_translate_begin(struct u_vbuf *mgr,
const struct pipe_draw_start_count_bias *draw, const struct pipe_draw_start_count_bias *draw,
int start_vertex, unsigned num_vertices, int start_vertex, unsigned num_vertices,
int min_index, bool unroll_indices, int min_index, bool unroll_indices,
uint32_t misaligned) uint32_t misaligned, struct pipe_resource **releasebuf)
{ {
unsigned mask[VB_NUM] = {0}; unsigned mask[VB_NUM] = {0};
struct translate_key key[VB_NUM]; struct translate_key key[VB_NUM];
@ -803,7 +803,7 @@ u_vbuf_translate_begin(struct u_vbuf *mgr,
err = u_vbuf_translate_buffers(mgr, &key[type], info, draw, err = u_vbuf_translate_buffers(mgr, &key[type], info, draw,
mask[type], mgr->fallback_vbs[type], mask[type], mgr->fallback_vbs[type],
start[type], num[type], min_index, start[type], num[type], min_index,
unroll_indices && type == VB_VERTEX); unroll_indices && type == VB_VERTEX, releasebuf);
if (err != PIPE_OK) if (err != PIPE_OK)
return false; return false;
} }
@ -853,11 +853,11 @@ static void u_vbuf_translate_end(struct u_vbuf *mgr)
mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso); mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
mgr->using_translate = false; mgr->using_translate = false;
/* Unreference the now-unused VBOs. */ /* Release the now-unused VBOs. */
for (i = 0; i < VB_NUM; i++) { for (i = 0; i < VB_NUM; i++) {
unsigned vb = mgr->fallback_vbs[i]; unsigned vb = mgr->fallback_vbs[i];
if (vb != ~0u) { if (vb != ~0u) {
pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource, NULL); memset(&mgr->real_vertex_buffer[vb], 0, sizeof(mgr->real_vertex_buffer[vb]));
mgr->fallback_vbs[i] = ~0; mgr->fallback_vbs[i] = ~0;
} }
} }
@ -991,12 +991,10 @@ static void u_vbuf_delete_vertex_elements(void *ctx, void *state,
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
unsigned count, unsigned count,
bool take_ownership,
const struct pipe_vertex_buffer *bufs) const struct pipe_vertex_buffer *bufs)
{ {
if (!count) { if (!count) {
struct pipe_context *pipe = mgr->pipe; struct pipe_context *pipe = mgr->pipe;
unsigned last_count = mgr->num_vertex_buffers;
/* Unbind. */ /* Unbind. */
mgr->num_vertex_buffers = 0; mgr->num_vertex_buffers = 0;
@ -1008,11 +1006,6 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
mgr->unaligned_vb_mask[1] = 0; mgr->unaligned_vb_mask[1] = 0;
mgr->vertex_buffers_dirty = false; mgr->vertex_buffers_dirty = false;
for (unsigned i = 0; i < last_count; i++) {
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);
}
pipe->set_vertex_buffers(pipe, 0, NULL); pipe->set_vertex_buffers(pipe, 0, NULL);
return; return;
} }
@ -1036,8 +1029,8 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[i]; struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[i];
if (!vb->buffer.resource) { if (!vb->buffer.resource) {
pipe_vertex_buffer_unreference(orig_vb); memset(orig_vb, 0, sizeof(*orig_vb));
pipe_vertex_buffer_unreference(real_vb); memset(real_vb, 0, sizeof(*real_vb));
continue; continue;
} }
@ -1047,19 +1040,14 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
orig_vb->buffer.resource == vb->buffer.resource) orig_vb->buffer.resource == vb->buffer.resource)
num_identical++; num_identical++;
if (take_ownership) { *orig_vb = *vb;
pipe_vertex_buffer_unreference(orig_vb);
memcpy(orig_vb, vb, sizeof(*vb));
} else {
pipe_vertex_buffer_reference(orig_vb, vb);
}
enabled_vb_mask |= 1 << i; enabled_vb_mask |= 1 << i;
if ((!mgr->caps.attrib_4byte_unaligned && vb->buffer_offset % 4 != 0)) { if ((!mgr->caps.attrib_4byte_unaligned && vb->buffer_offset % 4 != 0)) {
incompatible_vb_mask |= 1 << i; incompatible_vb_mask |= 1 << i;
real_vb->buffer_offset = vb->buffer_offset; real_vb->buffer_offset = vb->buffer_offset;
pipe_vertex_buffer_unreference(real_vb); memset(real_vb, 0, sizeof(*real_vb));
real_vb->is_user_buffer = false; real_vb->is_user_buffer = false;
continue; continue;
} }
@ -1074,12 +1062,12 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) { if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) {
user_vb_mask |= 1 << i; user_vb_mask |= 1 << i;
real_vb->buffer_offset = vb->buffer_offset; real_vb->buffer_offset = vb->buffer_offset;
pipe_vertex_buffer_unreference(real_vb); memset(real_vb, 0, sizeof(*real_vb));
real_vb->is_user_buffer = false; real_vb->is_user_buffer = false;
continue; continue;
} }
pipe_vertex_buffer_reference(real_vb, vb); *real_vb = *vb;
} }
unsigned last_count = mgr->num_vertex_buffers; unsigned last_count = mgr->num_vertex_buffers;
@ -1088,8 +1076,8 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
return; return;
for (; i < last_count; i++) { for (; i < last_count; i++) {
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]); memset(&mgr->vertex_buffer[i], 0, sizeof(struct pipe_vertex_buffer));
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]); memset(&mgr->real_vertex_buffer[i], 0, sizeof(struct pipe_vertex_buffer));
} }
mgr->num_vertex_buffers = count; mgr->num_vertex_buffers = count;
@ -1148,13 +1136,15 @@ get_upload_offset_size(struct u_vbuf *mgr,
static enum pipe_error static enum pipe_error
u_vbuf_upload_buffers(struct u_vbuf *mgr, u_vbuf_upload_buffers(struct u_vbuf *mgr,
int start_vertex, unsigned num_vertices, int start_vertex, unsigned num_vertices,
int start_instance, unsigned num_instances) int start_instance, unsigned num_instances,
unsigned *release_count, struct pipe_resource **releasebufs)
{ {
unsigned i; unsigned i;
struct u_vbuf_elements *ve = mgr->ve; struct u_vbuf_elements *ve = mgr->ve;
unsigned nr_velems = ve->count; unsigned nr_velems = ve->count;
const struct pipe_vertex_element *velems = const struct pipe_vertex_element *velems =
mgr->using_translate ? mgr->fallback_velems.velems : ve->ve; mgr->using_translate ? mgr->fallback_velems.velems : ve->ve;
unsigned rcount = 0;
/* Faster path when no vertex attribs are interleaved. */ /* Faster path when no vertex attribs are interleaved. */
if ((ve->interleaved_vb_mask & mgr->user_vb_mask) == 0) { if ((ve->interleaved_vb_mask & mgr->user_vb_mask) == 0) {
@ -1175,12 +1165,16 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr,
u_upload_data(mgr->pipe->stream_uploader, u_upload_data(mgr->pipe->stream_uploader,
mgr->has_signed_vb_offset ? 0 : offset, mgr->has_signed_vb_offset ? 0 : offset,
size, 4, ptr + offset, &real_vb->buffer_offset, size, 4, ptr + offset, &real_vb->buffer_offset,
&real_vb->buffer.resource); &real_vb->buffer.resource, &releasebufs[rcount]);
if (!real_vb->buffer.resource) if (!real_vb->buffer.resource)
return PIPE_ERROR_OUT_OF_MEMORY; return PIPE_ERROR_OUT_OF_MEMORY;
if (releasebufs[rcount])
rcount++;
real_vb->buffer_offset -= offset; real_vb->buffer_offset -= offset;
} }
*release_count = rcount;
return PIPE_OK; return PIPE_OK;
} }
@ -1235,13 +1229,18 @@ u_vbuf_upload_buffers(struct u_vbuf *mgr,
u_upload_data(mgr->pipe->stream_uploader, u_upload_data(mgr->pipe->stream_uploader,
mgr->has_signed_vb_offset ? 0 : start, mgr->has_signed_vb_offset ? 0 : start,
end - start, 4, end - start, 4,
ptr + start, &real_vb->buffer_offset, &real_vb->buffer.resource); ptr + start, &real_vb->buffer_offset, &real_vb->buffer.resource, &releasebufs[rcount]);
if (!real_vb->buffer.resource) if (!real_vb->buffer.resource)
return PIPE_ERROR_OUT_OF_MEMORY; return PIPE_ERROR_OUT_OF_MEMORY;
if (releasebufs[rcount])
rcount++;
real_vb->buffer_offset -= start; real_vb->buffer_offset -= start;
} }
*release_count = rcount;
return PIPE_OK; return PIPE_OK;
} }
@ -1393,10 +1392,6 @@ static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr)
assert(mgr->vertex_buffers_dirty); assert(mgr->vertex_buffers_dirty);
if (mgr->user_vb_mask == BITFIELD_MASK(count)) { if (mgr->user_vb_mask == BITFIELD_MASK(count)) {
/* Fast path that allows us to transfer the VBO references to the driver
* to skip atomic reference counting there. These are freshly uploaded
* user buffers that can be discarded after this call.
*/
pipe->set_vertex_buffers(pipe, count, mgr->real_vertex_buffer); pipe->set_vertex_buffers(pipe, count, mgr->real_vertex_buffer);
/* We don't own the VBO references now. Set them to NULL. */ /* We don't own the VBO references now. Set them to NULL. */
@ -1405,8 +1400,7 @@ static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr)
mgr->real_vertex_buffer[i].buffer.resource = NULL; mgr->real_vertex_buffer[i].buffer.resource = NULL;
} }
} else { } else {
/* Slow path where we have to keep VBO references. */ pipe->set_vertex_buffers(pipe, count, mgr->real_vertex_buffer);
util_set_vertex_buffers(pipe, count, false, mgr->real_vertex_buffer);
} }
mgr->vertex_buffers_dirty = false; mgr->vertex_buffers_dirty = false;
} }
@ -1417,12 +1411,6 @@ u_vbuf_split_indexed_multidraw(struct u_vbuf *mgr, struct pipe_draw_info *info,
unsigned *indirect_data, unsigned stride, unsigned *indirect_data, unsigned stride,
unsigned draw_count) unsigned draw_count)
{ {
/* Increase refcount to be able to use take_index_buffer_ownership with
* all draws.
*/
if (draw_count > 1 && info->take_index_buffer_ownership)
p_atomic_add(&info->index.resource->reference.count, draw_count - 1);
assert(info->index_size); assert(info->index_size);
for (unsigned i = 0; i < draw_count; i++) { for (unsigned i = 0; i < draw_count; i++) {
@ -1453,6 +1441,9 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
const uint32_t used_vb_mask = mgr->ve->used_vb_mask; const uint32_t used_vb_mask = mgr->ve->used_vb_mask;
uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask; uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask;
unsigned fixed_restart_index = info->index_size ? util_prim_restart_index_from_size(info->index_size) : 0; unsigned fixed_restart_index = info->index_size ? util_prim_restart_index_from_size(info->index_size) : 0;
struct pipe_resource *releasebuf = NULL;
struct pipe_resource *releasebufs[PIPE_MAX_ATTRIBS];
unsigned release_count = 0;
uint32_t misaligned = 0; uint32_t misaligned = 0;
if (!mgr->caps.attrib_element_unaligned) { if (!mgr->caps.attrib_element_unaligned) {
@ -1483,12 +1474,6 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
return; return;
} }
/* Increase refcount to be able to use take_index_buffer_ownership with
* all draws.
*/
if (num_draws > 1 && info->take_index_buffer_ownership)
p_atomic_add(&info->index.resource->reference.count, num_draws - 1);
for (unsigned d = 0; d < num_draws; d++) { for (unsigned d = 0; d < num_draws; d++) {
struct pipe_draw_info new_info = *info; struct pipe_draw_info new_info = *info;
struct pipe_draw_start_count_bias new_draw = draws[d]; struct pipe_draw_start_count_bias new_draw = draws[d];
@ -1510,13 +1495,13 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
} }
if (!draw_count) if (!draw_count)
goto cleanup; return;
unsigned data_size = (draw_count - 1) * indirect->stride + unsigned data_size = (draw_count - 1) * indirect->stride +
(new_info.index_size ? 20 : 16); (new_info.index_size ? 20 : 16);
unsigned *data = malloc(data_size); unsigned *data = malloc(data_size);
if (!data) if (!data)
goto cleanup; /* report an error? */ return; /* report an error? */
/* Read the used buffer range only once, because the read can be /* Read the used buffer range only once, because the read can be
* uncached. * uncached.
@ -1616,7 +1601,7 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
new_info.instance_count = end_instance - new_info.start_instance; new_info.instance_count = end_instance - new_info.start_instance;
if (new_info.start_instance == ~0u || !new_info.instance_count) if (new_info.start_instance == ~0u || !new_info.instance_count)
goto cleanup; return;
} else { } else {
/* Non-indexed multidraw. /* Non-indexed multidraw.
* *
@ -1653,11 +1638,11 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
new_info.instance_count = end_instance - new_info.start_instance; new_info.instance_count = end_instance - new_info.start_instance;
if (new_draw.start == ~0u || !new_draw.count || !new_info.instance_count) if (new_draw.start == ~0u || !new_draw.count || !new_info.instance_count)
goto cleanup; return;
} }
} else { } else {
if ((!indirect && !new_draw.count) || !new_info.instance_count) if ((!indirect && !new_draw.count) || !new_info.instance_count)
goto cleanup; return;
} }
if (new_info.index_size) { if (new_info.index_size) {
@ -1708,14 +1693,12 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
mgr->ve->incompatible_elem_mask) { mgr->ve->incompatible_elem_mask) {
if (!u_vbuf_translate_begin(mgr, &new_info, &new_draw, if (!u_vbuf_translate_begin(mgr, &new_info, &new_draw,
start_vertex, num_vertices, start_vertex, num_vertices,
min_index, unroll_indices, misaligned)) { min_index, unroll_indices, misaligned, &releasebuf)) {
debug_warn_once("u_vbuf_translate_begin() failed"); debug_warn_once("u_vbuf_translate_begin() failed");
goto cleanup; goto out;
} }
if (unroll_indices) { if (unroll_indices) {
if (!new_info.has_user_indices && info->take_index_buffer_ownership)
pipe_drop_resource_references(new_info.index.resource, 1);
new_info.index_size = 0; new_info.index_size = 0;
new_draw.index_bias = 0; new_draw.index_bias = 0;
new_info.index_bounds_valid = true; new_info.index_bounds_valid = true;
@ -1733,9 +1716,10 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
if (user_vb_mask) { if (user_vb_mask) {
if (u_vbuf_upload_buffers(mgr, start_vertex, num_vertices, if (u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
new_info.start_instance, new_info.start_instance,
new_info.instance_count) != PIPE_OK) { new_info.instance_count,
&release_count, releasebufs) != PIPE_OK) {
debug_warn_once("u_vbuf_upload_buffers() failed"); debug_warn_once("u_vbuf_upload_buffers() failed");
goto cleanup; goto out;
} }
mgr->vertex_buffers_dirty = true; mgr->vertex_buffers_dirty = true;
@ -1782,13 +1766,11 @@ void u_vbuf_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *inf
if (mgr->using_translate) { if (mgr->using_translate) {
u_vbuf_translate_end(mgr); u_vbuf_translate_end(mgr);
} }
return;
cleanup: out:
if (info->take_index_buffer_ownership) { pipe_resource_release(pipe, releasebuf);
struct pipe_resource *indexbuf = info->index.resource; for (unsigned i = 0; i < release_count; i++)
pipe_resource_reference(&indexbuf, NULL); pipe_resource_release(pipe, releasebufs[i]);
}
} }
void u_vbuf_save_vertex_elements(struct u_vbuf *mgr) void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
View file
@ -82,7 +82,6 @@ void u_vbuf_set_vertex_elements(struct u_vbuf *mgr,
void u_vbuf_unset_vertex_elements(struct u_vbuf *mgr); void u_vbuf_unset_vertex_elements(struct u_vbuf *mgr);
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr, void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
unsigned count, unsigned count,
bool take_ownership,
const struct pipe_vertex_buffer *bufs); const struct pipe_vertex_buffer *bufs);
void u_vbuf_draw_vbo(struct pipe_context *pipe, void u_vbuf_draw_vbo(struct pipe_context *pipe,
const struct pipe_draw_info *info, const struct pipe_draw_info *info,
View file
@ -428,9 +428,10 @@ vl_bicubic_filter_render(struct vl_bicubic_filter *filter,
struct pipe_constant_buffer cb = {0}; struct pipe_constant_buffer cb = {0};
float *ptr = NULL; float *ptr = NULL;
struct pipe_resource *releasebuf = NULL;
u_upload_alloc(filter->pipe->const_uploader, 0, 2 * sizeof(float), 256, u_upload_alloc(filter->pipe->const_uploader, 0, 2 * sizeof(float), 256,
&cb.buffer_offset, &cb.buffer, (void**)&ptr); &cb.buffer_offset, &cb.buffer, &releasebuf, (void**)&ptr);
cb.buffer_size = 2 * sizeof(float); cb.buffer_size = 2 * sizeof(float);
if (ptr) { if (ptr) {
@ -450,7 +451,7 @@ vl_bicubic_filter_render(struct vl_bicubic_filter *filter,
0, 0, pipe_surface_width(dst), 0, 0, pipe_surface_width(dst),
pipe_surface_height(dst), false); pipe_surface_height(dst), false);
filter->pipe->set_constant_buffer(filter->pipe, MESA_SHADER_FRAGMENT, filter->pipe->set_constant_buffer(filter->pipe, MESA_SHADER_FRAGMENT,
0, false, &cb); 0, &cb);
filter->pipe->bind_rasterizer_state(filter->pipe, filter->rs_state); filter->pipe->bind_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->bind_blend_state(filter->pipe, filter->blend); filter->pipe->bind_blend_state(filter->pipe, filter->blend);
filter->pipe->bind_sampler_states(filter->pipe, MESA_SHADER_FRAGMENT, filter->pipe->bind_sampler_states(filter->pipe, MESA_SHADER_FRAGMENT,
@ -462,7 +463,8 @@ vl_bicubic_filter_render(struct vl_bicubic_filter *filter,
filter->pipe->set_framebuffer_state(filter->pipe, &fb_state); filter->pipe->set_framebuffer_state(filter->pipe, &fb_state);
filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport); filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves); filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
util_set_vertex_buffers(filter->pipe, 1, false, &filter->quad); filter->pipe->set_vertex_buffers(filter->pipe, 1, &filter->quad);
util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4); util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4);
pipe_resource_release(filter->pipe, releasebuf);
} }
View file
@ -286,7 +286,7 @@ cleanup_buffers(struct vl_compositor *c)
if (c->pipe_gfx_supported) { if (c->pipe_gfx_supported) {
c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state); c->pipe->delete_vertex_elements_state(c->pipe, c->vertex_elems_state);
} }
pipe_resource_reference(&c->vertex_buf.buffer.resource, NULL); c->vertex_buf.buffer.resource = NULL;
} }
static inline struct u_rect static inline struct u_rect
@ -909,5 +909,5 @@ vl_compositor_cleanup_state(struct vl_compositor_state *s)
assert(s); assert(s);
vl_compositor_clear_layers(s); vl_compositor_clear_layers(s);
pipe_resource_reference(&s->shader_params, NULL); pipe_resource_release(s->pipe, s->shader_params);
} }
View file
@ -863,7 +863,7 @@ draw_layers(struct vl_compositor *c,
/* Unbind. */ /* Unbind. */
c->pipe->set_shader_images(c->pipe, MESA_SHADER_COMPUTE, 0, 0, 1, NULL); c->pipe->set_shader_images(c->pipe, MESA_SHADER_COMPUTE, 0, 0, 1, NULL);
c->pipe->set_constant_buffer(c->pipe, MESA_SHADER_COMPUTE, 0, false, NULL); c->pipe->set_constant_buffer(c->pipe, MESA_SHADER_COMPUTE, 0, NULL);
c->pipe->set_sampler_views(c->pipe, MESA_SHADER_COMPUTE, 0, 0, c->pipe->set_sampler_views(c->pipe, MESA_SHADER_COMPUTE, 0, 0,
num_sampler_views, NULL); num_sampler_views, NULL);
c->pipe->bind_compute_state(c->pipe, NULL); c->pipe->bind_compute_state(c->pipe, NULL);
View file
@ -599,7 +599,7 @@ calc_drawn_area(struct vl_compositor_state *s, struct vl_compositor_layer *layer
} }
static void static void
gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty) gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u_rect *dirty, struct pipe_resource **releasebuf)
{ {
struct vertex2f *vb; struct vertex2f *vb;
unsigned i; unsigned i;
@ -611,6 +611,7 @@ gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s, struct u
VL_COMPOSITOR_VB_STRIDE * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */ VL_COMPOSITOR_VB_STRIDE * VL_COMPOSITOR_MAX_LAYERS * 4, /* size */
4, /* alignment */ 4, /* alignment */
&c->vertex_buf.buffer_offset, &c->vertex_buf.buffer.resource, &c->vertex_buf.buffer_offset, &c->vertex_buf.buffer.resource,
releasebuf,
(void **)&vb); (void **)&vb);
for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) { for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
@ -724,7 +725,8 @@ vl_compositor_gfx_render(struct vl_compositor_state *s,
} }
c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor); c->pipe->set_scissor_states(c->pipe, 0, 1, &s->scissor);
gen_vertex_data(c, s, dirty_area); struct pipe_resource *releasebuf = NULL;
gen_vertex_data(c, s, dirty_area, &releasebuf);
set_csc_matrix(s); set_csc_matrix(s);
if (clear_dirty && dirty_area && if (clear_dirty && dirty_area &&
@ -739,9 +741,10 @@ vl_compositor_gfx_render(struct vl_compositor_state *s,
c->pipe->set_framebuffer_state(c->pipe, &c->fb_state); c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
c->pipe->bind_vs_state(c->pipe, c->vs); c->pipe->bind_vs_state(c->pipe, c->vs);
c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state); c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
util_set_vertex_buffers(c->pipe, 1, false, &c->vertex_buf); c->pipe->set_vertex_buffers(c->pipe, 1, &c->vertex_buf);
pipe_set_constant_buffer(c->pipe, MESA_SHADER_FRAGMENT, 0, s->shader_params); pipe_set_constant_buffer(c->pipe, MESA_SHADER_FRAGMENT, 0, s->shader_params);
c->pipe->bind_rasterizer_state(c->pipe, c->rast); c->pipe->bind_rasterizer_state(c->pipe, c->rast);
draw_layers(c, s, dirty_area); draw_layers(c, s, dirty_area);
pipe_resource_release(c->pipe, releasebuf);
} }
View file
@ -494,7 +494,7 @@ vl_deint_filter_render(struct vl_deint_filter *filter,
/* set up pipe state */ /* set up pipe state */
filter->pipe->bind_rasterizer_state(filter->pipe, filter->rs_state); filter->pipe->bind_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves); filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
util_set_vertex_buffers(filter->pipe, 1, false, &filter->quad); filter->pipe->set_vertex_buffers(filter->pipe, 1, &filter->quad);
filter->pipe->bind_vs_state(filter->pipe, filter->vs); filter->pipe->bind_vs_state(filter->pipe, filter->vs);
filter->pipe->bind_sampler_states(filter->pipe, MESA_SHADER_FRAGMENT, filter->pipe->bind_sampler_states(filter->pipe, MESA_SHADER_FRAGMENT,
0, 4, filter->sampler); 0, 4, filter->sampler);
View file
@ -308,7 +308,7 @@ vl_matrix_filter_render(struct vl_matrix_filter *filter,
filter->pipe->set_framebuffer_state(filter->pipe, &fb_state); filter->pipe->set_framebuffer_state(filter->pipe, &fb_state);
filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport); filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves); filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
util_set_vertex_buffers(filter->pipe, 1, false, &filter->quad); filter->pipe->set_vertex_buffers(filter->pipe, 1, &filter->quad);
util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4); util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4);
} }
View file
@ -426,7 +426,7 @@ vl_median_filter_render(struct vl_median_filter *filter,
filter->pipe->set_framebuffer_state(filter->pipe, &fb_state); filter->pipe->set_framebuffer_state(filter->pipe, &fb_state);
filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport); filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves); filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
util_set_vertex_buffers(filter->pipe, 1, false, &filter->quad); filter->pipe->set_vertex_buffers(filter->pipe, 1, &filter->quad);
util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4); util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4);
} }
View file
@ -786,7 +786,7 @@ vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
if (!ref_frames[j] || !ref_frames[j][i]) continue; if (!ref_frames[j] || !ref_frames[j][i]) continue;
vb[2] = vl_vb_get_mv(&buf->vertex_stream, j); vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
util_set_vertex_buffers(dec->context, 3, false, vb); dec->context->set_vertex_buffers(dec->context, 3, vb);
vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]); vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
} }
@ -797,7 +797,7 @@ vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
if (!buf->num_ycbcr_blocks[i]) continue; if (!buf->num_ycbcr_blocks[i]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i); vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
util_set_vertex_buffers(dec->context, 2, false, vb); dec->context->set_vertex_buffers(dec->context, 2, vb);
vl_zscan_render(i ? &dec->zscan_c : & dec->zscan_y, &buf->zscan[i] , buf->num_ycbcr_blocks[i]); vl_zscan_render(i ? &dec->zscan_c : & dec->zscan_y, &buf->zscan[i] , buf->num_ycbcr_blocks[i]);
@ -816,7 +816,7 @@ vl_mpeg12_end_frame(struct pipe_video_codec *decoder,
if (!buf->num_ycbcr_blocks[plane]) continue; if (!buf->num_ycbcr_blocks[plane]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane); vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane);
util_set_vertex_buffers(dec->context, 2, false, vb); dec->context->set_vertex_buffers(dec->context, 2, vb);
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT) if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]); vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]);
View file
@ -284,7 +284,7 @@ asahi_compute_restore(struct agx_context *ctx)
} }
/* take_ownership=true so do not unreference */ /* take_ownership=true so do not unreference */
pctx->set_constant_buffer(pctx, MESA_SHADER_COMPUTE, 0, true, pctx->set_constant_buffer(pctx, MESA_SHADER_COMPUTE, 0,
&blitter->saved_cb); &blitter->saved_cb);
blitter->saved_cb.buffer = NULL; blitter->saved_cb.buffer = NULL;
@ -366,7 +366,7 @@ asahi_compute_blit(struct pipe_context *ctx, const struct pipe_blit_info *info,
.buffer_size = sizeof(data), .buffer_size = sizeof(data),
.user_buffer = data, .user_buffer = data,
}; };
ctx->set_constant_buffer(ctx, MESA_SHADER_COMPUTE, 0, false, &cb); ctx->set_constant_buffer(ctx, MESA_SHADER_COMPUTE, 0, &cb);
struct pipe_image_view image = { struct pipe_image_view image = {
.resource = dst, .resource = dst,
@ -448,7 +448,7 @@ asahi_compute_blit(struct pipe_context *ctx, const struct pipe_blit_info *info,
}; };
ctx->launch_grid(ctx, &grid_info); ctx->launch_grid(ctx, &grid_info);
ctx->set_shader_images(ctx, MESA_SHADER_COMPUTE, 0, 0, 1, NULL); ctx->set_shader_images(ctx, MESA_SHADER_COMPUTE, 0, 0, 1, NULL);
ctx->set_constant_buffer(ctx, MESA_SHADER_COMPUTE, 0, false, NULL); ctx->set_constant_buffer(ctx, MESA_SHADER_COMPUTE, 0, NULL);
ctx->set_sampler_views(ctx, MESA_SHADER_COMPUTE, 0, 0, 1, NULL); ctx->set_sampler_views(ctx, MESA_SHADER_COMPUTE, 0, 0, 1, NULL);
asahi_compute_restore(agx_context(ctx)); asahi_compute_restore(agx_context(ctx));
View file
@ -1300,18 +1300,18 @@ agx_batch_upload_pbe(struct agx_batch *batch, struct agx_pbe_packed *out,
static void static void
agx_set_constant_buffer(struct pipe_context *pctx, mesa_shader_stage shader, agx_set_constant_buffer(struct pipe_context *pctx, mesa_shader_stage shader,
uint index, bool take_ownership, uint index,
const struct pipe_constant_buffer *cb) const struct pipe_constant_buffer *cb)
{ {
struct agx_context *ctx = agx_context(pctx); struct agx_context *ctx = agx_context(pctx);
struct agx_stage *s = &ctx->stage[shader]; struct agx_stage *s = &ctx->stage[shader];
struct pipe_constant_buffer *constants = &s->cb[index]; struct pipe_constant_buffer *constants = &s->cb[index];
util_copy_constant_buffer(&s->cb[index], cb, take_ownership); util_copy_constant_buffer(&s->cb[index], cb);
/* Upload user buffer immediately */ /* Upload user buffer immediately */
if (constants->user_buffer && !constants->buffer) { if (constants->user_buffer && !constants->buffer) {
u_upload_data(ctx->base.const_uploader, 0, constants->buffer_size, 64, u_upload_data_ref(ctx->base.const_uploader, 0, constants->buffer_size, 64,
constants->user_buffer, &constants->buffer_offset, constants->user_buffer, &constants->buffer_offset,
&constants->buffer); &constants->buffer);
} }
@ -1341,7 +1341,7 @@ agx_set_vertex_buffers(struct pipe_context *pctx, unsigned count,
struct agx_context *ctx = agx_context(pctx); struct agx_context *ctx = agx_context(pctx);
util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers, util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers,
count, true); count);
ctx->dirty |= AGX_DIRTY_VERTEX; ctx->dirty |= AGX_DIRTY_VERTEX;
} }
@ -4209,7 +4209,7 @@ agx_draw_without_restart(struct agx_batch *batch,
uint32_t desc[5] = {draw->count, info->instance_count, 0, uint32_t desc[5] = {draw->count, info->instance_count, 0,
draw->index_bias, info->start_instance}; draw->index_bias, info->start_instance};
u_upload_data(ctx->base.const_uploader, 0, sizeof(desc), 4, &desc, u_upload_data_ref(ctx->base.const_uploader, 0, sizeof(desc), 4, &desc,
&indirect_synthesized.offset, &indirect_synthesized.buffer); &indirect_synthesized.offset, &indirect_synthesized.buffer);
indirect = &indirect_synthesized; indirect = &indirect_synthesized;
@ -4426,7 +4426,7 @@ util_draw_multi_upload_indirect(struct pipe_context *pctx,
const struct pipe_draw_start_count_bias *draws) const struct pipe_draw_start_count_bias *draws)
{ {
struct pipe_draw_indirect_info indirect_ = *indirect; struct pipe_draw_indirect_info indirect_ = *indirect;
u_upload_data(pctx->const_uploader, 0, 4, 4, &indirect->draw_count, u_upload_data_ref(pctx->const_uploader, 0, 4, 4, &indirect->draw_count,
&indirect_.indirect_draw_count_offset, &indirect_.indirect_draw_count_offset,
&indirect_.indirect_draw_count); &indirect_.indirect_draw_count);
@ -5589,6 +5589,7 @@ agx_init_state_functions(struct pipe_context *ctx)
ctx->set_viewport_states = agx_set_viewport_states; ctx->set_viewport_states = agx_set_viewport_states;
ctx->sampler_view_destroy = agx_sampler_view_destroy; ctx->sampler_view_destroy = agx_sampler_view_destroy;
ctx->sampler_view_release = u_default_sampler_view_release; ctx->sampler_view_release = u_default_sampler_view_release;
ctx->resource_release = u_default_resource_release;
ctx->draw_vbo = agx_draw_vbo; ctx->draw_vbo = agx_draw_vbo;
ctx->launch_grid = agx_launch_grid; ctx->launch_grid = agx_launch_grid;
ctx->set_global_binding = agx_set_global_binding; ctx->set_global_binding = agx_set_global_binding;
View file
@ -229,7 +229,7 @@ crocus_update_draw_parameters(struct crocus_context *ice,
ice->draw.params.baseinstance = info->start_instance; ice->draw.params.baseinstance = info->start_instance;
ice->draw.params_valid = true; ice->draw.params_valid = true;
u_upload_data(ice->ctx.stream_uploader, 0, u_upload_data_ref(ice->ctx.stream_uploader, 0,
sizeof(ice->draw.params), 4, &ice->draw.params, sizeof(ice->draw.params), 4, &ice->draw.params,
&draw_params->offset, &draw_params->res); &draw_params->offset, &draw_params->res);
} }
@ -247,7 +247,7 @@ crocus_update_draw_parameters(struct crocus_context *ice,
ice->draw.derived_params.drawid = drawid_offset; ice->draw.derived_params.drawid = drawid_offset;
ice->draw.derived_params.is_indexed_draw = is_indexed_draw; ice->draw.derived_params.is_indexed_draw = is_indexed_draw;
u_upload_data(ice->ctx.stream_uploader, 0, u_upload_data_ref(ice->ctx.stream_uploader, 0,
sizeof(ice->draw.derived_params), 4, sizeof(ice->draw.derived_params), 4,
&ice->draw.derived_params, &derived_params->offset, &ice->draw.derived_params, &derived_params->offset,
&derived_params->res); &derived_params->res);
@ -463,7 +463,7 @@ crocus_update_grid_size_resource(struct crocus_context *ice,
memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid)); memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
} else if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) != 0) { } else if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) != 0) {
memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid)); memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));
u_upload_data(ice->ctx.const_uploader, 0, sizeof(grid->grid), 4, u_upload_data_ref(ice->ctx.const_uploader, 0, sizeof(grid->grid), 4,
grid->grid, &grid_ref->offset, &grid_ref->res); grid->grid, &grid_ref->offset, &grid_ref->res);
} }
View file
@ -5,7 +5,7 @@
static void static void
crocus_fine_fence_reset(struct crocus_batch *batch) crocus_fine_fence_reset(struct crocus_batch *batch)
{ {
u_upload_alloc(batch->fine_fences.uploader, u_upload_alloc_ref(batch->fine_fences.uploader,
0, sizeof(uint64_t), sizeof(uint64_t), 0, sizeof(uint64_t), sizeof(uint64_t),
&batch->fine_fences.ref.offset, &batch->fine_fences.ref.res, &batch->fine_fences.ref.offset, &batch->fine_fences.ref.res,
(void **)&batch->fine_fences.map); (void **)&batch->fine_fences.map);
View file
@ -539,7 +539,7 @@ crocus_begin_query(struct pipe_context *ctx, struct pipe_query *query)
else else
size = sizeof(struct crocus_query_snapshots); size = sizeof(struct crocus_query_snapshots);
u_upload_alloc(ice->query_buffer_uploader, 0, u_upload_alloc_ref(ice->query_buffer_uploader, 0,
size, util_next_power_of_two(size), &q->query_state_ref.offset, size, util_next_power_of_two(size), &q->query_state_ref.offset,
&q->query_state_ref.res, &ptr); &q->query_state_ref.res, &ptr);
View file
@ -970,7 +970,7 @@ gen4_upload_curbe(struct crocus_batch *batch)
goto emit; goto emit;
uint32_t *map; uint32_t *map;
u_upload_alloc(ice->ctx.const_uploader, 0, buf_sz, 64, u_upload_alloc_ref(ice->ctx.const_uploader, 0, buf_sz, 64,
&ice->curbe.curbe_offset, (struct pipe_resource **)&ice->curbe.curbe_res, (void **) &map); &ice->curbe.curbe_offset, (struct pipe_resource **)&ice->curbe.curbe_res, (void **) &map);
/* fragment shader constants */ /* fragment shader constants */
@ -3505,7 +3505,6 @@ crocus_set_framebuffer_state(struct pipe_context *ctx,
static void static void
crocus_set_constant_buffer(struct pipe_context *ctx, crocus_set_constant_buffer(struct pipe_context *ctx,
mesa_shader_stage p_stage, unsigned index, mesa_shader_stage p_stage, unsigned index,
bool take_ownership,
const struct pipe_constant_buffer *input) const struct pipe_constant_buffer *input)
{ {
struct crocus_context *ice = (struct crocus_context *) ctx; struct crocus_context *ice = (struct crocus_context *) ctx;
@ -3513,7 +3512,7 @@ crocus_set_constant_buffer(struct pipe_context *ctx,
struct crocus_shader_state *shs = &ice->state.shaders[stage]; struct crocus_shader_state *shs = &ice->state.shaders[stage];
struct pipe_constant_buffer *cbuf = &shs->constbufs[index]; struct pipe_constant_buffer *cbuf = &shs->constbufs[index];
util_copy_constant_buffer(&shs->constbufs[index], input, take_ownership); util_copy_constant_buffer(&shs->constbufs[index], input);
if (input && input->buffer_size && (input->buffer || input->user_buffer)) { if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
shs->bound_cbufs |= 1u << index; shs->bound_cbufs |= 1u << index;
@ -3521,12 +3520,12 @@ crocus_set_constant_buffer(struct pipe_context *ctx,
if (input->user_buffer) { if (input->user_buffer) {
void *map = NULL; void *map = NULL;
pipe_resource_reference(&cbuf->buffer, NULL); pipe_resource_reference(&cbuf->buffer, NULL);
u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64, u_upload_alloc_ref(ice->ctx.const_uploader, 0, input->buffer_size, 64,
&cbuf->buffer_offset, &cbuf->buffer, (void **) &map); &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
if (!cbuf->buffer) { if (!cbuf->buffer) {
/* Allocation was unsuccessful - just unbind */ /* Allocation was unsuccessful - just unbind */
crocus_set_constant_buffer(ctx, p_stage, index, false, NULL); crocus_set_constant_buffer(ctx, p_stage, index, NULL);
return; return;
} }
@ -3566,7 +3565,7 @@ upload_sysvals(struct crocus_context *ice,
uint32_t *map = NULL; uint32_t *map = NULL;
assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS); assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64, u_upload_alloc_ref(ice->ctx.const_uploader, 0, upload_size, 64,
&cbuf->buffer_offset, &cbuf->buffer, (void **) &map); &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
for (int i = 0; i < shader->num_system_values; i++) { for (int i = 0; i < shader->num_system_values; i++) {
@ -3693,7 +3692,7 @@ crocus_set_vertex_buffers(struct pipe_context *ctx,
(GFX_VERx10 < 75 && screen->devinfo.platform != INTEL_PLATFORM_BYT) * 2; (GFX_VERx10 < 75 && screen->devinfo.platform != INTEL_PLATFORM_BYT) * 2;
util_set_vertex_buffers_mask(ice->state.vertex_buffers, &ice->state.bound_vertex_buffers, util_set_vertex_buffers_mask(ice->state.vertex_buffers, &ice->state.bound_vertex_buffers,
buffers, count, true); buffers, count);
for (unsigned i = 0; i < count; i++) { for (unsigned i = 0; i < count; i++) {
struct pipe_vertex_buffer *state = struct pipe_vertex_buffer *state =
@ -4056,7 +4055,7 @@ crocus_create_stream_output_target(struct pipe_context *ctx,
#if GFX_VER >= 7 #if GFX_VER >= 7
struct crocus_context *ice = (struct crocus_context *) ctx; struct crocus_context *ice = (struct crocus_context *) ctx;
void *temp; void *temp;
u_upload_alloc(ice->ctx.stream_uploader, 0, sizeof(uint32_t), 4, u_upload_alloc_ref(ice->ctx.stream_uploader, 0, sizeof(uint32_t), 4,
&cso->offset_offset, &cso->offset_offset,
(struct pipe_resource **)&cso->offset_res, (struct pipe_resource **)&cso->offset_res,
&temp); &temp);
@ -4105,7 +4104,7 @@ crocus_stream_store_prims_written(struct crocus_batch *batch,
struct crocus_stream_output_target *tgt) struct crocus_stream_output_target *tgt)
{ {
if (!tgt->offset_res) { if (!tgt->offset_res) {
u_upload_alloc(batch->ice->ctx.stream_uploader, 0, 4096, 4, u_upload_alloc_ref(batch->ice->ctx.stream_uploader, 0, 4096, 4,
&tgt->offset_offset, &tgt->offset_offset,
(struct pipe_resource **)&tgt->offset_res, (struct pipe_resource **)&tgt->offset_res,
&tgt->prim_map); &tgt->prim_map);
@ -7829,7 +7828,7 @@ crocus_upload_render_state(struct crocus_context *ice,
if (draw->has_user_indices) { if (draw->has_user_indices) {
unsigned start_offset = draw->index_size * sc->start; unsigned start_offset = draw->index_size * sc->start;
u_upload_data(ice->ctx.stream_uploader, 0, u_upload_data_ref(ice->ctx.stream_uploader, 0,
sc->count * draw->index_size, 4, sc->count * draw->index_size, 4,
(char *)draw->index.user + start_offset, (char *)draw->index.user + start_offset,
&offset, &ice->state.index_buffer.res); &offset, &ice->state.index_buffer.res);
@ -9298,6 +9297,7 @@ genX(crocus_init_state)(struct crocus_context *ice)
ctx->set_viewport_states = crocus_set_viewport_states; ctx->set_viewport_states = crocus_set_viewport_states;
ctx->sampler_view_destroy = crocus_sampler_view_destroy; ctx->sampler_view_destroy = crocus_sampler_view_destroy;
ctx->sampler_view_release = u_default_sampler_view_release; ctx->sampler_view_release = u_default_sampler_view_release;
ctx->resource_release = u_default_resource_release;
ctx->surface_destroy = crocus_surface_destroy; ctx->surface_destroy = crocus_surface_destroy;
ctx->draw_vbo = crocus_draw_vbo; ctx->draw_vbo = crocus_draw_vbo;
ctx->launch_grid = crocus_launch_grid; ctx->launch_grid = crocus_launch_grid;
View file
@ -505,7 +505,7 @@ d3d12_restore_compute_transform_state(struct d3d12_context *ctx, d3d12_compute_t
ctx->base.bind_compute_state(&ctx->base, save->cs); ctx->base.bind_compute_state(&ctx->base, save->cs);
ctx->base.set_constant_buffer(&ctx->base, MESA_SHADER_COMPUTE, 1, true, &save->cbuf0); ctx->base.set_constant_buffer(&ctx->base, MESA_SHADER_COMPUTE, 1, &save->cbuf0);
ctx->base.set_shader_buffers(&ctx->base, MESA_SHADER_COMPUTE, 0, ARRAY_SIZE(save->ssbos), save->ssbos, (1u << ARRAY_SIZE(save->ssbos)) - 1); ctx->base.set_shader_buffers(&ctx->base, MESA_SHADER_COMPUTE, 0, ARRAY_SIZE(save->ssbos), save->ssbos, (1u << ARRAY_SIZE(save->ssbos)) - 1);
if (ctx->current_predication) if (ctx->current_predication)
View file
@ -1264,7 +1264,7 @@ d3d12_set_vertex_buffers(struct pipe_context *pctx,
const struct pipe_vertex_buffer *buffers) const struct pipe_vertex_buffer *buffers)
{ {
struct d3d12_context *ctx = d3d12_context(pctx); struct d3d12_context *ctx = d3d12_context(pctx);
util_set_vertex_buffers_count(ctx->vbs, &ctx->num_vbs, buffers, num_buffers, true); util_set_vertex_buffers_count(ctx->vbs, &ctx->num_vbs, buffers, num_buffers);
for (unsigned i = 0; i < ctx->num_vbs; ++i) { for (unsigned i = 0; i < ctx->num_vbs; ++i) {
const struct pipe_vertex_buffer* buf = ctx->vbs + i; const struct pipe_vertex_buffer* buf = ctx->vbs + i;
@ -1367,7 +1367,6 @@ d3d12_increment_constant_buffer_bind_count(struct d3d12_context *ctx,
static void static void
d3d12_set_constant_buffer(struct pipe_context *pctx, d3d12_set_constant_buffer(struct pipe_context *pctx,
mesa_shader_stage shader, uint index, mesa_shader_stage shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *buf) const struct pipe_constant_buffer *buf)
{ {
struct d3d12_context *ctx = d3d12_context(pctx); struct d3d12_context *ctx = d3d12_context(pctx);
@ -1378,7 +1377,7 @@ d3d12_set_constant_buffer(struct pipe_context *pctx,
if (buf) { if (buf) {
unsigned offset = buf->buffer_offset; unsigned offset = buf->buffer_offset;
if (buf->user_buffer) { if (buf->user_buffer) {
u_upload_data(pctx->const_uploader, 0, buf->buffer_size, u_upload_data_ref(pctx->const_uploader, 0, buf->buffer_size,
D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT,
buf->user_buffer, &offset, &ctx->cbufs[shader][index].buffer); buf->user_buffer, &offset, &ctx->cbufs[shader][index].buffer);
d3d12_increment_constant_buffer_bind_count(ctx, shader, d3d12_increment_constant_buffer_bind_count(ctx, shader,
@ -1388,13 +1387,8 @@ d3d12_set_constant_buffer(struct pipe_context *pctx,
if (buffer) if (buffer)
d3d12_increment_constant_buffer_bind_count(ctx, shader, d3d12_resource(buffer)); d3d12_increment_constant_buffer_bind_count(ctx, shader, d3d12_resource(buffer));
if (take_ownership) {
pipe_resource_reference(&ctx->cbufs[shader][index].buffer, NULL);
ctx->cbufs[shader][index].buffer = buffer;
} else {
pipe_resource_reference(&ctx->cbufs[shader][index].buffer, buffer); pipe_resource_reference(&ctx->cbufs[shader][index].buffer, buffer);
} }
}
ctx->cbufs[shader][index].buffer_offset = offset; ctx->cbufs[shader][index].buffer_offset = offset;
ctx->cbufs[shader][index].buffer_size = buf->buffer_size; ctx->cbufs[shader][index].buffer_size = buf->buffer_size;
@ -1922,7 +1916,7 @@ d3d12_disable_fake_so_buffers(struct d3d12_context *ctx)
cbuf.buffer = fake_target->fill_buffer; cbuf.buffer = fake_target->fill_buffer;
cbuf.buffer_offset = fake_target->fill_buffer_offset; cbuf.buffer_offset = fake_target->fill_buffer_offset;
cbuf.buffer_size = fake_target->fill_buffer->width0 - cbuf.buffer_offset; cbuf.buffer_size = fake_target->fill_buffer->width0 - cbuf.buffer_offset;
ctx->base.set_constant_buffer(&ctx->base, MESA_SHADER_COMPUTE, 1, false, &cbuf); ctx->base.set_constant_buffer(&ctx->base, MESA_SHADER_COMPUTE, 1, &cbuf);
grid.indirect = fake_target->fill_buffer; grid.indirect = fake_target->fill_buffer;
grid.indirect_offset = fake_target->fill_buffer_offset + 4; grid.indirect_offset = fake_target->fill_buffer_offset + 4;
@ -2277,6 +2271,7 @@ d3d12_init_graphics_context_functions(struct d3d12_context *ctx)
ctx->base.create_sampler_view = d3d12_create_sampler_view; ctx->base.create_sampler_view = d3d12_create_sampler_view;
ctx->base.sampler_view_destroy = d3d12_destroy_sampler_view; ctx->base.sampler_view_destroy = d3d12_destroy_sampler_view;
ctx->base.sampler_view_release = u_default_sampler_view_release; ctx->base.sampler_view_release = u_default_sampler_view_release;
ctx->base.resource_release = u_default_resource_release;
ctx->base.create_vertex_elements_state = d3d12_create_vertex_elements_state; ctx->base.create_vertex_elements_state = d3d12_create_vertex_elements_state;
ctx->base.bind_vertex_elements_state = d3d12_bind_vertex_elements_state; ctx->base.bind_vertex_elements_state = d3d12_bind_vertex_elements_state;
View file
@ -795,7 +795,7 @@ update_draw_indirect_with_sysvals(struct d3d12_context *ctx,
draw_count_cbuf.buffer_offset = indirect_in->indirect_draw_count_offset; draw_count_cbuf.buffer_offset = indirect_in->indirect_draw_count_offset;
draw_count_cbuf.buffer_size = 4; draw_count_cbuf.buffer_size = 4;
draw_count_cbuf.user_buffer = nullptr; draw_count_cbuf.user_buffer = nullptr;
ctx->base.set_constant_buffer(&ctx->base, MESA_SHADER_COMPUTE, 1, false, &draw_count_cbuf); ctx->base.set_constant_buffer(&ctx->base, MESA_SHADER_COMPUTE, 1, &draw_count_cbuf);
} }
pipe_shader_buffer new_cs_ssbos[2]; pipe_shader_buffer new_cs_ssbos[2];
View file
@ -78,7 +78,7 @@ etna_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
static void static void
etna_set_constant_buffer(struct pipe_context *pctx, etna_set_constant_buffer(struct pipe_context *pctx,
mesa_shader_stage shader, uint index, bool take_ownership, mesa_shader_stage shader, uint index,
const struct pipe_constant_buffer *cb) const struct pipe_constant_buffer *cb)
{ {
struct etna_context *ctx = etna_context(pctx); struct etna_context *ctx = etna_context(pctx);
@ -86,7 +86,7 @@ etna_set_constant_buffer(struct pipe_context *pctx,
assert(index < ETNA_MAX_CONST_BUF); assert(index < ETNA_MAX_CONST_BUF);
util_copy_constant_buffer(&so->cb[index], cb, take_ownership); util_copy_constant_buffer(&so->cb[index], cb);
/* Note that the gallium frontends can unbind constant buffers by /* Note that the gallium frontends can unbind constant buffers by
* passing NULL here. */ * passing NULL here. */
@ -99,7 +99,7 @@ etna_set_constant_buffer(struct pipe_context *pctx,
if (!cb->buffer) { if (!cb->buffer) {
struct pipe_constant_buffer *cb = &so->cb[index]; struct pipe_constant_buffer *cb = &so->cb[index];
u_upload_data(pctx->const_uploader, 0, cb->buffer_size, 16, cb->user_buffer, &cb->buffer_offset, &cb->buffer); u_upload_data_ref(pctx->const_uploader, 0, cb->buffer_size, 16, cb->user_buffer, &cb->buffer_offset, &cb->buffer);
ctx->dirty |= ETNA_DIRTY_SHADER_CACHES; ctx->dirty |= ETNA_DIRTY_SHADER_CACHES;
} }
@ -518,8 +518,7 @@ etna_set_vertex_buffers(struct pipe_context *pctx, unsigned num_buffers,
struct etna_context *ctx = etna_context(pctx); struct etna_context *ctx = etna_context(pctx);
struct etna_vertexbuf_state *so = &ctx->vertex_buffer; struct etna_vertexbuf_state *so = &ctx->vertex_buffer;
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, num_buffers, util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, num_buffers);
true);
so->count = util_last_bit(so->enabled_mask); so->count = util_last_bit(so->enabled_mask);
if (!num_buffers) { if (!num_buffers) {
View file
@ -388,6 +388,7 @@ etna_texture_desc_init(struct pipe_context *pctx)
ctx->base.create_sampler_view = etna_create_sampler_view_desc; ctx->base.create_sampler_view = etna_create_sampler_view_desc;
ctx->base.sampler_view_destroy = etna_sampler_view_desc_destroy; ctx->base.sampler_view_destroy = etna_sampler_view_desc_destroy;
ctx->base.sampler_view_release = u_default_sampler_view_release; ctx->base.sampler_view_release = u_default_sampler_view_release;
ctx->base.resource_release = u_default_resource_release;
ctx->emit_texture_state = etna_emit_texture_desc; ctx->emit_texture_state = etna_emit_texture_desc;
ctx->ts_for_sampler_view = etna_ts_for_sampler_view_state; ctx->ts_for_sampler_view = etna_ts_for_sampler_view_state;
} }
View file
@ -617,6 +617,7 @@ etna_texture_state_init(struct pipe_context *pctx)
ctx->base.create_sampler_view = etna_create_sampler_view_state; ctx->base.create_sampler_view = etna_create_sampler_view_state;
ctx->base.sampler_view_destroy = etna_sampler_view_state_destroy; ctx->base.sampler_view_destroy = etna_sampler_view_state_destroy;
ctx->base.sampler_view_release = u_default_sampler_view_release; ctx->base.sampler_view_release = u_default_sampler_view_release;
ctx->base.resource_release = u_default_resource_release;
ctx->ts_for_sampler_view = etna_ts_for_sampler_view_state; ctx->ts_for_sampler_view = etna_ts_for_sampler_view_state;
STATIC_ASSERT(VIVS_TE_SAMPLER_LOD_ADDR__LEN == VIVS_NTE_SAMPLER_ADDR_LOD__LEN); STATIC_ASSERT(VIVS_TE_SAMPLER_LOD_ADDR__LEN == VIVS_NTE_SAMPLER_ADDR_LOD__LEN);
View file
@ -235,7 +235,7 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
unsigned off; unsigned off;
void *ptr; void *ptr;
u_upload_alloc(fd3_ctx->border_color_uploader, 0, u_upload_alloc_ref(fd3_ctx->border_color_uploader, 0,
BORDER_COLOR_UPLOAD_SIZE, BORDER_COLOR_UPLOAD_SIZE, &off, BORDER_COLOR_UPLOAD_SIZE, BORDER_COLOR_UPLOAD_SIZE, &off,
&fd3_ctx->border_color_buf, &ptr); &fd3_ctx->border_color_buf, &ptr);
View file
@ -333,7 +333,7 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
unsigned off; unsigned off;
void *ptr; void *ptr;
u_upload_alloc(fd4_ctx->border_color_uploader, 0, u_upload_alloc_ref(fd4_ctx->border_color_uploader, 0,
BORDER_COLOR_UPLOAD_SIZE, BORDER_COLOR_UPLOAD_SIZE, &off, BORDER_COLOR_UPLOAD_SIZE, BORDER_COLOR_UPLOAD_SIZE, &off,
&fd4_ctx->border_color_buf, &ptr); &fd4_ctx->border_color_buf, &ptr);
View file
@ -294,7 +294,7 @@ emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring) assert_dt
const unsigned int alignment = const unsigned int alignment =
util_next_power_of_two(FD5_BORDER_COLOR_UPLOAD_SIZE); util_next_power_of_two(FD5_BORDER_COLOR_UPLOAD_SIZE);
u_upload_alloc(fd5_ctx->border_color_uploader, 0, u_upload_alloc_ref(fd5_ctx->border_color_uploader, 0,
FD5_BORDER_COLOR_UPLOAD_SIZE, alignment, FD5_BORDER_COLOR_UPLOAD_SIZE, alignment,
&off, &fd5_ctx->border_color_buf, &ptr); &off, &fd5_ctx->border_color_buf, &ptr);
View file
@ -51,7 +51,7 @@ fd6_upload_emit_driver_ubo(struct fd_context *ctx, fd_cs &cs,
unsigned buffer_offset; unsigned buffer_offset;
struct pipe_resource *buffer = NULL; struct pipe_resource *buffer = NULL;
u_upload_data(pctx->const_uploader, 0, sizedwords * sizeof(uint32_t), u_upload_data_ref(pctx->const_uploader, 0, sizedwords * sizeof(uint32_t),
16, dwords, &buffer_offset, &buffer); 16, dwords, &buffer_offset, &buffer);
if (!buffer) if (!buffer)
return; /* nothing good will come of this.. */ return; /* nothing good will come of this.. */
@ -490,7 +490,7 @@ fd6_emit_cs_driver_params(struct fd_context *ctx, fd_cs &cs,
struct pipe_resource *buffer = NULL; struct pipe_resource *buffer = NULL;
unsigned buffer_offset; unsigned buffer_offset;
u_upload_data(ctx->base.const_uploader, 0, sizeof(compute_params), u_upload_data_ref(ctx->base.const_uploader, 0, sizeof(compute_params),
16, &compute_params, &buffer_offset, &buffer); 16, &compute_params, &buffer_offset, &buffer);
if (info->indirect) { if (info->indirect) {
View file
@ -292,7 +292,7 @@ fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
.buffer_size = 16, .buffer_size = 16,
.user_buffer = &color->ui, .user_buffer = &color->ui,
}; };
pctx->set_constant_buffer(pctx, MESA_SHADER_FRAGMENT, 0, false, &cb); pctx->set_constant_buffer(pctx, MESA_SHADER_FRAGMENT, 0, &cb);
unsigned rs_idx = pfb->samples > 1 ? 1 : 0; unsigned rs_idx = pfb->samples > 1 ? 1 : 0;
if (!ctx->clear_rs_state[rs_idx]) { if (!ctx->clear_rs_state[rs_idx]) {
View file
@@ -110,7 +110,7 @@ fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples) in_dt
 static void
 upload_user_buffer(struct pipe_context *pctx, struct pipe_constant_buffer *cb)
 {
-   u_upload_data(pctx->stream_uploader, 0, cb->buffer_size, 64,
+   u_upload_data_ref(pctx->stream_uploader, 0, cb->buffer_size, 64,
                  cb->user_buffer, &cb->buffer_offset, &cb->buffer);
    cb->user_buffer = NULL;
 }
@@ -125,13 +125,13 @@ upload_user_buffer(struct pipe_context *pctx, struct pipe_constant_buffer *cb)
  */
 static void
 fd_set_constant_buffer(struct pipe_context *pctx, mesa_shader_stage shader,
-                       uint index, bool take_ownership,
+                       uint index,
                        const struct pipe_constant_buffer *cb) in_dt
 {
    struct fd_context *ctx = fd_context(pctx);
    struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];
-   util_copy_constant_buffer(&so->cb[index], cb, take_ownership);
+   util_copy_constant_buffer(&so->cb[index], cb);
    /* Note that gallium frontends can unbind constant buffers by
    * passing a NULL cb, or a cb with no buffer:
@@ -472,8 +472,7 @@ fd_set_vertex_buffers(struct pipe_context *pctx, unsigned count,
       }
    }
-   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, count,
-                                true);
+   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, count);
    so->count = util_last_bit(so->enabled_mask);
    if (!vb)

@@ -95,6 +95,7 @@ fd_texture_init(struct pipe_context *pctx)
    if (!pctx->sampler_view_destroy)
       pctx->sampler_view_destroy = fd_sampler_view_destroy;
    pctx->sampler_view_release = u_default_sampler_view_release;
+   pctx->resource_release = u_default_resource_release;
 }
 /* helper for setting up border-color buffer for a3xx/a4xx: */

@@ -220,7 +220,7 @@ ir3_emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
        */
       if (cb->user_buffer) {
          struct pipe_context *pctx = &ctx->base;
-         u_upload_data(pctx->stream_uploader, 0, cb->buffer_size, 64,
+         u_upload_data_ref(pctx->stream_uploader, 0, cb->buffer_size, 64,
                        cb->user_buffer, &cb->buffer_offset, &cb->buffer);
          cb->user_buffer = NULL;
       }
@@ -640,7 +640,7 @@ ir3_emit_cs_driver_params(const struct ir3_shader_variant *v,
    struct pipe_resource *buffer = NULL;
    unsigned buffer_offset;
-   u_upload_data(ctx->base.const_uploader, 0, sizeof(compute_params),
+   u_upload_data_ref(ctx->base.const_uploader, 0, sizeof(compute_params),
                  16, &compute_params, &buffer_offset, &buffer);
    /* Copy the indirect params into the driver param buffer. The layout

@@ -719,7 +719,6 @@ i915_delete_vs_state(struct pipe_context *pipe, void *shader)
 static void
 i915_set_constant_buffer(struct pipe_context *pipe,
                          mesa_shader_stage shader, uint32_t index,
-                         bool take_ownership,
                          const struct pipe_constant_buffer *cb)
 {
    struct i915_context *i915 = i915_context(pipe);
@@ -761,12 +760,7 @@ i915_set_constant_buffer(struct pipe_context *pipe,
       diff = i915->current.num_user_constants[shader] != 0;
    }
-   if (take_ownership) {
-      pipe_resource_reference(&i915->constants[shader], NULL);
-      i915->constants[shader] = buf;
-   } else {
-      pipe_resource_reference(&i915->constants[shader], buf);
-   }
+   pipe_resource_reference(&i915->constants[shader], buf);
    i915->current.num_user_constants[shader] = new_num;
    if (diff)
@@ -1057,8 +1051,7 @@ i915_set_vertex_buffers(struct pipe_context *pipe, unsigned count,
    assert(count <= PIPE_MAX_ATTRIBS);
    util_set_vertex_buffers_count(draw->pt.vertex_buffer,
-                                 &draw->pt.nr_vertex_buffers, buffers, count,
-                                 true);
+                                 &draw->pt.nr_vertex_buffers, buffers, count);
 }
 static void *
@@ -1148,6 +1141,7 @@ i915_init_state_functions(struct i915_context *i915)
    i915->base.create_sampler_view = i915_create_sampler_view;
    i915->base.sampler_view_destroy = i915_sampler_view_destroy;
    i915->base.sampler_view_release = u_default_sampler_view_release;
+   i915->base.resource_release = u_default_resource_release;
    i915->base.set_viewport_states = i915_set_viewport_states;
    i915->base.set_vertex_buffers = i915_set_vertex_buffers;
 }

@@ -63,7 +63,7 @@ stream_state(struct iris_batch *batch,
    struct pipe_resource *res = NULL;
    void *ptr = NULL;
-   u_upload_alloc(uploader, 0, size, alignment, out_offset, &res, &ptr);
+   u_upload_alloc_ref(uploader, 0, size, alignment, out_offset, &res, &ptr);
    struct iris_bo *bo = iris_resource_bo(res);
    iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

@@ -146,7 +146,7 @@ iris_update_draw_parameters(struct iris_context *ice,
       ice->draw.params.baseinstance = info->start_instance;
       ice->draw.params_valid = true;
-      u_upload_data(ice->ctx.const_uploader, 0,
+      u_upload_data_ref(ice->ctx.const_uploader, 0,
                     sizeof(ice->draw.params), 4, &ice->draw.params,
                     &draw_params->offset, &draw_params->res);
    }
@@ -164,7 +164,7 @@ iris_update_draw_parameters(struct iris_context *ice,
       ice->draw.derived_params.drawid = drawid_offset;
       ice->draw.derived_params.is_indexed_draw = is_indexed_draw;
-      u_upload_data(ice->ctx.const_uploader, 0,
+      u_upload_data_ref(ice->ctx.const_uploader, 0,
                     sizeof(ice->draw.derived_params), 4,
                     &ice->draw.derived_params,
                     &derived_params->offset, &derived_params->res);
@@ -376,7 +376,7 @@ iris_update_grid_size_resource(struct iris_context *ice,
       grid_updated = true;
    } else if (memcmp(ice->state.last_grid, grid->grid, sizeof(grid->grid)) != 0) {
       memcpy(ice->state.last_grid, grid->grid, sizeof(grid->grid));
-      u_upload_data(ice->state.dynamic_uploader, 0, sizeof(grid->grid), 4,
+      u_upload_data_ref(ice->state.dynamic_uploader, 0, sizeof(grid->grid), 4,
                    grid->grid, &grid_ref->offset, &grid_ref->res);
      grid_updated = true;
    }
@@ -392,7 +392,7 @@ iris_update_grid_size_resource(struct iris_context *ice,
    struct iris_bo *grid_bo = iris_resource_bo(grid_ref->res);
    void *surf_map = NULL;
-   u_upload_alloc(ice->state.surface_uploader, 0, isl_dev->ss.size,
+   u_upload_alloc_ref(ice->state.surface_uploader, 0, isl_dev->ss.size,
                   isl_dev->ss.align, &state_ref->offset, &state_ref->res,
                   &surf_map);
    state_ref->offset +=

@@ -5,7 +5,7 @@
 static void
 iris_fine_fence_reset(struct iris_batch *batch)
 {
-   u_upload_alloc(batch->fine_fences.uploader,
+   u_upload_alloc_ref(batch->fine_fences.uploader,
                   0, sizeof(uint64_t), sizeof(uint64_t),
                   &batch->fine_fences.ref.offset, &batch->fine_fences.ref.res,
                   (void **)&batch->fine_fences.map);

@@ -116,7 +116,7 @@ upload_state(struct iris_batch *batch,
              unsigned alignment)
 {
    void *p = NULL;
-   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
+   u_upload_alloc_ref(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
    iris_use_pinned_bo(batch, iris_resource_bo(ref->res), false, IRIS_DOMAIN_NONE);
    return p;
 }
@@ -131,7 +131,7 @@ stream_state(struct iris_batch *batch,
 {
    void *ptr = NULL;
-   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
+   u_upload_alloc_ref(uploader, 0, size, alignment, out_offset, out_res, &ptr);
    struct iris_bo *bo = iris_resource_bo(*out_res);
    iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);

@@ -696,7 +696,7 @@ upload_state(struct u_upload_mgr *uploader,
              unsigned alignment)
 {
    void *p = NULL;
-   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
+   u_upload_alloc_ref(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
    return p;
 }

@@ -165,7 +165,7 @@ iris_upload_shader(struct iris_screen *screen,
 {
    const struct intel_device_info *devinfo = screen->devinfo;
-   u_upload_alloc(uploader, 0, shader->program_size, 64,
+   u_upload_alloc_ref(uploader, 0, shader->program_size, 64,
                   &shader->assembly.offset, &shader->assembly.res,
                   &shader->map);
    memcpy(shader->map, assembly, shader->program_size);

@@ -518,7 +518,7 @@ iris_begin_query(struct pipe_context *ctx, struct pipe_query *query)
    else
       size = sizeof(struct iris_query_snapshots);
-   u_upload_alloc(ice->query_buffer_uploader, 0,
+   u_upload_alloc_ref(ice->query_buffer_uploader, 0,
                   size, util_next_power_of_two(size),
                   &q->query_state_ref.offset,
                   &q->query_state_ref.res, &ptr);

@@ -326,7 +326,7 @@ upload_state(struct u_upload_mgr *uploader,
              unsigned alignment)
 {
    void *p = NULL;
-   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
+   u_upload_alloc_ref(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
    return p;
 }
@@ -347,7 +347,7 @@ stream_state(struct iris_batch *batch,
 {
    void *ptr = NULL;
-   u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
+   u_upload_alloc_ref(uploader, 0, size, alignment, out_offset, out_res, &ptr);
    struct iris_bo *bo = iris_resource_bo(*out_res);
    iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
@@ -3963,7 +3963,6 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
 static void
 iris_set_constant_buffer(struct pipe_context *ctx,
                          mesa_shader_stage stage, unsigned index,
-                         bool take_ownership,
                          const struct pipe_constant_buffer *input)
 {
    struct iris_context *ice = (struct iris_context *) ctx;
@@ -3979,12 +3978,12 @@ iris_set_constant_buffer(struct pipe_context *ctx,
    if (input->user_buffer) {
       void *map = NULL;
       pipe_resource_reference(&cbuf->buffer, NULL);
-      u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
+      u_upload_alloc_ref(ice->ctx.const_uploader, 0, input->buffer_size, 64,
                      &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
       if (!cbuf->buffer) {
         /* Allocation was unsuccessful - just unbind */
-        iris_set_constant_buffer(ctx, stage, index, false, NULL);
+        iris_set_constant_buffer(ctx, stage, index, NULL);
         return;
      }
@@ -3997,12 +3996,7 @@ iris_set_constant_buffer(struct pipe_context *ctx,
         shs->dirty_cbufs |= 1u << index;
      }
-      if (take_ownership) {
-         pipe_resource_reference(&cbuf->buffer, NULL);
-         cbuf->buffer = input->buffer;
-      } else {
-         pipe_resource_reference(&cbuf->buffer, input->buffer);
-      }
+      pipe_resource_reference(&cbuf->buffer, input->buffer);
      cbuf->buffer_offset = input->buffer_offset;
    }
@@ -4042,7 +4036,7 @@ upload_sysvals(struct iris_context *ice,
    void *map = NULL;
    assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
-   u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
+   u_upload_alloc_ref(ice->ctx.const_uploader, 0, upload_size, 64,
                   &cbuf->buffer_offset, &cbuf->buffer, &map);
    uint32_t *sysval_map = map;
@@ -4209,8 +4203,7 @@ iris_set_vertex_buffers(struct pipe_context *ctx,
          state->resource != buffer->buffer.resource)
        ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
-      pipe_resource_reference(&state->resource, NULL);
-      state->resource = buffer->buffer.resource;
+      pipe_resource_reference(&state->resource, buffer->buffer.resource);
      struct iris_resource *res = (void *) state->resource;
@@ -8492,7 +8485,7 @@ iris_emit_index_buffer(struct iris_context *ice,
    if (draw->has_user_indices) {
       unsigned start_offset = draw->index_size * sc->start;
-      u_upload_data(ice->ctx.const_uploader, start_offset,
+      u_upload_data_ref(ice->ctx.const_uploader, start_offset,
                     sc->count * draw->index_size, 4,
                     (char*)draw->index.user + start_offset,
                     &offset, &ice->state.last_res.index_buffer);
@@ -10671,6 +10664,7 @@ genX(init_state)(struct iris_context *ice)
    ctx->set_viewport_states = iris_set_viewport_states;
    ctx->sampler_view_destroy = iris_sampler_view_destroy;
    ctx->sampler_view_release = u_default_sampler_view_release;
+   ctx->resource_release = u_default_resource_release;
    ctx->surface_destroy = iris_surface_destroy;
    ctx->draw_vbo = iris_draw_vbo;
    ctx->launch_grid = iris_launch_grid;

@@ -76,7 +76,7 @@ lima_ctx_buff_alloc(struct lima_context *ctx, enum lima_ctx_buff buff,
    cbs->size = align(size, 0x40);
-   u_upload_alloc(ctx->uploader, 0, cbs->size, 0x40, &cbs->offset,
+   u_upload_alloc_ref(ctx->uploader, 0, cbs->size, 0x40, &cbs->offset,
                   &cbs->res, &ret);
    return ret;

@@ -310,7 +310,7 @@ lima_job_create_stream_bo(struct lima_job *job, int pipe,
    void *cpu;
    unsigned offset;
    struct pipe_resource *pres = NULL;
-   u_upload_alloc(ctx->uploader, 0, size, 0x40, &offset, &pres, &cpu);
+   u_upload_alloc_ref(ctx->uploader, 0, size, 0x40, &offset, &pres, &cpu);
    struct lima_resource *res = lima_resource(pres);
    *va = res->bo->va + offset;

@@ -193,7 +193,7 @@ lima_set_vertex_buffers(struct pipe_context *pctx,
    struct lima_context_vertex_buffer *so = &ctx->vertex_buffers;
    util_set_vertex_buffers_mask(so->vb, &so->enabled_mask,
-                                vb, count, true);
+                                vb, count);
    so->count = util_last_bit(so->enabled_mask);
    ctx->dirty |= LIMA_CONTEXT_DIRTY_VERTEX_BUFF;
@@ -264,7 +264,6 @@ lima_set_stencil_ref(struct pipe_context *pctx,
 static void
 lima_set_constant_buffer(struct pipe_context *pctx,
                          mesa_shader_stage shader, uint index,
-                         bool pass_reference,
                          const struct pipe_constant_buffer *cb)
 {
    struct lima_context *ctx = lima_context(pctx);
@@ -441,6 +440,7 @@ lima_state_init(struct lima_context *ctx)
    ctx->base.create_sampler_view = lima_create_sampler_view;
    ctx->base.sampler_view_destroy = lima_sampler_view_destroy;
    ctx->base.sampler_view_release = u_default_sampler_view_release;
+   ctx->base.resource_release = u_default_resource_release;
    ctx->base.set_sampler_views = lima_set_sampler_views;
    ctx->base.set_sample_mask = lima_set_sample_mask;
@@ -452,5 +452,5 @@ lima_state_fini(struct lima_context *ctx)
    struct lima_context_vertex_buffer *so = &ctx->vertex_buffers;
    util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, NULL,
-                                0, false);
+                                0);
 }

@@ -639,10 +639,10 @@ lp_setup_set_fs_constants(struct lp_setup_context *setup,
    unsigned i;
    for (i = 0; i < num; ++i) {
       util_copy_constant_buffer(&setup->constants[i].current,
-                                &buffers[i], false);
+                                &buffers[i]);
    }
    for (; i < ARRAY_SIZE(setup->constants); i++) {
-      util_copy_constant_buffer(&setup->constants[i].current, NULL, false);
+      util_copy_constant_buffer(&setup->constants[i].current, NULL);
    }
    setup->dirty |= LP_SETUP_NEW_CONSTANTS;
 }

@@ -1545,10 +1545,10 @@ lp_csctx_set_cs_constants(struct lp_cs_context *csctx,
    assert(num <= ARRAY_SIZE(csctx->constants));
    for (i = 0; i < num; ++i) {
-      util_copy_constant_buffer(&csctx->constants[i].current, &buffers[i], false);
+      util_copy_constant_buffer(&csctx->constants[i].current, &buffers[i]);
    }
    for (; i < ARRAY_SIZE(csctx->constants); i++) {
-      util_copy_constant_buffer(&csctx->constants[i].current, NULL, false);
+      util_copy_constant_buffer(&csctx->constants[i].current, NULL);
    }
 }

@@ -4229,7 +4229,6 @@ llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
 static void
 llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                              mesa_shader_stage shader, uint index,
-                             bool take_ownership,
                              const struct pipe_constant_buffer *cb)
 {
    struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
@@ -4239,15 +4238,14 @@ llvmpipe_set_constant_buffer(struct pipe_context *pipe,
    assert(index < ARRAY_SIZE(llvmpipe->constants[shader]));
    /* note: reference counting */
-   util_copy_constant_buffer(&llvmpipe->constants[shader][index], cb,
-                             take_ownership);
+   util_copy_constant_buffer(&llvmpipe->constants[shader][index], cb);
    /* user_buffer is only valid until the next set_constant_buffer (at most,
    * possibly until shader deletion), so we need to upload it now to make
    * sure it doesn't get updated/freed out from under us.
    */
   if (constants->user_buffer) {
-      u_upload_data(llvmpipe->pipe.const_uploader, 0, constants->buffer_size,
+      u_upload_data_ref(llvmpipe->pipe.const_uploader, 0, constants->buffer_size,
                     16, constants->user_buffer, &constants->buffer_offset,
                     &constants->buffer);
    }
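
The comment in the hunk above explains why user-pointer constants are copied into a GPU buffer right away: the pointer is only guaranteed to stay valid until the next set_constant_buffer call. A minimal sketch of that upload pattern, using the u_upload_data_ref() call shape shown in these hunks (the helper name and include paths are illustrative, not part of this commit):

#include "pipe/p_state.h"        /* struct pipe_constant_buffer */
#include "util/u_upload_mgr.h"   /* u_upload_data_ref() */

/* Snapshot a user-pointer constant buffer into a real pipe_resource so the
 * driver no longer depends on the caller keeping the CPU pointer alive. */
static void
snapshot_user_constants(struct u_upload_mgr *uploader,
                        struct pipe_constant_buffer *cb)
{
   if (!cb->user_buffer)
      return;

   /* Copies buffer_size bytes into an upload buffer and returns the backing
    * resource in cb->buffer plus the offset of the copy inside it. */
   u_upload_data_ref(uploader, 0, cb->buffer_size, 16,
                     cb->user_buffer, &cb->buffer_offset, &cb->buffer);

   /* The CPU pointer must not be dereferenced after this point. */
   cb->user_buffer = NULL;
}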

@@ -622,5 +622,6 @@ llvmpipe_init_sampler_funcs(struct llvmpipe_context *llvmpipe)
    llvmpipe->pipe.set_sampler_views = llvmpipe_set_sampler_views;
    llvmpipe->pipe.sampler_view_destroy = llvmpipe_sampler_view_destroy;
    llvmpipe->pipe.sampler_view_release = u_default_sampler_view_release;
+   llvmpipe->pipe.resource_release = u_default_resource_release;
    llvmpipe->pipe.delete_sampler_state = llvmpipe_delete_sampler_state;
 }

@@ -89,7 +89,7 @@ llvmpipe_set_vertex_buffers(struct pipe_context *pipe,
    util_set_vertex_buffers_count(llvmpipe->vertex_buffer,
                                  &llvmpipe->num_vertex_buffers,
-                                 buffers, count, true);
+                                 buffers, count);
    llvmpipe->dirty |= LP_NEW_VERTEX;

@@ -328,7 +328,6 @@ nv30_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask)
 static void
 nv30_set_constant_buffer(struct pipe_context *pipe,
                          mesa_shader_stage shader, uint index,
-                         bool pass_reference,
                          const struct pipe_constant_buffer *cb)
 {
    struct nv30_context *nv30 = nv30_context(pipe);
@@ -346,22 +345,12 @@ nv30_set_constant_buffer(struct pipe_context *pipe,
    size = buf->width0 / (4 * sizeof(float));
    if (shader == MESA_SHADER_VERTEX) {
-      if (pass_reference) {
-         pipe_resource_reference(&nv30->vertprog.constbuf, NULL);
-         nv30->vertprog.constbuf = buf;
-      } else {
-         pipe_resource_reference(&nv30->vertprog.constbuf, buf);
-      }
+      pipe_resource_reference(&nv30->vertprog.constbuf, buf);
       nv30->vertprog.constbuf_nr = size;
       nv30->dirty |= NV30_NEW_VERTCONST;
    } else
    if (shader == MESA_SHADER_FRAGMENT) {
-      if (pass_reference) {
-         pipe_resource_reference(&nv30->fragprog.constbuf, NULL);
-         nv30->fragprog.constbuf = buf;
-      } else {
-         pipe_resource_reference(&nv30->fragprog.constbuf, buf);
-      }
+      pipe_resource_reference(&nv30->fragprog.constbuf, buf);
       nv30->fragprog.constbuf_nr = size;
       nv30->dirty |= NV30_NEW_FRAGCONST;
    }
@@ -446,7 +435,7 @@ nv30_set_vertex_buffers(struct pipe_context *pipe,
    nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
    util_set_vertex_buffers_count(nv30->vtxbuf, &nv30->num_vtxbufs,
-                                 vb, count, true);
+                                 vb, count);
    nv30->dirty |= NV30_NEW_ARRAYS;
 }

@@ -325,4 +325,5 @@ nv30_texture_init(struct pipe_context *pipe)
    pipe->create_sampler_view = nv30_sampler_view_create;
    pipe->sampler_view_destroy = nv30_sampler_view_destroy;
    pipe->sampler_view_release = u_default_sampler_view_release;
+   pipe->resource_release = u_default_resource_release;
 }

@@ -885,7 +885,6 @@ nv50_get_compute_state_info(struct pipe_context *pipe, void *hwcso,
 static void
 nv50_set_constant_buffer(struct pipe_context *pipe,
                          mesa_shader_stage shader, uint index,
-                         bool take_ownership,
                          const struct pipe_constant_buffer *cb)
 {
    struct nv50_context *nv50 = nv50_context(pipe);
@@ -915,12 +914,7 @@ nv50_set_constant_buffer(struct pipe_context *pipe,
       if (nv50->constbuf[s][i].u.buf)
          nv04_resource(nv50->constbuf[s][i].u.buf)->cb_bindings[s] &= ~(1 << i);
-      if (take_ownership) {
-         pipe_resource_reference(&nv50->constbuf[s][i].u.buf, NULL);
-         nv50->constbuf[s][i].u.buf = res;
-      } else {
-         pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);
-      }
+      pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);
       nv50->constbuf[s][i].user = (cb && cb->user_buffer) ? true : false;
       if (nv50->constbuf[s][i].user) {
@@ -1089,7 +1083,7 @@ nv50_set_vertex_buffers(struct pipe_context *pipe,
    unsigned last_count = nv50->num_vtxbufs;
    util_set_vertex_buffers_count(nv50->vtxbuf, &nv50->num_vtxbufs, vb,
-                                 count, true);
+                                 count);
    unsigned clear_mask =
       last_count > count ? BITFIELD_RANGE(count, last_count - count) : 0;
@@ -1475,6 +1469,7 @@ nv50_init_state_functions(struct nv50_context *nv50)
    pipe->create_sampler_view = nv50_create_sampler_view;
    pipe->sampler_view_destroy = nv50_sampler_view_destroy;
    pipe->sampler_view_release = u_default_sampler_view_release;
+   pipe->resource_release = u_default_resource_release;
    pipe->set_sampler_views = nv50_set_sampler_views;
    pipe->create_vs_state = nv50_vp_state_create;

@@ -789,7 +789,6 @@ nvc0_get_compute_state_info(struct pipe_context *pipe, void *hwcso,
 static void
 nvc0_set_constant_buffer(struct pipe_context *pipe,
                          mesa_shader_stage shader, uint index,
-                         bool take_ownership,
                          const struct pipe_constant_buffer *cb)
 {
    struct nvc0_context *nvc0 = nvc0_context(pipe);
@@ -819,12 +818,7 @@ nvc0_set_constant_buffer(struct pipe_context *pipe,
       if (nvc0->constbuf[s][i].u.buf)
          nv04_resource(nvc0->constbuf[s][i].u.buf)->cb_bindings[s] &= ~(1 << i);
-      if (take_ownership) {
-         pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, NULL);
-         nvc0->constbuf[s][i].u.buf = res;
-      } else {
-         pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res);
-      }
+      pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res);
       nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? true : false;
       if (nvc0->constbuf[s][i].user) {
@@ -1030,7 +1024,7 @@ nvc0_set_vertex_buffers(struct pipe_context *pipe,
    unsigned last_count = nvc0->num_vtxbufs;
    util_set_vertex_buffers_count(nvc0->vtxbuf, &nvc0->num_vtxbufs, vb,
-                                 count, true);
+                                 count);
    unsigned clear_mask =
      last_count > count ? BITFIELD_RANGE(count, last_count - count) : 0;
@@ -1432,6 +1426,7 @@ nvc0_init_state_functions(struct nvc0_context *nvc0)
    pipe->create_sampler_view = nvc0_create_sampler_view;
    pipe->sampler_view_destroy = nvc0_sampler_view_destroy;
    pipe->sampler_view_release = u_default_sampler_view_release;
+   pipe->resource_release = u_default_resource_release;
    pipe->set_sampler_views = nvc0_set_sampler_views;
    pipe->create_vs_state = nvc0_vp_state_create;

@@ -3585,15 +3585,15 @@ panfrost_launch_afbc_conv_shader(struct panfrost_batch *batch, void *cso,
    struct panfrost_constant_buffer *pbuf =
       &batch->ctx->constant_buffer[MESA_SHADER_COMPUTE];
    saved_cso = batch->ctx->uncompiled[MESA_SHADER_COMPUTE];
-   util_copy_constant_buffer(&pbuf->cb[0], &saved_const, true);
+   util_copy_constant_buffer(&pbuf->cb[0], &saved_const);
    pctx->bind_compute_state(pctx, cso);
-   pctx->set_constant_buffer(pctx, MESA_SHADER_COMPUTE, 0, false, cbuf);
+   pctx->set_constant_buffer(pctx, MESA_SHADER_COMPUTE, 0, cbuf);
    panfrost_launch_grid_on_batch(pctx, batch, &grid);
    pctx->bind_compute_state(pctx, saved_cso);
-   pctx->set_constant_buffer(pctx, MESA_SHADER_COMPUTE, 0, true, &saved_const);
+   pctx->set_constant_buffer(pctx, MESA_SHADER_COMPUTE, 0, &saved_const);
 }
 #define LAUNCH_AFBC_CONV_SHADER(name, batch, rsrc, consts, nr_blocks) \
@@ -3792,15 +3792,15 @@ panfrost_mtk_detile_compute(struct panfrost_context *ctx, struct pipe_blit_info
       &batch->ctx->constant_buffer[MESA_SHADER_COMPUTE];
    void *saved_cso = batch->ctx->uncompiled[MESA_SHADER_COMPUTE];
    void *cso = shader->mtk_tiled.detile_cso;
-   util_copy_constant_buffer(&pbuf->cb[0], &saved_const, true);
+   util_copy_constant_buffer(&pbuf->cb[0], &saved_const);
    pipe->bind_compute_state(pipe, cso);
-   pipe->set_constant_buffer(pipe, MESA_SHADER_COMPUTE, 0, false, &cbuf);
+   pipe->set_constant_buffer(pipe, MESA_SHADER_COMPUTE, 0, &cbuf);
    panfrost_launch_grid_on_batch(pipe, batch, &grid_info);
    pipe->bind_compute_state(pipe, saved_cso);
-   pipe->set_constant_buffer(pipe, MESA_SHADER_COMPUTE, 0, true, &saved_const);
+   pipe->set_constant_buffer(pipe, MESA_SHADER_COMPUTE, 0, &saved_const);
    panfrost_resource_restore_format(pan_resource(y_src), &y_src_save);
    panfrost_resource_restore_format(pan_resource(uv_src), &uv_src_save);
@@ -4327,6 +4327,7 @@ context_populate_vtbl(struct pipe_context *pipe)
    pipe->create_sampler_view = panfrost_create_sampler_view;
    pipe->sampler_view_destroy = panfrost_sampler_view_destroy;
    pipe->sampler_view_release = u_default_sampler_view_release;
+   pipe->resource_release = u_default_resource_release;
    pipe->create_sampler_state = panfrost_create_sampler_state;
    pipe->create_blend_state = panfrost_create_blend_state;

@@ -338,7 +338,7 @@ panfrost_set_vertex_buffers(struct pipe_context *pctx, unsigned num_buffers,
    struct panfrost_context *ctx = pan_context(pctx);
    util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers,
-                                num_buffers, true);
+                                num_buffers);
    ctx->dirty |= PAN_DIRTY_VERTEX;
 }
@@ -346,13 +346,12 @@ panfrost_set_vertex_buffers(struct pipe_context *pctx, unsigned num_buffers,
 static void
 panfrost_set_constant_buffer(struct pipe_context *pctx,
                              mesa_shader_stage shader, uint index,
-                             bool take_ownership,
                              const struct pipe_constant_buffer *buf)
 {
    struct panfrost_context *ctx = pan_context(pctx);
    struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
-   util_copy_constant_buffer(&pbuf->cb[index], buf, take_ownership);
+   util_copy_constant_buffer(&pbuf->cb[index], buf);
    unsigned mask = (1 << index);

@@ -1050,7 +1050,7 @@ static void r300_render_draw_elements(struct vbuf_render* render,
    CS_LOCALS(r300);
    DBG(r300, DBG_DRAW, "r300: render_draw_elements (count: %d)\n", count);
-   u_upload_data(r300->uploader, 0, count * 2, 4, indices,
+   u_upload_data_ref(r300->uploader, 0, count * 2, 4, indices,
                  &index_buffer_offset, &index_buffer);
    if (!index_buffer) {
       return;

@@ -21,7 +21,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
    switch (*index_size) {
    case 1:
       *out_buffer = NULL;
-      u_upload_alloc(r300->uploader, 0, count * 2, 4,
+      u_upload_alloc_ref(r300->uploader, 0, count * 2, 4,
                      &out_offset, out_buffer, ptr);
       util_shorten_ubyte_elts_to_userptr(
@@ -35,7 +35,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
    case 2:
       if (index_offset) {
          *out_buffer = NULL;
-         u_upload_alloc(r300->uploader, 0, count * 2, 4,
+         u_upload_alloc_ref(r300->uploader, 0, count * 2, 4,
                         &out_offset, out_buffer, ptr);
          util_rebuild_ushort_elts_to_userptr(&r300->context, info,
@@ -50,7 +50,7 @@ void r300_translate_index_buffer(struct r300_context *r300,
    case 4:
       if (index_offset) {
         *out_buffer = NULL;
-        u_upload_alloc(r300->uploader, 0, count * 4, 4,
+        u_upload_alloc_ref(r300->uploader, 0, count * 4, 4,
                        &out_offset, out_buffer, ptr);
        util_rebuild_uint_elts_to_userptr(&r300->context, info,

@@ -22,7 +22,7 @@ void r300_upload_index_buffer(struct r300_context *r300,
    *index_buffer = NULL;
-   u_upload_data(r300->uploader,
+   u_upload_data_ref(r300->uploader,
                  0, count * index_size, 4,
                  ptr + (*start * index_size),
                  &index_offset,

@@ -1792,14 +1792,13 @@ static void r300_set_vertex_buffers_hwtcl(struct pipe_context* pipe,
    struct r300_context* r300 = r300_context(pipe);
    util_set_vertex_buffers_count(r300->vertex_buffer,
-                                 &r300->nr_vertex_buffers, buffers, count,
-                                 true);
+                                 &r300->nr_vertex_buffers, buffers, count);
    /* There must be at least one vertex buffer set, otherwise it locks up. */
    if (!r300->nr_vertex_buffers) {
       util_set_vertex_buffers_count(r300->vertex_buffer,
                                     &r300->nr_vertex_buffers,
-                                    &r300->dummy_vb, 1, false);
+                                    &r300->dummy_vb, 1);
    }
    r300->vertex_arrays_dirty = true;
@@ -1813,8 +1812,7 @@ static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
    unsigned i;
    util_set_vertex_buffers_count(r300->vertex_buffer,
-                                 &r300->nr_vertex_buffers, buffers, count,
-                                 true);
+                                 &r300->nr_vertex_buffers, buffers, count);
    draw_set_vertex_buffers(r300->draw, count, buffers);
    if (!buffers)
@@ -2063,7 +2061,6 @@ static void r300_delete_vs_state(struct pipe_context* pipe, void* shader)
 static void r300_set_constant_buffer(struct pipe_context *pipe,
                                      mesa_shader_stage shader, uint index,
-                                     bool take_ownership,
                                      const struct pipe_constant_buffer *cb)
 {
    struct r300_context* r300 = r300_context(pipe);
@@ -2178,6 +2175,7 @@ void r300_init_state_functions(struct r300_context* r300)
    r300->context.create_sampler_view = r300_create_sampler_view;
    r300->context.sampler_view_destroy = r300_sampler_view_destroy;
    r300->context.sampler_view_release = u_default_sampler_view_release;
+   r300->context.resource_release = u_default_resource_release;
    r300->context.set_scissor_states = r300_set_scissor_states;

@@ -146,7 +146,7 @@ static void evergreen_cs_set_constant_buffer(struct r600_context *rctx,
    cb.buffer = buffer;
    cb.user_buffer = NULL;
-   rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_COMPUTE, cb_index, false, &cb);
+   rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_COMPUTE, cb_index, &cb);
 }
 /* We need to define these R600 registers here, because we can't include

@@ -1760,7 +1760,7 @@ evergreen_emit_arb_shader_image_load_store_incomplete(struct r600_context *rctx,
    assert(ARRAY_SIZE(immed_resource_words) == 8);
    assert(ARRAY_SIZE(resource_words) == 8);
-   u_upload_alloc(rctx->b.b.stream_uploader, 0,
+   u_upload_alloc_ref(rctx->b.b.stream_uploader, 0,
                   4,
                   256,
                   &dummy_offset,
@@ -4942,17 +4942,17 @@ void evergreen_setup_tess_constants(struct r600_context *rctx,
       if (unlikely(vertexid))
          rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_VERTEX,
-                                       R600_LDS_INFO_CONST_BUFFER, false,
+                                       R600_LDS_INFO_CONST_BUFFER,
                                        &rctx->lds_constbuf_pipe);
       else
          rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_VERTEX,
-                                       R600_LDS_INFO_CONST_BUFFER, false,
+                                       R600_LDS_INFO_CONST_BUFFER,
                                        NULL);
       rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_TESS_CTRL,
-                                    R600_LDS_INFO_CONST_BUFFER, false, NULL);
+                                    R600_LDS_INFO_CONST_BUFFER, NULL);
       rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_TESS_EVAL,
-                                    R600_LDS_INFO_CONST_BUFFER, false, NULL);
+                                    R600_LDS_INFO_CONST_BUFFER, NULL);
       return;
    }
@@ -5009,13 +5009,13 @@ void evergreen_setup_tess_constants(struct r600_context *rctx,
    rctx->last_num_tcs_input_cp = num_tcs_input_cp;
    rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_VERTEX,
-                                 R600_LDS_INFO_CONST_BUFFER, false,
+                                 R600_LDS_INFO_CONST_BUFFER,
                                  &rctx->lds_constbuf_pipe);
    rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_TESS_CTRL,
-                                 R600_LDS_INFO_CONST_BUFFER, false,
+                                 R600_LDS_INFO_CONST_BUFFER,
                                  &rctx->lds_constbuf_pipe);
    rctx->b.b.set_constant_buffer(&rctx->b.b, MESA_SHADER_TESS_EVAL,
-                                 R600_LDS_INFO_CONST_BUFFER, true,
+                                 R600_LDS_INFO_CONST_BUFFER,
                                  &rctx->lds_constbuf_pipe);
 }

@@ -807,7 +807,7 @@ static void r600_blitter_clear_buffer(struct r600_context *rctx,
       return;
    }
-   u_upload_data(pipe->stream_uploader, 0, num_channels*4, 4, clear_value,
+   u_upload_data_ref(pipe->stream_uploader, 0, num_channels*4, 4, clear_value,
                  &vb.buffer_offset, &vb.buffer.resource);
    if (!vb.buffer.resource)
      goto out;

@@ -374,7 +374,7 @@ void *r600_buffer_transfer_map(struct pipe_context *ctx,
      unsigned offset;
      struct r600_resource *staging = NULL;
-     u_upload_alloc(ctx->stream_uploader, 0,
+     u_upload_alloc_ref(ctx->stream_uploader, 0,
                    box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
                    rctx->screen->info.tcc_cache_line_size,
                    &offset, (struct pipe_resource**)&staging,

@@ -53,7 +53,7 @@ static void r600_destroy_context(struct pipe_context *context)
    if (rctx->append_fence)
       pipe_resource_reference((struct pipe_resource**)&rctx->append_fence, NULL);
    for (sh = 0; sh < MESA_SHADER_STAGES; sh++) {
-      rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, NULL);
+      rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, NULL);
       free(rctx->driver_consts[sh].constants);
    }
@@ -85,7 +85,7 @@ static void r600_destroy_context(struct pipe_context *context)
    for (sh = 0; sh < MESA_SHADER_STAGES; ++sh)
       for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; ++i)
-         rctx->b.b.set_constant_buffer(context, sh, i, false, NULL);
+         rctx->b.b.set_constant_buffer(context, sh, i, NULL);
    if (rctx->blitter) {
       util_blitter_destroy(rctx->blitter);

@@ -158,7 +158,7 @@ void r600_draw_rectangle(struct blitter_context *blitter,
    /* Upload vertices. The hw rectangle has only 3 vertices,
    * The 4th one is derived from the first 3.
    * The vertex specification should match u_blitter's vertex element state. */
-   u_upload_alloc(rctx->b.stream_uploader, 0, sizeof(float) * 24,
+   u_upload_alloc_ref(rctx->b.stream_uploader, 0, sizeof(float) * 24,
                   rctx->screen->info.tcc_cache_line_size,
                   &offset, &buf, (void**)&vb);
    if (!buf)

@@ -1579,7 +1579,7 @@ static void r600_restore_qbo_state(struct r600_common_context *rctx,
                                    struct r600_qbo_state *st)
 {
    rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
-   rctx->b.set_constant_buffer(&rctx->b, MESA_SHADER_COMPUTE, 0, true, &st->saved_const0);
+   rctx->b.set_constant_buffer(&rctx->b, MESA_SHADER_COMPUTE, 0, &st->saved_const0);
    rctx->b.set_shader_buffers(&rctx->b, MESA_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
    for (unsigned i = 0; i < 3; ++i)
       pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
@@ -1712,7 +1712,7 @@ static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
    } else
       consts.buffer_offset = 0;
-   rctx->b.set_constant_buffer(&rctx->b, MESA_SHADER_COMPUTE, 0, false, &constant_buffer);
+   rctx->b.set_constant_buffer(&rctx->b, MESA_SHADER_COMPUTE, 0, &constant_buffer);
    rctx->b.set_shader_buffers(&rctx->b, MESA_SHADER_COMPUTE, 0, 3, ssbo, ~0);

@@ -589,8 +589,7 @@ static void r600_set_vertex_buffers(struct pipe_context *ctx,
           (vb[i].is_user_buffer != input[i].is_user_buffer))) {
          if (input[i].buffer.resource) {
             vb[i].buffer_offset = input[i].buffer_offset;
-            pipe_resource_reference(&vb[i].buffer.resource, NULL);
-            vb[i].buffer.resource = input[i].buffer.resource;
+            pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
             new_buffer_mask |= 1 << i;
             r600_context_add_resource_size(ctx, input[i].buffer.resource);
          } else {
@@ -598,8 +597,7 @@ static void r600_set_vertex_buffers(struct pipe_context *ctx,
            disable_mask |= 1 << i;
         }
      } else if (input[i].buffer.resource) {
-        pipe_resource_reference(&vb[i].buffer.resource, NULL);
-        vb[i].buffer.resource = input[i].buffer.resource;
+        pipe_resource_reference(&vb[i].buffer.resource, input[i].buffer.resource);
      }
    }
@@ -1240,7 +1238,6 @@ void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf
 static void r600_set_constant_buffer(struct pipe_context *ctx,
                                      mesa_shader_stage shader, uint index,
-                                     bool take_ownership,
                                      const struct pipe_constant_buffer *input)
 {
    struct r600_context *rctx = (struct r600_context *)ctx;
@@ -1278,11 +1275,11 @@ static void r600_set_constant_buffer(struct pipe_context *ctx,
            tmpPtr[i] = util_cpu_to_le32(((uint32_t *)ptr)[i]);
         }
-        u_upload_data(ctx->stream_uploader, 0, size, 256,
+        u_upload_data_ref(ctx->stream_uploader, 0, size, 256,
                       tmpPtr, &cb->buffer_offset, &cb->buffer);
         free(tmpPtr);
      } else {
-        u_upload_data(ctx->stream_uploader, 0,
+        u_upload_data_ref(ctx->stream_uploader, 0,
                       input->buffer_size, 256, ptr,
                       &cb->buffer_offset, &cb->buffer);
      }
@@ -1291,12 +1288,7 @@ static void r600_set_constant_buffer(struct pipe_context *ctx,
    } else {
      /* Setup the hw buffer. */
      cb->buffer_offset = input->buffer_offset;
-     if (take_ownership) {
-        pipe_resource_reference(&cb->buffer, NULL);
-        cb->buffer = input->buffer;
-     } else {
-        pipe_resource_reference(&cb->buffer, input->buffer);
-     }
+     pipe_resource_reference(&cb->buffer, input->buffer);
      r600_context_add_resource_size(ctx, input->buffer);
    }
@@ -1410,7 +1402,7 @@ void r600_update_driver_const_buffers(struct r600_context *rctx, bool compute_on
      cb.user_buffer = ptr;
      cb.buffer_offset = 0;
      cb.buffer_size = size;
-     rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, &cb);
+     rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
      pipe_resource_reference(&cb.buffer, NULL);
    }
 }
@@ -1599,21 +1591,21 @@ static void update_gs_block_state(struct r600_context *rctx, unsigned enable)
      if (enable) {
        r600_set_constant_buffer(&rctx->b.b, MESA_SHADER_GEOMETRY,
-                                R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.esgs_ring);
+                                R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
        if (rctx->tes_shader) {
           r600_set_constant_buffer(&rctx->b.b, MESA_SHADER_TESS_EVAL,
-                                   R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
+                                   R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
        } else {
           r600_set_constant_buffer(&rctx->b.b, MESA_SHADER_VERTEX,
-                                   R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
+                                   R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
        }
      } else {
        r600_set_constant_buffer(&rctx->b.b, MESA_SHADER_GEOMETRY,
-                                R600_GS_RING_CONST_BUFFER, false, NULL);
+                                R600_GS_RING_CONST_BUFFER, NULL);
        r600_set_constant_buffer(&rctx->b.b, MESA_SHADER_VERTEX,
-                                R600_GS_RING_CONST_BUFFER, false, NULL);
+                                R600_GS_RING_CONST_BUFFER, NULL);
        r600_set_constant_buffer(&rctx->b.b, MESA_SHADER_TESS_EVAL,
-                                R600_GS_RING_CONST_BUFFER, false, NULL);
+                                R600_GS_RING_CONST_BUFFER, NULL);
      }
    }
 }
@@ -2298,7 +2290,7 @@ r600_indirect_parameters_init(struct r600_context *rctx,
    indirect_parameters->counter = 0;
    indirect_parameters->internal = NULL;
-   u_upload_alloc(rctx->b.b.stream_uploader, 0,
+   u_upload_alloc_ref(rctx->b.b.stream_uploader, 0,
                   sizeof(struct r600_indirect_gpu_internal),
                   256,
                   &indirect_parameters->internal_offset,
@@ -2630,7 +2622,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                      indexbuf->width0 - index_offset;
        const unsigned out_width = count * out_size;
-       u_upload_alloc(ctx->stream_uploader, start, out_width,
+       u_upload_alloc_ref(ctx->stream_uploader, start, out_width,
                       256, &out_offset, &out_buffer, &ptr);
        if (unlikely(!ptr))
@@ -2655,7 +2647,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
                 draws[0].count*index_size > 20)) {
        unsigned start_offset = draws[0].start * index_size;
        indexbuf = NULL;
-       u_upload_data(ctx->stream_uploader, 0,
+       u_upload_data_ref(ctx->stream_uploader, 0,
                      draws[0].count * index_size, 256,
                      (char*)info->index.user + start_offset,
                      &index_offset, &indexbuf);
@@ -3966,6 +3958,7 @@ void r600_init_common_state_functions(struct r600_context *rctx)
    rctx->b.b.set_sampler_views = r600_set_sampler_views;
    rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
    rctx->b.b.sampler_view_release = u_default_sampler_view_release;
+   rctx->b.b.resource_release = u_default_resource_release;
    rctx->b.b.memory_barrier = r600_memory_barrier;
    rctx->b.b.texture_barrier = r600_texture_barrier;
    rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;

@@ -374,7 +374,7 @@ static void gfx11_sh_query_get_result_resource(struct si_context *sctx, struct s
       ssbo[2].buffer_size = is_result_64bit ? 8 : 4;
    }
-   sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, false, &constant_buffer);
+   sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, &constant_buffer);
    if (flags & PIPE_QUERY_WAIT) {
       uint64_t va;

@@ -448,7 +448,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
      else
         uploader = sctx->b.stream_uploader;
-     u_upload_alloc(uploader, 0, box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
+     u_upload_alloc_ref(uploader, 0, box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
                    sctx->screen->info.tcc_cache_line_size, &offset,
                    (struct pipe_resource **)&staging, (void **)&data);

@ -125,7 +125,7 @@ static void si_upload_descriptors(struct si_context *sctx, struct si_descriptors
uint32_t *ptr; uint32_t *ptr;
unsigned buffer_offset; unsigned buffer_offset;
u_upload_alloc(sctx->b.const_uploader, first_slot_offset, upload_size, u_upload_alloc_ref(sctx->b.const_uploader, first_slot_offset, upload_size,
si_optimal_tcc_alignment(sctx, upload_size), &buffer_offset, si_optimal_tcc_alignment(sctx, upload_size), &buffer_offset,
(struct pipe_resource **)&desc->buffer, (void **)&ptr); (struct pipe_resource **)&desc->buffer, (void **)&ptr);
if (!desc->buffer) { if (!desc->buffer) {
@ -1117,14 +1117,14 @@ static void si_upload_const_buffer(struct si_context *sctx, struct si_resource *
{ {
void *tmp; void *tmp;
u_upload_alloc(sctx->b.const_uploader, 0, size, si_optimal_tcc_alignment(sctx, size), u_upload_alloc_ref(sctx->b.const_uploader, 0, size, si_optimal_tcc_alignment(sctx, size),
const_offset, (struct pipe_resource **)buf, &tmp); const_offset, (struct pipe_resource **)buf, &tmp);
if (*buf) if (*buf)
util_memcpy_cpu_to_le32(tmp, ptr, size); util_memcpy_cpu_to_le32(tmp, ptr, size);
} }
static void si_set_constant_buffer(struct si_context *sctx, struct si_buffer_resources *buffers, static void si_set_constant_buffer(struct si_context *sctx, struct si_buffer_resources *buffers,
unsigned descriptors_idx, uint slot, bool take_ownership, unsigned descriptors_idx, uint slot,
const struct pipe_constant_buffer *input) const struct pipe_constant_buffer *input)
{ {
struct si_descriptors *descs = &sctx->descriptors[descriptors_idx]; struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
@ -1146,15 +1146,11 @@ static void si_set_constant_buffer(struct si_context *sctx, struct si_buffer_res
input->buffer_size, &buffer_offset); input->buffer_size, &buffer_offset);
if (!buffer) { if (!buffer) {
/* Just unbind on failure. */ /* Just unbind on failure. */
si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, false, NULL); si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
return; return;
} }
} else {
if (take_ownership) {
buffer = input->buffer;
} else { } else {
pipe_resource_reference(&buffer, input->buffer); pipe_resource_reference(&buffer, input->buffer);
}
buffer_offset = input->buffer_offset; buffer_offset = input->buffer_offset;
} }
@ -1212,7 +1208,7 @@ void si_invalidate_inlinable_uniforms(struct si_context *sctx, mesa_shader_stage
} }
static void si_pipe_set_constant_buffer(struct pipe_context *ctx, mesa_shader_stage shader, static void si_pipe_set_constant_buffer(struct pipe_context *ctx, mesa_shader_stage shader,
uint slot, bool take_ownership, uint slot,
const struct pipe_constant_buffer *input) const struct pipe_constant_buffer *input)
{ {
struct si_context *sctx = (struct si_context *)ctx; struct si_context *sctx = (struct si_context *)ctx;
@ -1237,7 +1233,7 @@ static void si_pipe_set_constant_buffer(struct pipe_context *ctx, mesa_shader_st
slot = si_get_constbuf_slot(slot); slot = si_get_constbuf_slot(slot);
si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader], si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
si_const_and_shader_buffer_descriptors_idx(shader), slot, si_const_and_shader_buffer_descriptors_idx(shader), slot,
take_ownership, input); input);
} }
static void si_set_inlinable_constants(struct pipe_context *ctx, static void si_set_inlinable_constants(struct pipe_context *ctx,
@ -1390,7 +1386,7 @@ void si_get_shader_buffers(struct si_context *sctx, mesa_shader_stage shader, ui
void si_set_internal_const_buffer(struct si_context *sctx, uint slot, void si_set_internal_const_buffer(struct si_context *sctx, uint slot,
const struct pipe_constant_buffer *input) const struct pipe_constant_buffer *input)
{ {
si_set_constant_buffer(sctx, &sctx->internal_bindings, SI_DESCS_INTERNAL, slot, false, input); si_set_constant_buffer(sctx, &sctx->internal_bindings, SI_DESCS_INTERNAL, slot, input);
} }
void si_set_internal_shader_buffer(struct si_context *sctx, uint slot, void si_set_internal_shader_buffer(struct si_context *sctx, uint slot,
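For reference, a minimal caller-side sketch of the shortened set_constant_buffer interface. The constant data and its layout are hypothetical; only the call pattern follows the updated calls in this diff.

/* Hypothetical payload; the driver uploads user_buffer data itself. */
struct {
   float scale[4];
} my_consts = {{1.0f, 1.0f, 1.0f, 1.0f}};

struct pipe_constant_buffer cb = {
   .buffer = NULL,              /* no backing pipe_resource */
   .buffer_offset = 0,
   .buffer_size = sizeof(my_consts),
   .user_buffer = &my_consts,
};

/* Bind with no ownership flag... */
sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, &cb);
/* ...and unbind by passing NULL. */
sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, NULL);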

View file

@ -248,7 +248,7 @@ static void si_fine_fence_set(struct si_context *ctx, struct si_fine_fence *fine
assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1); assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1);
/* Use cached system memory for the fence. */ /* Use cached system memory for the fence. */
u_upload_alloc(ctx->cached_gtt_allocator, 0, 4, 4, &fine->offset, u_upload_alloc_ref(ctx->cached_gtt_allocator, 0, 4, 4, &fine->offset,
(struct pipe_resource **)&fine->buf, (void **)&fence_ptr); (struct pipe_resource **)&fine->buf, (void **)&fence_ptr);
if (!fine->buf) if (!fine->buf)
return; return;

View file

@ -773,7 +773,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
unsigned start_shader = sctx->is_gfx_queue ? 0 : MESA_SHADER_COMPUTE; unsigned start_shader = sctx->is_gfx_queue ? 0 : MESA_SHADER_COMPUTE;
for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) { for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) { for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
sctx->b.set_constant_buffer(&sctx->b, shader, i, false, &sctx->null_const_buf); sctx->b.set_constant_buffer(&sctx->b, shader, i, &sctx->null_const_buf);
} }
} }

View file

@ -1152,9 +1152,9 @@ struct si_context {
uint16_t vertex_buffer_unaligned; /* bitmask of not dword-aligned buffers */ uint16_t vertex_buffer_unaligned; /* bitmask of not dword-aligned buffers */
struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS]; struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
/* Even though we don't need this variable, u_upload_alloc has an optimization that skips /* Even though we don't need this variable, u_upload_alloc_ref has an optimization that skips
* reference counting when the new upload buffer is the same as the last one. So keep * reference counting when the new upload buffer is the same as the last one. So keep
* the last upload buffer here and always pass &last_const_upload_buffer to u_upload_alloc. * the last upload buffer here and always pass &last_const_upload_buffer to u_upload_alloc_ref.
*/ */
struct si_resource *last_const_upload_buffer; struct si_resource *last_const_upload_buffer;
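A rough sketch of the caller pattern the comment above describes, assuming u_upload_alloc_ref keeps u_upload_alloc's argument order (as the calls in this diff suggest):

#include "util/u_upload_mgr.h"

static void *
upload_consts(struct si_context *sctx, unsigned size, unsigned *out_offset)
{
   void *ptr = NULL;

   /* Always pass the address of the persistent last_const_upload_buffer;
    * when the uploader hands back the same buffer as last time, the helper
    * can skip the unref/ref pair on it entirely. */
   u_upload_alloc_ref(sctx->b.const_uploader, 0, size,
                      si_optimal_tcc_alignment(sctx, size), out_offset,
                      (struct pipe_resource **)&sctx->last_const_upload_buffer,
                      &ptr);
   return ptr;   /* NULL on allocation failure */
}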

View file

@ -1619,7 +1619,7 @@ static void si_query_hw_get_result_resource(struct si_context *sctx, struct si_q
params.start_offset += qbuf->results_end - query->result_size; params.start_offset += qbuf->results_end - query->result_size;
} }
sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, false, &constant_buffer); sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, &constant_buffer);
ssbo[0].buffer = &qbuf->buf->b.b; ssbo[0].buffer = &qbuf->buf->b.b;
ssbo[0].buffer_offset = params.start_offset; ssbo[0].buffer_offset = params.start_offset;

View file

@ -323,7 +323,7 @@ static void *pre_upload_binary(struct si_screen *sscreen, struct si_shader *shad
*upload_ctx = si_get_aux_context(&sscreen->aux_context.shader_upload); *upload_ctx = si_get_aux_context(&sscreen->aux_context.shader_upload);
void *ret; void *ret;
u_upload_alloc((*upload_ctx)->b.stream_uploader, 0, binary_size, 256, u_upload_alloc_ref((*upload_ctx)->b.stream_uploader, 0, binary_size, 256,
staging_offset, staging, &ret); staging_offset, staging, &ret);
if (!ret) if (!ret)
si_put_aux_context_flush(&sscreen->aux_context.shader_upload); si_put_aux_context_flush(&sscreen->aux_context.shader_upload);

View file

@ -1841,7 +1841,7 @@ void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st)
void si_restore_qbo_state(struct si_context *sctx, struct si_qbo_state *st) void si_restore_qbo_state(struct si_context *sctx, struct si_qbo_state *st)
{ {
sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, true, &st->saved_const0); sctx->b.set_constant_buffer(&sctx->b, MESA_SHADER_COMPUTE, 0, &st->saved_const0);
} }
static void si_emit_db_render_state(struct si_context *sctx, unsigned index) static void si_emit_db_render_state(struct si_context *sctx, unsigned index)
@ -4738,8 +4738,7 @@ static void si_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
dst->buffer_offset = src->buffer_offset; dst->buffer_offset = src->buffer_offset;
/* Only unreference bound vertex buffers. */ /* Only unreference bound vertex buffers. */
pipe_resource_reference(&dst->buffer.resource, NULL); pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
dst->buffer.resource = src->buffer.resource;
if (src->buffer_offset & 3) if (src->buffer_offset & 3)
unaligned |= BITFIELD_BIT(i); unaligned |= BITFIELD_BIT(i);
@ -4891,6 +4890,7 @@ void si_init_state_compute_functions(struct si_context *sctx)
sctx->b.create_sampler_view = si_create_sampler_view; sctx->b.create_sampler_view = si_create_sampler_view;
sctx->b.sampler_view_destroy = si_sampler_view_destroy; sctx->b.sampler_view_destroy = si_sampler_view_destroy;
sctx->b.sampler_view_release = u_default_sampler_view_release; sctx->b.sampler_view_release = u_default_sampler_view_release;
sctx->b.resource_release = u_default_resource_release;
} }
void si_init_state_functions(struct si_context *sctx) void si_init_state_functions(struct si_context *sctx)
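The resource_release hook wired up here is what lets the frontend hand a buffer back without an atomic unreference. Its exact prototype is not shown in this excerpt; by analogy with sampler_view_release it is assumed below to take the context and the resource, with u_default_resource_release presumably falling back to a plain unreference for drivers without a cheaper path.

/* Assumed prototype:
 *    void (*resource_release)(struct pipe_context *ctx,
 *                             struct pipe_resource *res);
 */
static void
frontend_release_buffer(struct pipe_context *pipe, struct pipe_resource *buf)
{
   /* Tell the driver the frontend no longer owns the buffer instead of
    * dropping a reference on the frontend side. */
   pipe->resource_release(pipe, buf);
}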

View file

@ -1868,7 +1868,7 @@ static bool si_upload_and_prefetch_VB_descriptors(struct si_context *sctx,
/* Vertex buffer descriptors are the only ones which are uploaded directly /* Vertex buffer descriptors are the only ones which are uploaded directly
* and don't go through si_upload_graphics_shader_descriptors. * and don't go through si_upload_graphics_shader_descriptors.
*/ */
u_upload_alloc(sctx->b.const_uploader, 0, alloc_size, u_upload_alloc_ref(sctx->b.const_uploader, 0, alloc_size,
si_optimal_tcc_alignment(sctx, alloc_size), &offset, si_optimal_tcc_alignment(sctx, alloc_size), &offset,
(struct pipe_resource **)&sctx->last_const_upload_buffer, (void **)&ptr); (struct pipe_resource **)&sctx->last_const_upload_buffer, (void **)&ptr);
if (!sctx->last_const_upload_buffer) if (!sctx->last_const_upload_buffer)
@ -2184,7 +2184,7 @@ static void si_draw(struct pipe_context *ctx,
start_offset = start * 2; start_offset = start * 2;
size = count * 2; size = count * 2;
/* Don't use u_upload_alloc because we don't need to map the buffer for CPU access. */ /* Don't use u_upload_alloc_ref because we don't need to map the buffer for CPU access. */
indexbuf = pipe_buffer_create(&sctx->screen->b, 0, PIPE_USAGE_IMMUTABLE, start_offset + size); indexbuf = pipe_buffer_create(&sctx->screen->b, 0, PIPE_USAGE_IMMUTABLE, start_offset + size);
if (unlikely(!indexbuf)) if (unlikely(!indexbuf))
return; return;
@ -2208,7 +2208,7 @@ static void si_draw(struct pipe_context *ctx,
start_offset = draws[0].start * index_size; start_offset = draws[0].start * index_size;
indexbuf = NULL; indexbuf = NULL;
u_upload_data(ctx->stream_uploader, start_offset, draws[0].count * index_size, u_upload_data_ref(ctx->stream_uploader, start_offset, draws[0].count * index_size,
sctx->screen->info.tcc_cache_line_size, sctx->screen->info.tcc_cache_line_size,
(char *)info->index.user + start_offset, &index_offset, &indexbuf); (char *)info->index.user + start_offset, &index_offset, &indexbuf);
if (unlikely(!indexbuf)) if (unlikely(!indexbuf))
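The two allocation strategies above differ only in whether a CPU mapping is needed; roughly, and assuming u_upload_data_ref keeps u_upload_data's argument order:

/* 8-bit -> 16-bit index conversion target: filled on the GPU, so a plain
 * buffer with no CPU mapping is enough. */
struct pipe_resource *conv_buf =
   pipe_buffer_create(&sctx->screen->b, 0, PIPE_USAGE_IMMUTABLE,
                      start_offset + size);

/* User-pointer indices: copied in through the stream uploader, which maps
 * the buffer and returns a referenced buffer plus an offset into it. */
struct pipe_resource *indexbuf = NULL;
unsigned index_offset = 0;
u_upload_data_ref(ctx->stream_uploader, start_offset,
                  draws[0].count * index_size,
                  sctx->screen->info.tcc_cache_line_size,
                  (char *)info->index.user + start_offset,
                  &index_offset, &indexbuf);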

View file

@ -86,7 +86,7 @@ static void si_emit_cull_state(struct si_context *sctx, unsigned index)
memcmp(&info, &sctx->last_small_prim_cull_info, sizeof(info))) { memcmp(&info, &sctx->last_small_prim_cull_info, sizeof(info))) {
unsigned offset = 0; unsigned offset = 0;
u_upload_data(sctx->b.const_uploader, 0, sizeof(info), u_upload_data_ref(sctx->b.const_uploader, 0, sizeof(info),
si_optimal_tcc_alignment(sctx, sizeof(info)), &info, &offset, si_optimal_tcc_alignment(sctx, sizeof(info)), &info, &offset,
(struct pipe_resource **)&sctx->small_prim_cull_info_buf); (struct pipe_resource **)&sctx->small_prim_cull_info_buf);

View file

@ -350,4 +350,5 @@ softpipe_init_sampler_funcs(struct pipe_context *pipe)
pipe->set_sampler_views = softpipe_set_sampler_views; pipe->set_sampler_views = softpipe_set_sampler_views;
pipe->sampler_view_destroy = softpipe_sampler_view_destroy; pipe->sampler_view_destroy = softpipe_sampler_view_destroy;
pipe->sampler_view_release = u_default_sampler_view_release; pipe->sampler_view_release = u_default_sampler_view_release;
pipe->resource_release = u_default_resource_release;
} }

View file

@ -357,7 +357,6 @@ softpipe_delete_gs_state(struct pipe_context *pipe, void *gs)
static void static void
softpipe_set_constant_buffer(struct pipe_context *pipe, softpipe_set_constant_buffer(struct pipe_context *pipe,
mesa_shader_stage shader, uint index, mesa_shader_stage shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *cb) const struct pipe_constant_buffer *cb)
{ {
struct softpipe_context *softpipe = softpipe_context(pipe); struct softpipe_context *softpipe = softpipe_context(pipe);
@ -381,13 +380,7 @@ softpipe_set_constant_buffer(struct pipe_context *pipe,
draw_flush(softpipe->draw); draw_flush(softpipe->draw);
/* note: reference counting */
if (take_ownership) {
pipe_resource_reference(&softpipe->constants[shader][index], NULL);
softpipe->constants[shader][index] = constants;
} else {
pipe_resource_reference(&softpipe->constants[shader][index], constants); pipe_resource_reference(&softpipe->constants[shader][index], constants);
}
if (shader == MESA_SHADER_VERTEX || shader == MESA_SHADER_GEOMETRY) { if (shader == MESA_SHADER_VERTEX || shader == MESA_SHADER_GEOMETRY) {
draw_set_mapped_constant_buffer(softpipe->draw, shader, index, data, size); draw_set_mapped_constant_buffer(softpipe->draw, shader, index, data, size);
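With the take_ownership branch gone, a driver-side bind reduces to one unconditional reference update; ownership handoff is signalled separately via the new resource_release hook. A minimal sketch, with "my_context" and its constants array as hypothetical stand-ins:

#include "util/u_inlines.h"   /* pipe_resource_reference */

static void
my_set_constant_buffer(struct pipe_context *pipe, mesa_shader_stage shader,
                       uint index, const struct pipe_constant_buffer *cb)
{
   struct my_context *ctx = (struct my_context *)pipe;
   struct pipe_resource *res = cb ? cb->buffer : NULL;

   /* One plain reference covers bind, rebind and unbind (cb == NULL). */
   pipe_resource_reference(&ctx->constants[shader][index], res);
}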

View file

@ -89,7 +89,7 @@ softpipe_set_vertex_buffers(struct pipe_context *pipe,
util_set_vertex_buffers_count(softpipe->vertex_buffer, util_set_vertex_buffers_count(softpipe->vertex_buffer,
&softpipe->num_vertex_buffers, &softpipe->num_vertex_buffers,
buffers, count, true); buffers, count);
softpipe->dirty |= SP_NEW_VERTEX; softpipe->dirty |= SP_NEW_VERTEX;

View file

@ -111,7 +111,7 @@ translate_indices(struct svga_hwtnl *hwtnl,
goto fail; goto fail;
} else { } else {
/* Allocate upload buffer space. Align to the index size. */ /* Allocate upload buffer space. Align to the index size. */
u_upload_alloc(pipe->stream_uploader, 0, size, gen_size, u_upload_alloc_ref(pipe->stream_uploader, 0, size, gen_size,
out_offset, &dst, &dst_map); out_offset, &dst, &dst_map);
if (!dst) if (!dst)
goto fail; goto fail;
@ -246,7 +246,7 @@ svga_hwtnl_draw_range_elements(struct svga_hwtnl *hwtnl,
unsigned index_offset; unsigned index_offset;
if (info->has_user_indices) { if (info->has_user_indices) {
u_upload_data(pipe->stream_uploader, 0, count * info->index_size, u_upload_data_ref(pipe->stream_uploader, 0, count * info->index_size,
info->index_size, (char *) info->index.user + start_offset, info->index_size, (char *) info->index.user + start_offset,
&index_offset, &index_buffer); &index_offset, &index_buffer);
u_upload_unmap(pipe->stream_uploader); u_upload_unmap(pipe->stream_uploader);

Some files were not shown because too many files have changed in this diff.