mesa/st: start moving bufferobject alloc/free/reference to main.
This moves these out of the state tracker code.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14133>
commit 90cb1493b7
parent 970daedb1d
10 changed files with 85 additions and 103 deletions
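
Note for readers skimming the diff: the helpers that used to be private to the state tracker (st_bufferobj_alloc, st_bufferobj_free, release_buffer, st_get_buffer_reference) now live in main/ as _mesa_internal_buffer_object_alloc, _mesa_delete_buffer_object, mesa_buffer_object_release_buffer and _mesa_get_buffer_object_reference, and the st_* wrappers are removed. The standalone C sketch below mirrors the allocate / reference / delete-on-last-unreference lifecycle those helpers implement; it is an illustration only, using mock names and plain C11 atomics rather than Mesa's gl_buffer_object and p_atomic_* helpers.

/* Standalone mock of the buffer-object lifecycle now in main/bufferobj.c:
 * CALLOC-style allocation, an atomic refcount, and deletion when the last
 * reference is dropped. Names here are illustrative, not Mesa API. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_buffer_object {
   atomic_int refcount;   /* plays the role of gl_buffer_object::RefCount */
   unsigned name;         /* GL buffer id */
};

static struct mock_buffer_object *
mock_buffer_alloc(unsigned name)
{
   struct mock_buffer_object *buf = calloc(1, sizeof(*buf));
   if (!buf)
      return NULL;
   buf->name = name;
   atomic_store(&buf->refcount, 1);   /* caller holds the first reference */
   return buf;
}

static void
mock_buffer_delete(struct mock_buffer_object *buf)
{
   /* In Mesa this is where mappings are torn down and the backing
    * pipe_resource is released before the struct itself is freed. */
   printf("deleting buffer %u\n", buf->name);
   free(buf);
}

static void
mock_buffer_unreference(struct mock_buffer_object *buf)
{
   /* Mirrors the p_atomic_dec_zero(&oldObj->RefCount) path in
    * _mesa_reference_buffer_object_(). */
   if (atomic_fetch_sub(&buf->refcount, 1) == 1)
      mock_buffer_delete(buf);
}

int main(void)
{
   struct mock_buffer_object *buf = mock_buffer_alloc(42);
   atomic_fetch_add(&buf->refcount, 1);  /* a second holder takes a reference */
   mock_buffer_unreference(buf);         /* first holder drops: buffer survives */
   mock_buffer_unreference(buf);         /* last holder drops: buffer is deleted */
   return 0;
}
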
@@ -52,6 +52,7 @@
 #include "state_tracker/st_cb_bufferobjects.h"
+#include "util/u_inlines.h"
 
 /* Debug flags */
 /*#define VBO_DEBUG*/
 /*#define BOUNDS_CHECK*/

@@ -453,6 +454,26 @@ convert_clear_buffer_data(struct gl_context *ctx,
    }
 }
 
+void
+mesa_buffer_object_release_buffer(struct gl_buffer_object *obj)
+{
+   if (!obj->buffer)
+      return;
+
+   /* Subtract the remaining private references before unreferencing
+    * the buffer. See the header file for explanation.
+    */
+   if (obj->private_refcount) {
+      assert(obj->private_refcount > 0);
+      p_atomic_add(&obj->buffer->reference.count,
+                   -obj->private_refcount);
+      obj->private_refcount = 0;
+   }
+   obj->private_refcount_ctx = NULL;
+
+   pipe_resource_reference(&obj->buffer, NULL);
+}
+
 /**
  * Delete a buffer object.
  *

@@ -462,7 +483,9 @@ void
 _mesa_delete_buffer_object(struct gl_context *ctx,
                            struct gl_buffer_object *bufObj)
 {
-   (void) ctx;
+   assert(bufObj->RefCount == 0);
+   _mesa_buffer_unmap_all_mappings(ctx, bufObj);
+   mesa_buffer_object_release_buffer(bufObj);
 
    vbo_delete_minmax_cache(bufObj);
    align_free(bufObj->Data);

@@ -501,7 +524,7 @@ _mesa_reference_buffer_object_(struct gl_context *ctx,
        */
       if (shared_binding || ctx != oldObj->Ctx) {
          if (p_atomic_dec_zero(&oldObj->RefCount)) {
-            st_bufferobj_free(ctx, oldObj);
+            _mesa_delete_buffer_object(ctx, oldObj);
          }
       } else if (ctx == oldObj->Ctx) {
          /* Update the private ref count. */

@@ -801,6 +824,16 @@ _mesa_free_buffer_objects( struct gl_context *ctx )
    _mesa_HashUnlockMutex(ctx->Shared->BufferObjects);
 }
 
+struct gl_buffer_object *
+_mesa_internal_buffer_object_alloc(struct gl_context *ctx, GLuint id)
+{
+   struct gl_buffer_object *buf = CALLOC_STRUCT(gl_buffer_object);
+   if (!buf)
+      return NULL;
+
+   _mesa_initialize_buffer_object(ctx, buf, id);
+   return buf;
+}
 
 /**
  * Create a buffer object that will be backed by an OpenGL buffer ID
  * where the creating context will hold one global buffer reference instead

@@ -811,7 +844,7 @@ _mesa_free_buffer_objects( struct gl_context *ctx )
 static struct gl_buffer_object *
 new_gl_buffer_object(struct gl_context *ctx, GLuint id)
 {
-   struct gl_buffer_object *buf = st_bufferobj_alloc(ctx, id);
+   struct gl_buffer_object *buf = _mesa_internal_buffer_object_alloc(ctx, id);
 
    buf->Ctx = ctx;
    buf->RefCount++; /* global buffer reference held by the context */

@@ -36,6 +36,42 @@
  * Internal functions
  */
 
+static inline struct pipe_resource *
+_mesa_get_buffer_object_reference(struct gl_context *ctx, struct gl_buffer_object *obj)
+{
+   if (unlikely(!obj))
+      return NULL;
+
+   struct pipe_resource *buffer = obj->buffer;
+
+   if (unlikely(!buffer))
+      return NULL;
+
+   /* Only one context is using the fast path. All other contexts must use
+    * the slow path.
+    */
+   if (unlikely(obj->private_refcount_ctx != ctx)) {
+      p_atomic_inc(&buffer->reference.count);
+      return buffer;
+   }
+
+   if (unlikely(obj->private_refcount <= 0)) {
+      assert(obj->private_refcount == 0);
+
+      /* This is the number of atomic increments we will skip. */
+      obj->private_refcount = 100000000;
+      p_atomic_add(&buffer->reference.count, obj->private_refcount);
+   }
+
+   /* Return a buffer reference while decrementing the private refcount. */
+   obj->private_refcount--;
+   return buffer;
+}
+
+struct gl_buffer_object *
+_mesa_internal_buffer_object_alloc(struct gl_context *ctx, GLuint id);
+void
+mesa_buffer_object_release_buffer(struct gl_buffer_object *obj);
+
 /** Is the given buffer object currently mapped by the GL user? */
 static inline GLboolean

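The _mesa_get_buffer_object_reference fast path moved into main/bufferobj.h above avoids one atomic increment per reference: the owning context pre-pays a large block (100000000) on the shared pipe_resource refcount, then hands out references from a context-private counter, and mesa_buffer_object_release_buffer returns whatever is left of the block before the final unreference. The standalone sketch below reproduces that batching idea with plain C11 atomics; the types, names and single-threaded main() are illustrative assumptions, not Mesa code.

/* Illustrative sketch of private-refcount batching: one atomic add covers
 * many future references; unused credit is returned before release. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define BATCH 100000000  /* atomic increments pre-paid at once, as in the diff */

struct mock_resource {
   atomic_int refcount;            /* shared, potentially contended counter */
};

struct mock_owner {
   struct mock_resource *res;
   int private_refcount;           /* references already paid for, owner-only */
};

/* Fast path: hand out a reference, touching the shared atomic only once
 * per BATCH references. */
static struct mock_resource *
take_reference(struct mock_owner *owner)
{
   if (owner->private_refcount <= 0) {
      assert(owner->private_refcount == 0);
      owner->private_refcount = BATCH;
      atomic_fetch_add(&owner->res->refcount, BATCH);
   }
   owner->private_refcount--;
   return owner->res;
}

/* Release path: give back the unused part of the batch, mirroring what
 * mesa_buffer_object_release_buffer() does before the final unreference. */
static void
release_owner(struct mock_owner *owner)
{
   if (owner->private_refcount) {
      atomic_fetch_sub(&owner->res->refcount, owner->private_refcount);
      owner->private_refcount = 0;
   }
}

int main(void)
{
   struct mock_resource res = { .refcount = 1 };
   struct mock_owner owner = { .res = &res, .private_refcount = 0 };

   take_reference(&owner);   /* pays for BATCH references in one atomic add */
   take_reference(&owner);   /* no atomic operation needed this time */
   release_owner(&owner);    /* returns the BATCH - 2 unused references */

   /* 1 original + 2 handed-out references remain on the shared counter. */
   printf("shared refcount = %d\n", atomic_load(&res.refcount));
   return 0;
}
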
@@ -36,7 +36,8 @@ new_upload_buffer(struct gl_context *ctx, GLsizeiptr size, uint8_t **ptr)
 {
    assert(ctx->GLThread.SupportsBufferUploads);
 
-   struct gl_buffer_object *obj = st_bufferobj_alloc(ctx, -1);
+   struct gl_buffer_object *obj =
+      _mesa_internal_buffer_object_alloc(ctx, -1);
    if (!obj)
       return NULL;

@@ -46,7 +47,7 @@ new_upload_buffer(struct gl_context *ctx, GLsizeiptr size, uint8_t **ptr)
                           GL_WRITE_ONLY,
                           GL_CLIENT_STORAGE_BIT | GL_MAP_WRITE_BIT,
                           obj)) {
-      st_bufferobj_free(ctx, obj);
+      _mesa_delete_buffer_object(ctx, obj);
       return NULL;
    }

@@ -56,7 +57,7 @@ new_upload_buffer(struct gl_context *ctx, GLsizeiptr size, uint8_t **ptr)
                           MESA_MAP_THREAD_SAFE_BIT,
                           obj, MAP_GLTHREAD);
    if (!*ptr) {
-      st_bufferobj_free(ctx, obj);
+      _mesa_delete_buffer_object(ctx, obj);
       return NULL;
    }

@@ -109,7 +109,7 @@ setup_arrays(struct st_context *st,
       /* Set the vertex buffer. */
       if (binding->BufferObj) {
          vbuffer[bufidx].buffer.resource =
-            st_get_buffer_reference(ctx, binding->BufferObj);
+            _mesa_get_buffer_object_reference(ctx, binding->BufferObj);
          vbuffer[bufidx].is_user_buffer = false;
          vbuffer[bufidx].buffer_offset = binding->Offset +
                                          attrib->RelativeOffset;

@@ -142,7 +142,7 @@ setup_arrays(struct st_context *st,
       if (binding->BufferObj) {
          /* Set the binding */
          vbuffer[bufidx].buffer.resource =
-            st_get_buffer_reference(ctx, binding->BufferObj);
+            _mesa_get_buffer_object_reference(ctx, binding->BufferObj);
          vbuffer[bufidx].is_user_buffer = false;
          vbuffer[bufidx].buffer_offset = _mesa_draw_binding_offset(binding);
       } else {

@@ -41,6 +41,7 @@
 #include "util/u_upload_mgr.h"
 #include "cso_cache/cso_context.h"
 
+#include "main/bufferobj.h"
 #include "st_debug.h"
 #include "st_context.h"
 #include "st_atom.h"

@@ -272,7 +273,7 @@ st_bind_ubos(struct st_context *st, struct gl_program *prog,
       binding =
          &st->ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
 
-      cb.buffer = st_get_buffer_reference(st->ctx, binding->BufferObject);
+      cb.buffer = _mesa_get_buffer_object_reference(st->ctx, binding->BufferObject);
 
       if (cb.buffer) {
         cb.buffer_offset = binding->Offset;

@@ -50,61 +50,6 @@
 #include "util/u_inlines.h"
 
 
-/**
- * There is some duplication between mesa's bufferobjects and our
- * bufmgr buffers. Both have an integer handle and a hashtable to
- * lookup an opaque structure. It would be nice if the handles and
- * internal structure where somehow shared.
- */
-struct gl_buffer_object *
-st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
-{
-   struct gl_buffer_object *obj = ST_CALLOC_STRUCT(gl_buffer_object);
-
-   if (!obj)
-      return NULL;
-
-   _mesa_initialize_buffer_object(ctx, obj, name);
-
-   return obj;
-}
-
-
-static void
-release_buffer(struct gl_buffer_object *obj)
-{
-   if (!obj->buffer)
-      return;
-
-   /* Subtract the remaining private references before unreferencing
-    * the buffer. See the header file for explanation.
-    */
-   if (obj->private_refcount) {
-      assert(obj->private_refcount > 0);
-      p_atomic_add(&obj->buffer->reference.count,
-                   -obj->private_refcount);
-      obj->private_refcount = 0;
-   }
-   obj->private_refcount_ctx = NULL;
-
-   pipe_resource_reference(&obj->buffer, NULL);
-}
-
-
-/**
- * Deallocate/free a vertex/pixel buffer object.
- * Called via glDeleteBuffersARB().
- */
-void st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
-{
-   assert(obj->RefCount == 0);
-   _mesa_buffer_unmap_all_mappings(ctx, obj);
-   release_buffer(obj);
-   _mesa_delete_buffer_object(ctx, obj);
-}
-
-
 /**
  * Replace data in a subrange of buffer object. If the data range
  * specified by size + offset extends beyond the end of the buffer or

@@ -343,7 +288,7 @@ bufferobj_data(struct gl_context *ctx,
    obj->Usage = usage;
    obj->StorageFlags = storageFlags;
 
-   release_buffer(obj);
+   mesa_buffer_object_release_buffer(obj);
 
    unsigned bindings = buffer_target_to_bind_flags(target);

@@ -43,40 +43,6 @@ extern void
 st_init_bufferobject_functions(struct pipe_screen *screen,
                                struct dd_function_table *functions);
 
-static inline struct pipe_resource *
-st_get_buffer_reference(struct gl_context *ctx, struct gl_buffer_object *obj)
-{
-   if (unlikely(!obj))
-      return NULL;
-
-   struct pipe_resource *buffer = obj->buffer;
-
-   if (unlikely(!buffer))
-      return NULL;
-
-   /* Only one context is using the fast path. All other contexts must use
-    * the slow path.
-    */
-   if (unlikely(obj->private_refcount_ctx != ctx)) {
-      p_atomic_inc(&buffer->reference.count);
-      return buffer;
-   }
-
-   if (unlikely(obj->private_refcount <= 0)) {
-      assert(obj->private_refcount == 0);
-
-      /* This is the number of atomic increments we will skip. */
-      obj->private_refcount = 100000000;
-      p_atomic_add(&buffer->reference.count, obj->private_refcount);
-   }
-
-   /* Return a buffer reference while decrementing the private refcount. */
-   obj->private_refcount--;
-   return buffer;
-}
-
-struct gl_buffer_object *st_bufferobj_alloc(struct gl_context *ctx, GLuint name);
-void st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj);
 void st_bufferobj_subdata(struct gl_context *ctx,
                           GLintptrARB offset,
                           GLsizeiptrARB size,

@@ -154,7 +154,7 @@ prepare_indexed_draw(/* pass both st and ctx to reduce dereferences */
           * the threaded batch buffer.
           */
          info->index.resource =
-            st_get_buffer_reference(ctx, info->index.gl_bo);
+            _mesa_get_buffer_object_reference(ctx, info->index.gl_bo);
          info->take_index_buffer_ownership = true;
       } else {
          info->index.resource = info->index.gl_bo->buffer;

@@ -1097,7 +1097,7 @@ vbo_exec_vtx_init(struct vbo_exec_context *exec)
 {
    struct gl_context *ctx = gl_context_from_vbo_exec(exec);
 
-   exec->vtx.bufferobj = st_bufferobj_alloc(ctx, IMM_BUFFER_NAME);
+   exec->vtx.bufferobj = _mesa_internal_buffer_object_alloc(ctx, IMM_BUFFER_NAME);
 
    exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
    vbo_reset_all_attr(exec);

@@ -768,7 +768,7 @@ compile_vertex_list(struct gl_context *ctx)
    if (total_bytes_needed > available_bytes) {
       if (save->current_bo)
          _mesa_reference_buffer_object(ctx, &save->current_bo, NULL);
-      save->current_bo = st_bufferobj_alloc(ctx, VBO_BUF_ID + 1);
+      save->current_bo = _mesa_internal_buffer_object_alloc(ctx, VBO_BUF_ID + 1);
       bool success = st_bufferobj_data(ctx,
                                        GL_ELEMENT_ARRAY_BUFFER_ARB,
                                        MAX2(total_bytes_needed, VBO_SAVE_BUFFER_SIZE),

@@ -901,7 +901,7 @@ end:
    node->draw_begins = node->cold->prims[0].begin;
 
    if (!save->current_bo) {
-      save->current_bo = st_bufferobj_alloc(ctx, VBO_BUF_ID + 1);
+      save->current_bo = _mesa_internal_buffer_object_alloc(ctx, VBO_BUF_ID + 1);
       bool success = st_bufferobj_data(ctx,
                                        GL_ELEMENT_ARRAY_BUFFER_ARB,
                                        VBO_SAVE_BUFFER_SIZE,