mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-06 09:28:07 +02:00
pb: add void * for flush ctx to mapping functions
If the buffer we are attempting to map is referenced by the unsubmitted command stream for this context, we need to flush the command stream. However, to do that we need to be able to access the context in the lowest-level map function. Currently we set the buffer in the top-level map, but this is racy between contexts. (We probably have a lot more issues than that.) I'll look into a proper solution, as suggested by jrfonseca, when I get some time.
This commit is contained in:
parent
95555ed03e
commit
b5fcf0c8e0
11 changed files with 24 additions and 28 deletions
|
|
@ -130,7 +130,7 @@ struct pb_vtbl
|
|||
* flags is bitmask of PB_USAGE_CPU_READ/WRITE.
|
||||
*/
|
||||
void *(*map)( struct pb_buffer *buf,
|
||||
unsigned flags );
|
||||
unsigned flags, void *flush_ctx );
|
||||
|
||||
void (*unmap)( struct pb_buffer *buf );
|
||||
|
||||
|
|
@ -164,13 +164,13 @@ struct pb_vtbl
|
|||
*/
|
||||
static INLINE void *
|
||||
pb_map(struct pb_buffer *buf,
|
||||
unsigned flags)
|
||||
unsigned flags, void *flush_ctx)
|
||||
{
|
||||
assert(buf);
|
||||
if(!buf)
|
||||
return NULL;
|
||||
assert(pipe_is_referenced(&buf->base.reference));
|
||||
return buf->vtbl->map(buf, flags);
|
||||
return buf->vtbl->map(buf, flags, flush_ctx);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -624,7 +624,7 @@ fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
|
|||
assert(fenced_buf->data);
|
||||
assert(fenced_buf->buffer);
|
||||
|
||||
map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE);
|
||||
map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
|
||||
if(!map)
|
||||
return PIPE_ERROR;
|
||||
|
||||
|
|
@ -644,7 +644,7 @@ fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
|
|||
assert(fenced_buf->data);
|
||||
assert(fenced_buf->buffer);
|
||||
|
||||
map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ);
|
||||
map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
|
||||
if(!map)
|
||||
return PIPE_ERROR;
|
||||
|
||||
|
|
@ -674,7 +674,7 @@ fenced_buffer_destroy(struct pb_buffer *buf)
|
|||
|
||||
static void *
|
||||
fenced_buffer_map(struct pb_buffer *buf,
|
||||
unsigned flags)
|
||||
unsigned flags, void *flush_ctx)
|
||||
{
|
||||
struct fenced_buffer *fenced_buf = fenced_buffer(buf);
|
||||
struct fenced_manager *fenced_mgr = fenced_buf->mgr;
|
||||
|
|
@ -712,7 +712,7 @@ fenced_buffer_map(struct pb_buffer *buf,
|
|||
}
|
||||
|
||||
if(fenced_buf->buffer) {
|
||||
map = pb_map(fenced_buf->buffer, flags);
|
||||
map = pb_map(fenced_buf->buffer, flags, flush_ctx);
|
||||
}
|
||||
else {
|
||||
assert(fenced_buf->data);
|
||||
|
|
|
|||
|
|
@ -167,10 +167,10 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
|
|||
|
||||
static void *
|
||||
pb_cache_buffer_map(struct pb_buffer *_buf,
|
||||
unsigned flags)
|
||||
unsigned flags, void *flush_ctx)
|
||||
{
|
||||
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
|
||||
return pb_map(buf->buffer, flags);
|
||||
return pb_map(buf->buffer, flags, flush_ctx);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -242,7 +242,7 @@ pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
|
|||
if(!pb_check_usage(desc->usage, buf->base.base.usage))
|
||||
return FALSE;
|
||||
|
||||
map = pb_map(buf->buffer, PB_USAGE_DONTBLOCK);
|
||||
map = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);
|
||||
if (!map) {
|
||||
return FALSE;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -181,7 +181,7 @@ pb_debug_buffer_check(struct pb_debug_buffer *buf)
|
|||
|
||||
map = pb_map(buf->buffer,
|
||||
PB_USAGE_CPU_READ |
|
||||
PB_USAGE_UNSYNCHRONIZED);
|
||||
PB_USAGE_UNSYNCHRONIZED, NULL);
|
||||
assert(map);
|
||||
if(map) {
|
||||
boolean underflow, overflow;
|
||||
|
|
@ -247,14 +247,14 @@ pb_debug_buffer_destroy(struct pb_buffer *_buf)
|
|||
|
||||
static void *
|
||||
pb_debug_buffer_map(struct pb_buffer *_buf,
|
||||
unsigned flags)
|
||||
unsigned flags, void *flush_ctx)
|
||||
{
|
||||
struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
|
||||
void *map;
|
||||
|
||||
pb_debug_buffer_check(buf);
|
||||
|
||||
map = pb_map(buf->buffer, flags);
|
||||
map = pb_map(buf->buffer, flags, flush_ctx);
|
||||
if(!map)
|
||||
return NULL;
|
||||
|
||||
|
|
|
|||
|
|
@ -269,7 +269,7 @@ mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
|
|||
|
||||
mm->map = pb_map(mm->buffer,
|
||||
PB_USAGE_CPU_READ |
|
||||
PB_USAGE_CPU_WRITE);
|
||||
PB_USAGE_CPU_WRITE, NULL);
|
||||
if(!mm->map)
|
||||
goto failure;
|
||||
|
||||
|
|
|
|||
|
|
@ -103,13 +103,13 @@ pb_ondemand_buffer_destroy(struct pb_buffer *_buf)
|
|||
|
||||
static void *
|
||||
pb_ondemand_buffer_map(struct pb_buffer *_buf,
|
||||
unsigned flags)
|
||||
unsigned flags, void *flush_ctx)
|
||||
{
|
||||
struct pb_ondemand_buffer *buf = pb_ondemand_buffer(_buf);
|
||||
|
||||
if(buf->buffer) {
|
||||
assert(!buf->data);
|
||||
return pb_map(buf->buffer, flags);
|
||||
return pb_map(buf->buffer, flags, flush_ctx);
|
||||
}
|
||||
else {
|
||||
assert(buf->data);
|
||||
|
|
@ -150,7 +150,7 @@ pb_ondemand_buffer_instantiate(struct pb_ondemand_buffer *buf)
|
|||
if(!buf->buffer)
|
||||
return PIPE_ERROR_OUT_OF_MEMORY;
|
||||
|
||||
map = pb_map(buf->buffer, PB_USAGE_CPU_READ);
|
||||
map = pb_map(buf->buffer, PB_USAGE_CPU_READ, NULL);
|
||||
if(!map) {
|
||||
pb_reference(&buf->buffer, NULL);
|
||||
return PIPE_ERROR;
|
||||
|
|
|
|||
|
|
@ -285,7 +285,7 @@ pool_bufmgr_create(struct pb_manager *provider,
|
|||
|
||||
pool->map = pb_map(pool->buffer,
|
||||
PB_USAGE_CPU_READ |
|
||||
PB_USAGE_CPU_WRITE);
|
||||
PB_USAGE_CPU_WRITE, NULL);
|
||||
if(!pool->map)
|
||||
goto failure;
|
||||
|
||||
|
|
|
|||
|
|
@ -316,7 +316,7 @@ pb_slab_create(struct pb_slab_manager *mgr)
|
|||
* through this address so it is required that the buffer is pinned. */
|
||||
slab->virtual = pb_map(slab->bo,
|
||||
PB_USAGE_CPU_READ |
|
||||
PB_USAGE_CPU_WRITE);
|
||||
PB_USAGE_CPU_WRITE, NULL);
|
||||
if(!slab->virtual) {
|
||||
ret = PIPE_ERROR_OUT_OF_MEMORY;
|
||||
goto out_err1;
|
||||
|
|
|
|||
|
|
@ -156,7 +156,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
|
|||
int write = 0;
|
||||
|
||||
if (rbuffer->pb) {
|
||||
return (uint8_t*)pb_map(rbuffer->pb, transfer->usage) + transfer->box.x;
|
||||
return (uint8_t*)pb_map(rbuffer->pb, transfer->usage, NULL) + transfer->box.x;
|
||||
}
|
||||
if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
|
||||
/* FIXME */
|
||||
|
|
|
|||
|
|
@ -89,10 +89,10 @@ static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
|
|||
|
||||
static void *
|
||||
radeon_drm_buffer_map_internal(struct pb_buffer *_buf,
|
||||
unsigned flags)
|
||||
unsigned flags, void *flush_ctx)
|
||||
{
|
||||
struct radeon_drm_buffer *buf = radeon_drm_buffer(_buf);
|
||||
struct radeon_libdrm_cs *cs = buf->cs;
|
||||
struct radeon_libdrm_cs *cs = flush_ctx;
|
||||
int write = 0;
|
||||
|
||||
if (flags & PB_USAGE_DONTBLOCK) {
|
||||
|
|
@ -293,12 +293,8 @@ void *radeon_drm_buffer_map(struct r300_winsys_screen *ws,
|
|||
enum pipe_transfer_usage usage)
|
||||
{
|
||||
struct pb_buffer *_buf = radeon_pb_buffer(buf);
|
||||
struct radeon_drm_buffer *rbuf = get_drm_buffer(_buf);
|
||||
|
||||
if (rbuf)
|
||||
rbuf->cs = radeon_libdrm_cs(cs);
|
||||
|
||||
return pb_map(_buf, get_pb_usage_from_transfer_flags(usage));
|
||||
return pb_map(_buf, get_pb_usage_from_transfer_flags(usage), radeon_libdrm_cs(cs));
|
||||
}
|
||||
|
||||
void radeon_drm_buffer_unmap(struct r300_winsys_screen *ws,
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
|
|||
unsigned flags)
|
||||
{
|
||||
(void)sws;
|
||||
return pb_map(vmw_pb_buffer(buf), flags);
|
||||
return pb_map(vmw_pb_buffer(buf), flags, NULL);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue