svga: Further cleanup/comment svga buffer code.

Just cosmetic changes -- no behavior change.
This commit is contained in:
José Fonseca 2010-02-18 14:05:57 +00:00
parent 770945cff4
commit a51d638ff3
2 changed files with 142 additions and 112 deletions

View file

@ -83,7 +83,7 @@ svga_buffer_create_host_surface(struct svga_screen *ss,
* as svga_screen_surface_create might have passed a recycled host
* buffer.
*/
sbuf->hw.flags.discard = TRUE;
sbuf->dma.flags.discard = TRUE;
SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->base.size);
}
@ -109,10 +109,10 @@ svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
struct svga_winsys_screen *sws = ss->sws;
assert(!sbuf->map.count);
assert(sbuf->hw.buf);
if(sbuf->hw.buf) {
sws->buffer_destroy(sws, sbuf->hw.buf);
sbuf->hw.buf = NULL;
assert(sbuf->hwbuf);
if(sbuf->hwbuf) {
sws->buffer_destroy(sws, sbuf->hwbuf);
sbuf->hwbuf = NULL;
}
}
@ -151,16 +151,16 @@ static INLINE enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
struct svga_buffer *sbuf)
{
if(!sbuf->hw.buf) {
if(!sbuf->hwbuf) {
unsigned alignment = sbuf->base.alignment;
unsigned usage = 0;
unsigned size = sbuf->base.size;
sbuf->hw.buf = svga_winsys_buffer_create(ss, alignment, usage, size);
if(!sbuf->hw.buf)
sbuf->hwbuf = svga_winsys_buffer_create(ss, alignment, usage, size);
if(!sbuf->hwbuf)
return PIPE_ERROR_OUT_OF_MEMORY;
assert(!sbuf->needs_flush);
assert(!sbuf->dma.pending);
}
return PIPE_OK;
@ -175,12 +175,12 @@ svga_buffer_upload_command(struct svga_context *svga,
struct svga_buffer *sbuf)
{
struct svga_winsys_context *swc = svga->swc;
struct svga_winsys_buffer *guest = sbuf->hw.buf;
struct svga_winsys_buffer *guest = sbuf->hwbuf;
struct svga_winsys_surface *host = sbuf->handle;
SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
SVGA3dSurfaceDMAFlags flags = sbuf->hw.flags;
SVGA3dSurfaceDMAFlags flags = sbuf->dma.flags;
SVGA3dCmdSurfaceDMA *cmd;
uint32 numBoxes = sbuf->hw.num_ranges;
uint32 numBoxes = sbuf->map.num_ranges;
SVGA3dCopyBox *boxes;
SVGA3dCmdSurfaceDMASuffix *pSuffix;
unsigned region_flags;
@ -218,8 +218,8 @@ svga_buffer_upload_command(struct svga_context *svga,
cmd->transfer = transfer;
sbuf->hw.boxes = (SVGA3dCopyBox *)&cmd[1];
sbuf->hw.svga = svga;
sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
sbuf->dma.svga = svga;
/* Increment reference count */
dummy = NULL;
@ -248,10 +248,10 @@ svga_buffer_upload_flush(struct svga_context *svga,
unsigned i;
assert(sbuf->handle);
assert(sbuf->hw.buf);
assert(sbuf->hw.num_ranges);
assert(sbuf->hw.svga == svga);
assert(sbuf->hw.boxes);
assert(sbuf->hwbuf);
assert(sbuf->map.num_ranges);
assert(sbuf->dma.svga == svga);
assert(sbuf->dma.boxes);
/*
* Patch the DMA command with the final copy box.
@ -259,34 +259,34 @@ svga_buffer_upload_flush(struct svga_context *svga,
SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);
boxes = sbuf->hw.boxes;
for(i = 0; i < sbuf->hw.num_ranges; ++i) {
boxes = sbuf->dma.boxes;
for(i = 0; i < sbuf->map.num_ranges; ++i) {
SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
sbuf->hw.ranges[i].start, sbuf->hw.ranges[i].end);
sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);
boxes[i].x = sbuf->hw.ranges[i].start;
boxes[i].x = sbuf->map.ranges[i].start;
boxes[i].y = 0;
boxes[i].z = 0;
boxes[i].w = sbuf->hw.ranges[i].end - sbuf->hw.ranges[i].start;
boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
boxes[i].h = 1;
boxes[i].d = 1;
boxes[i].srcx = sbuf->hw.ranges[i].start;
boxes[i].srcx = sbuf->map.ranges[i].start;
boxes[i].srcy = 0;
boxes[i].srcz = 0;
}
sbuf->hw.num_ranges = 0;
memset(&sbuf->hw.flags, 0, sizeof sbuf->hw.flags);
sbuf->map.num_ranges = 0;
memset(&sbuf->dma.flags, 0, sizeof sbuf->dma.flags);
assert(sbuf->head.prev && sbuf->head.next);
LIST_DEL(&sbuf->head);
#ifdef DEBUG
sbuf->head.next = sbuf->head.prev = NULL;
#endif
sbuf->needs_flush = FALSE;
sbuf->dma.pending = FALSE;
sbuf->hw.svga = NULL;
sbuf->hw.boxes = NULL;
sbuf->dma.svga = NULL;
sbuf->dma.boxes = NULL;
/* Decrement reference count */
pipe_buffer_reference((struct pipe_buffer **)&sbuf, NULL);
@ -294,7 +294,7 @@ svga_buffer_upload_flush(struct svga_context *svga,
/**
* Queue a DMA upload of a range of this buffer to the host.
* Note a dirty range.
*
* This function only notes the range down. It doesn't actually emit a DMA
* upload command. That only happens when a context tries to refer to this
@ -303,19 +303,19 @@ svga_buffer_upload_flush(struct svga_context *svga,
* We try to lump as many contiguous DMA transfers together as possible.
*/
static void
svga_buffer_upload_queue(struct svga_buffer *sbuf,
unsigned start,
unsigned end)
svga_buffer_add_range(struct svga_buffer *sbuf,
unsigned start,
unsigned end)
{
unsigned i;
unsigned nearest_range;
unsigned nearest_dist;
assert(sbuf->hw.buf);
assert(sbuf->hwbuf);
assert(end > start);
if (sbuf->hw.num_ranges < SVGA_BUFFER_MAX_RANGES) {
nearest_range = sbuf->hw.num_ranges;
if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
nearest_range = sbuf->map.num_ranges;
nearest_dist = ~0;
} else {
nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
@ -332,13 +332,13 @@ svga_buffer_upload_queue(struct svga_buffer *sbuf,
* buffer should be flushed.
*/
for(i = 0; i < sbuf->hw.num_ranges; ++i) {
for(i = 0; i < sbuf->map.num_ranges; ++i) {
int left_dist;
int right_dist;
int dist;
left_dist = start - sbuf->hw.ranges[i].end;
right_dist = sbuf->hw.ranges[i].start - end;
left_dist = start - sbuf->map.ranges[i].end;
right_dist = sbuf->map.ranges[i].start - end;
dist = MAX2(left_dist, right_dist);
if (dist <= 0) {
@ -346,8 +346,8 @@ svga_buffer_upload_queue(struct svga_buffer *sbuf,
* Ranges are contiguous or overlapping -- extend this one and return.
*/
sbuf->hw.ranges[i].start = MIN2(sbuf->hw.ranges[i].start, start);
sbuf->hw.ranges[i].end = MAX2(sbuf->hw.ranges[i].end, end);
sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
return;
}
else {
@ -367,21 +367,21 @@ svga_buffer_upload_queue(struct svga_buffer *sbuf,
* pending DMA upload and start clean.
*/
if(sbuf->needs_flush)
svga_buffer_upload_flush(sbuf->hw.svga, sbuf);
if(sbuf->dma.pending)
svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
assert(!sbuf->needs_flush);
assert(!sbuf->hw.svga);
assert(!sbuf->hw.boxes);
assert(!sbuf->dma.pending);
assert(!sbuf->dma.svga);
assert(!sbuf->dma.boxes);
if (sbuf->hw.num_ranges < SVGA_BUFFER_MAX_RANGES) {
if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
/*
* Add a new range.
*/
sbuf->hw.ranges[sbuf->hw.num_ranges].start = start;
sbuf->hw.ranges[sbuf->hw.num_ranges].end = end;
++sbuf->hw.num_ranges;
sbuf->map.ranges[sbuf->map.num_ranges].start = start;
sbuf->map.ranges[sbuf->map.num_ranges].end = end;
++sbuf->map.num_ranges;
} else {
/*
* Everything else failed, so just extend the nearest range.
@ -391,9 +391,9 @@ svga_buffer_upload_queue(struct svga_buffer *sbuf,
*/
assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
assert(nearest_range < sbuf->hw.num_ranges);
sbuf->hw.ranges[nearest_range].start = MIN2(sbuf->hw.ranges[nearest_range].start, start);
sbuf->hw.ranges[nearest_range].end = MAX2(sbuf->hw.ranges[nearest_range].end, end);
assert(nearest_range < sbuf->map.num_ranges);
sbuf->map.ranges[nearest_range].start = MIN2(sbuf->map.ranges[nearest_range].start, start);
sbuf->map.ranges[nearest_range].end = MAX2(sbuf->map.ranges[nearest_range].end, end);
}
}
@ -409,7 +409,7 @@ svga_buffer_map_range( struct pipe_screen *screen,
struct svga_buffer *sbuf = svga_buffer( buf );
void *map;
if (!sbuf->swbuf && !sbuf->hw.buf) {
if (!sbuf->swbuf && !sbuf->hwbuf) {
if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
/*
* We can't create a hardware buffer big enough, so create a malloc
@ -428,8 +428,8 @@ svga_buffer_map_range( struct pipe_screen *screen,
/* User/malloc buffer */
map = sbuf->swbuf;
}
else if (sbuf->hw.buf) {
map = sws->buffer_map(sws, sbuf->hw.buf, usage);
else if (sbuf->hwbuf) {
map = sws->buffer_map(sws, sbuf->hwbuf, usage);
}
else {
map = NULL;
@ -465,8 +465,8 @@ svga_buffer_flush_mapped_range( struct pipe_screen *screen,
assert(sbuf->map.writing);
if(sbuf->map.writing) {
assert(sbuf->map.flush_explicit);
if(sbuf->hw.buf)
svga_buffer_upload_queue(sbuf, offset, offset + length);
if(sbuf->hwbuf)
svga_buffer_add_range(sbuf, offset, offset + length);
}
pipe_mutex_unlock(ss->swc_mutex);
}
@ -485,16 +485,16 @@ svga_buffer_unmap( struct pipe_screen *screen,
if(sbuf->map.count)
--sbuf->map.count;
if(sbuf->hw.buf)
sws->buffer_unmap(sws, sbuf->hw.buf);
if(sbuf->hwbuf)
sws->buffer_unmap(sws, sbuf->hwbuf);
if(sbuf->map.writing) {
if(!sbuf->map.flush_explicit) {
/* No mapped range was flushed -- flush the whole buffer */
SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
if(sbuf->hw.buf)
svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);
if(sbuf->hwbuf)
svga_buffer_add_range(sbuf, 0, sbuf->base.size);
}
sbuf->map.writing = FALSE;
@ -512,12 +512,12 @@ svga_buffer_destroy( struct pipe_buffer *buf )
assert(!p_atomic_read(&buf->reference.count));
assert(!sbuf->needs_flush);
assert(!sbuf->dma.pending);
if(sbuf->handle)
svga_buffer_destroy_host_surface(ss, sbuf);
if(sbuf->hw.buf)
if(sbuf->hwbuf)
svga_buffer_destroy_hw_storage(ss, sbuf);
if(sbuf->swbuf && !sbuf->user)
@ -620,7 +620,7 @@ svga_screen_init_buffer_functions(struct pipe_screen *screen)
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
if(!sbuf->hw.buf) {
if(!sbuf->hwbuf) {
enum pipe_error ret;
void *map;
@ -634,7 +634,7 @@ svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
return ret;
pipe_mutex_lock(ss->swc_mutex);
map = ss->sws->buffer_map(ss->sws, sbuf->hw.buf, PIPE_BUFFER_USAGE_CPU_WRITE);
map = ss->sws->buffer_map(ss->sws, sbuf->hwbuf, PIPE_BUFFER_USAGE_CPU_WRITE);
assert(map);
if(!map) {
pipe_mutex_unlock(ss->swc_mutex);
@ -642,7 +642,7 @@ svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
}
memcpy(map, sbuf->swbuf, sbuf->base.size);
ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);
ss->sws->buffer_unmap(ss->sws, sbuf->hwbuf);
/* This user/malloc buffer is now indistinguishable from a gpu buffer */
assert(!sbuf->map.count);
@ -654,7 +654,7 @@ svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
sbuf->swbuf = NULL;
}
svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);
svga_buffer_add_range(sbuf, 0, sbuf->base.size);
}
pipe_mutex_unlock(ss->swc_mutex);
@ -688,16 +688,16 @@ svga_buffer_handle(struct svga_context *svga,
return NULL;
}
if(!sbuf->needs_flush && sbuf->hw.num_ranges) {
if(!sbuf->dma.pending && sbuf->map.num_ranges) {
/* Queue the buffer for flushing */
ret = svga_buffer_upload_command(svga, sbuf);
if(ret != PIPE_OK)
/* XXX: Should probably have a richer return value */
return NULL;
assert(sbuf->hw.svga == svga);
assert(sbuf->dma.svga == svga);
sbuf->needs_flush = TRUE;
sbuf->dma.pending = TRUE;
assert(!sbuf->head.prev && !sbuf->head.next);
LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
}
@ -757,7 +757,7 @@ svga_context_flush_buffers(struct svga_context *svga)
sbuf = LIST_ENTRY(struct svga_buffer, curr, head);
assert(p_atomic_read(&sbuf->base.reference.count) != 0);
assert(sbuf->needs_flush);
assert(sbuf->dma.pending);
svga_buffer_upload_flush(svga, sbuf);

View file

@ -56,35 +56,6 @@ struct svga_buffer_range
};
/**
* Describe a
*
* This holds the information to emit a SVGA3dCmdSurfaceDMA.
*/
struct svga_buffer_upload
{
/**
* Guest memory region.
*/
struct svga_winsys_buffer *buf;
struct svga_buffer_range ranges[SVGA_BUFFER_MAX_RANGES];
unsigned num_ranges;
SVGA3dSurfaceDMAFlags flags;
/**
* Pointer to the DMA copy box *inside* the command buffer.
*/
SVGA3dCopyBox *boxes;
/**
* Context that has the pending DMA to this buffer.
*/
struct svga_context *svga;
};
/**
* SVGA pipe buffer.
*/
@ -110,14 +81,6 @@ struct svga_buffer
*/
boolean user;
/**
* DMA'ble memory.
*
* A piece of GMR memory. It is created when mapping the buffer, and will be
* used to upload/download vertex data from the host.
*/
struct svga_buffer_upload hw;
/**
* Creation key for the host surface handle.
*
@ -134,14 +97,81 @@ struct svga_buffer
* trying to bind
*/
struct svga_winsys_surface *handle;
/**
* Information about ongoing and past map operations.
*/
struct {
/**
* Number of concurrent mappings.
*
* XXX: It is impossible to guarantee concurrent maps work in all
* circumstances -- pipe_buffers really need transfer objects too.
*/
unsigned count;
/**
* Whether this buffer is currently mapped for writing.
*/
boolean writing;
/**
* Whether the application will tell us explicitly which ranges it touched
* or not.
*/
boolean flush_explicit;
/**
* Dirty ranges.
*
* Ranges that were touched by the application and need to be uploaded to
* the host.
*
* This information will be copied into dma.boxes, when emitting the
* SVGA3dCmdSurfaceDMA command.
*/
struct svga_buffer_range ranges[SVGA_BUFFER_MAX_RANGES];
unsigned num_ranges;
} map;
boolean needs_flush;
/**
* DMA'ble memory.
*
* A piece of GMR memory, with the same size of the buffer. It is created
* when mapping the buffer, and will be used to upload vertex data to the
* host.
*/
struct svga_winsys_buffer *hwbuf;
/**
* Information about pending DMA uploads.
*
*/
struct {
/**
* Whether this buffer has an unfinished DMA upload command.
*
* If not set then the rest of the information is null.
*/
boolean pending;
SVGA3dSurfaceDMAFlags flags;
/**
* Pointer to the DMA copy box *inside* the command buffer.
*/
SVGA3dCopyBox *boxes;
/**
* Context that has the pending DMA to this buffer.
*/
struct svga_context *svga;
} dma;
/**
* Linked list head, used to gather all buffers with pending dma uploads on
* a context. It is only valid if the dma.pending is set above.
*/
struct list_head head;
};