Move over to libdrm.

At least one lockup remains when multiple clients are active and at least
one of them is using textures.
This commit is contained in:
Thomas Hellström 2006-03-05 21:52:03 +00:00
parent 78aa58c914
commit ccf52b6784
24 changed files with 537 additions and 2624 deletions

View file

@ -13,7 +13,7 @@ CXX = g++
#MKDEP = gcc -M
#MKDEP_OPTIONS = -MF depend
OPT_FLAGS = -g
OPT_FLAGS = -g -march=pentium4 -fprefetch-loop-arrays
PIC_FLAGS = -fPIC
# Add '-DGLX_USE_TLS' to ARCH_FLAGS to enable TLS support.
@ -63,7 +63,7 @@ WINDOW_SYSTEM=dri
# gamma are missing because they have not been converted to use the new
# interface.
DRI_DIRS = i810 i915 mach64 mga r128 r200 r300 radeon s3v \
DRI_DIRS = i915 mach64 mga r128 r200 r300 radeon s3v \
savage sis tdfx trident unichrome ffb
DRI_DIRS = i915

View file

@ -12,7 +12,6 @@ DRIVER_SOURCES = \
i830_tex.c \
i830_texstate.c \
i830_vtbl.c \
bufmgr_fake.c \
intel_render.c \
intel_regions.c \
intel_buffer_objects.c \
@ -46,7 +45,8 @@ DRIVER_SOURCES = \
intel_screen.c \
intel_span.c \
intel_state.c \
intel_tris.c
intel_tris.c \
intel_bufmgr.c

View file

@ -1,188 +0,0 @@
#ifndef BUFMGR_H
#define BUFMGR_H
#include "intel_context.h"
/* Note that this is destined to be external to Mesa, so don't use GL
* types like GLuint, etc.
*/
/* The buffer manager context. Opaque.
*/
struct bufmgr;
#define BM_LIST_MAX 32
/* List of buffers to validate. Probably better managed by the client:
*/
struct bm_buffer_list {
struct {
unsigned buffer;
unsigned *offset_return;
unsigned *memtype_return;
} elem[BM_LIST_MAX];
unsigned nr;
};
struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel );
/* struct bufmgr *bmCreate( ... ); */
/* struct bufmgr *bmAttach( ... ); */
/* Define an address space. Doesn't really do anything, but the
* information could be used to validate the bmInitPool() requests.
*/
void bmInitMemType( struct bufmgr *,
unsigned mem_type,
unsigned long size );
/* Create a pool of a given memory type, from a certain offset and a
* certain size.
*
* Also passed in is a virtual pointer to the start of the pool. This
* is useful in the faked-out version in i915 so that MapBuffer can
* return a pointer to a buffer residing in AGP space.
*
* Flags passed into a pool are inherited by all buffers allocated in
* that pool. So pools representing the static front,back,depth
* buffer allocations should have MEM_AGP|NO_UPLOAD|NO_EVICT|NO_MOVE to match
* the behaviour of the legacy allocations.
*
* Returns -1 for failure, pool number for success.
*/
int bmInitPool( struct bufmgr *,
unsigned long low_offset,
void *low_virtual,
unsigned long size,
unsigned flags);
/* Flags for validate and other calls. If both NO_UPLOAD and NO_EVICT
* are specified, ValidateBuffers is essentially a query.
*/
#define BM_MEM_LOCAL 0x1
#define BM_MEM_AGP 0x2
#define BM_MEM_VRAM 0x4 /* not yet used */
#define BM_WRITE 0x8 /* not yet used */
#define BM_READ 0x10 /* not yet used */
#define BM_NO_UPLOAD 0x20
#define BM_NO_EVICT 0x40
#define BM_NO_MOVE 0x80 /* not yet used */
#define BM_NO_ALLOC 0x100 /* legacy "fixed" buffers only */
#define BM_CLIENT 0x200 /* for map - pointer will be accessed
* without dri lock */
#define BM_NO_TTM 0x400
#define BM_MEM_MASK (BM_MEM_LOCAL|BM_MEM_AGP|BM_MEM_VRAM)
/* Stick closely to ARB_vbo semantics - they're well defined and
* understood, and drivers can just pass the calls through without too
* much thunking.
*/
void bmGenBuffers(struct bufmgr *, unsigned n, unsigned *buffers);
void bmDeleteBuffers(struct bufmgr *, unsigned n, unsigned *buffers);
/* Hook to inform faked buffer manager about fixed-position
* front,depth,back buffers. These may move to a fully memory-managed
* scheme, or they may continue to be managed as is.
*/
unsigned bmBufferStatic(struct bufmgr *,
unsigned buffer,
unsigned size,
unsigned pool);
/* The driver has more intimate knowledge of the hardare than a GL
* client would, so flags here is more proscriptive than the usage
* values in the ARB_vbo interface:
*/
void bmBufferData(struct bufmgr *,
unsigned buffer,
unsigned size,
const void *data,
unsigned flags );
void bmBufferSubData(struct bufmgr *,
unsigned buffer,
unsigned offset,
unsigned size,
const void *data );
void bmBufferGetSubData(struct bufmgr *,
unsigned buffer,
unsigned offset,
unsigned size,
void *data );
void *bmMapBuffer( struct bufmgr *,
unsigned buffer,
unsigned access );
void bmUnmapBuffer( struct bufmgr *,
unsigned buffer );
/* To be called prior to emitting commands to hardware which reference
* these buffers.
*
* NewBufferList() and AddBuffer() build up a list of buffers to be
* validated. The buffer list provides information on where the
* buffers should be placed and whether their contents need to be
* preserved on copying. The offset data elements are return values
* from this function telling the driver exactly where the buffers are
* currently located.
*
* ValidateBufferList() performs the actual validation and returns the
* buffer pools and offsets within the pools.
*
* FenceBufferList() must be called to set fences and other
* housekeeping before unlocking after a successful call to
* ValidateBufferList(). The buffer manager knows how to emit and test
* fences directly through the drm and without callbacks to the
* driver.
*/
struct bm_buffer_list *bmNewBufferList( void );
void bmAddBuffer( struct bm_buffer_list *list,
unsigned buffer,
unsigned flags,
unsigned *pool_return,
unsigned *offset_return );
int bmValidateBufferList( struct bufmgr *,
struct bm_buffer_list *,
unsigned flags );
unsigned bmFenceBufferList( struct bufmgr *,
struct bm_buffer_list * );
void bmFreeBufferList( struct bm_buffer_list * );
/* This functionality is used by the buffer manager, not really sure
* if we need to be exposing it in this way, probably libdrm will
* offer equivalent calls.
*
* For now they can stay, but will likely change/move before final:
*/
unsigned bmSetFence( struct bufmgr * );
int bmTestFence( struct bufmgr *, unsigned fence );
void bmFinishFence( struct bufmgr *, unsigned fence );
void bmFlushReadCaches( struct bufmgr *bm );
void bmFlushDrawCache( struct bufmgr *bm );
void bm_fake_NotifyContendedLockTake( struct bufmgr * );
extern int INTEL_DEBUG;
#define DEBUG_BUFMGR 0x2000
#define DBG(...) do { if (INTEL_DEBUG & DEBUG_BUFMGR) _mesa_printf(__VA_ARGS__); } while(0)
#endif

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -384,11 +384,11 @@ static void i830_emit_state( struct intel_context *intel )
BEGIN_BATCH(I830_DEST_SETUP_SIZE+2, 0);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer, BM_MEM_AGP|BM_WRITE, 0);
OUT_RELOC(state->draw_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0);
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR1]);
OUT_RELOC(state->depth_region->buffer, BM_MEM_AGP|BM_WRITE, 0);
OUT_RELOC(state->depth_region->buffer, DRM_MM_TT |DRM_MM_WRITE, 0);
OUT_BATCH(state->Buffer[I830_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DV1]);
@ -415,7 +415,7 @@ static void i830_emit_state( struct intel_context *intel )
if (state->tex_buffer[i]) {
OUT_RELOC(state->tex_buffer[i],
BM_MEM_AGP|BM_READ,
DRM_MM_TT|DRM_MM_READ,
state->tex_offset[i] | TM0S0_USE_FENCE);
}
else {

View file

@ -41,7 +41,7 @@
#include "utils.h"
#include "i915_reg.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"

View file

@ -248,11 +248,11 @@ static void i915_emit_state( struct intel_context *intel )
BEGIN_BATCH(I915_DEST_SETUP_SIZE+2, 0);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer, BM_MEM_AGP|BM_WRITE, 0);
OUT_RELOC(state->draw_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0);
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR1]);
OUT_RELOC(state->depth_region->buffer, BM_MEM_AGP|BM_WRITE, 0);
OUT_RELOC(state->depth_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0);
OUT_BATCH(state->Buffer[I915_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DV1]);
@ -291,7 +291,7 @@ static void i915_emit_state( struct intel_context *intel )
if (state->tex_buffer[i]) {
OUT_RELOC(state->tex_buffer[i],
BM_MEM_AGP|BM_READ,
DRM_MM_TT|DRM_MM_READ,
state->tex_offset[i]);
}
else {

View file

@ -27,7 +27,7 @@
#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
/* Relocations in kernel space:
* - pass dma buffer seperately
@ -91,18 +91,19 @@ static void intel_batchbuffer_reset( struct intel_batchbuffer *batch )
if (!batch->list)
batch->list = bmNewBufferList();
batch->list->nr = 0;
drmMMClearBufList(batch->list);
batch->list_count = 0;
batch->nr_relocs = 0;
batch->flags = 0;
bmAddBuffer( batch->list,
bmAddBuffer( batch->bm,
batch->list,
batch->buffer,
0,
DRM_MM_TT,
NULL,
&batch->offset[batch->list->nr]);
&batch->offset[batch->list_count++]);
batch->map = bmMapBuffer(batch->bm, batch->buffer,
BM_MEM_AGP|BM_MEM_LOCAL|BM_CLIENT|BM_WRITE);
batch->map = bmMapBuffer(batch->bm, batch->buffer, DRM_MM_WRITE);
batch->ptr = batch->map;
}
@ -116,7 +117,7 @@ struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel )
batch->intel = intel;
batch->bm = intel->bm;
bmGenBuffers(intel->bm, 1, &batch->buffer);
bmGenBuffers(intel->bm, 1, &batch->buffer, BM_BATCHBUFFER);
intel_batchbuffer_reset( batch );
return batch;
}
@ -140,43 +141,36 @@ static void do_flush_locked( struct intel_batchbuffer *batch,
bmValidateBufferList( batch->bm,
batch->list,
BM_MEM_AGP );
DRM_MM_TT );
/* Apply the relocations. This nasty map indicates to me that the
* whole task should be done internally by the memory manager, and
* that dma buffers probably need to be pinned within agp space.
*/
ptr = (GLuint *)bmMapBuffer(batch->bm, batch->buffer,
BM_NO_MOVE|BM_NO_UPLOAD|
BM_NO_EVICT|BM_MEM_AGP|
BM_WRITE);
ptr = (GLuint *)bmMapBuffer(batch->bm, batch->buffer, DRM_MM_WRITE);
for (i = 0; i < batch->nr_relocs; i++) {
struct buffer_reloc *r = &batch->reloc[i];
assert(r->elem < batch->list->nr);
DBG("apply fixup at offset 0x%x, elem %d (buf %d, offset 0x%x), delta 0x%x\n",
r->offset, r->elem, batch->list->elem[r->elem].buffer,
batch->offset[r->elem], r->delta);
assert(r->elem < batch->list_count);
ptr[r->offset/4] = batch->offset[r->elem] + r->delta;
}
if (INTEL_DEBUG & DEBUG_DMA)
intel_dump_batchbuffer( 0, ptr, used );
bmUnmapBuffer(batch->bm, batch->buffer);
/* Fire the batch buffer, which was uploaded above:
*/
#if 1
intel_batch_ioctl(batch->intel,
batch->offset[0],
used,
ignore_cliprects);
#endif
batch->last_fence = bmFenceBufferList(batch->bm, batch->list);
}
@ -248,19 +242,15 @@ GLboolean intel_batchbuffer_emit_reloc( struct intel_batchbuffer *batch,
assert(batch->nr_relocs <= MAX_RELOCS);
for (i = 0; i < batch->list->nr; i++)
if (buffer == batch->list->elem[i].buffer)
break;
if (i == batch->list->nr) {
if (i == BM_LIST_MAX)
return GL_FALSE;
bmAddBuffer(batch->list,
i = bmScanBufferList(batch->bm, batch->list, buffer);
if (i == -1) {
i = batch->list_count;
bmAddBuffer(batch->bm,
batch->list,
buffer,
flags,
NULL,
&batch->offset[i]);
&batch->offset[batch->list_count++]);
}
{

View file

@ -2,7 +2,7 @@
#define INTEL_BATCHBUFFER_H
#include "mtypes.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
struct intel_context;
@ -30,8 +30,9 @@ struct intel_batchbuffer {
/* In progress:
*/
GLuint offset[BM_LIST_MAX];
struct bm_buffer_list *list;
unsigned long offset[MAX_RELOCS];
struct _drmMMBufList *list;
GLuint list_count;
GLubyte *map;
GLubyte *ptr;

View file

@ -39,7 +39,7 @@
#include "intel_blit.h"
#include "intel_regions.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
@ -58,8 +58,6 @@ void intelCopyBuffer( const __DRIdrawablePrivate *dPriv )
assert(dPriv->driContextPriv->driverPrivate);
intel = (struct intel_context *) dPriv->driContextPriv->driverPrivate;
intelFlush( &intel->ctx );
bmFinishFence(intel->bm, intel->last_swap_fence);
@ -106,17 +104,17 @@ void intelCopyBuffer( const __DRIdrawablePrivate *dPriv )
OUT_BATCH( (pbox->y2 << 16) | pbox->x2 );
if (intel->sarea->pf_current_page == 0)
OUT_RELOC( intel->front_region->buffer, BM_MEM_AGP|BM_WRITE, 0 );
OUT_RELOC( intel->front_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0 );
else
OUT_RELOC( intel->back_region->buffer, BM_MEM_AGP|BM_WRITE, 0 );
OUT_RELOC( intel->back_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0 );
OUT_BATCH( (pbox->y1 << 16) | pbox->x1 );
OUT_BATCH( BR13 & 0xffff );
if (intel->sarea->pf_current_page == 0)
OUT_RELOC( intel->back_region->buffer, BM_MEM_AGP|BM_READ, 0 );
OUT_RELOC( intel->back_region->buffer, DRM_MM_TT|DRM_MM_READ, 0 );
else
OUT_RELOC( intel->front_region->buffer, BM_MEM_AGP|BM_READ, 0 );
OUT_RELOC( intel->front_region->buffer, DRM_MM_TT|DRM_MM_READ, 0 );
ADVANCE_BATCH();
}
@ -164,7 +162,7 @@ void intelEmitFillBlit( struct intel_context *intel,
OUT_BATCH( BR13 );
OUT_BATCH( (y << 16) | x );
OUT_BATCH( ((y+h) << 16) | (x+w) );
OUT_RELOC( dst_buffer, BM_MEM_AGP|BM_WRITE, dst_offset );
OUT_RELOC( dst_buffer, DRM_MM_TT|DRM_MM_WRITE, dst_offset );
OUT_BATCH( color );
ADVANCE_BATCH();
}
@ -234,10 +232,10 @@ void intelEmitCopyBlit( struct intel_context *intel,
OUT_BATCH( BR13 );
OUT_BATCH( (dst_y << 16) | dst_x );
OUT_BATCH( (dst_y2 << 16) | dst_x2 );
OUT_RELOC( dst_buffer, BM_MEM_AGP|BM_WRITE, dst_offset );
OUT_RELOC( dst_buffer, DRM_MM_TT|DRM_MM_WRITE, dst_offset );
OUT_BATCH( (src_y << 16) | src_x );
OUT_BATCH( ((GLint)src_pitch&0xffff) );
OUT_RELOC( src_buffer, BM_MEM_AGP|BM_READ, src_offset );
OUT_RELOC( src_buffer, DRM_MM_TT|DRM_MM_READ, src_offset );
ADVANCE_BATCH();
}
else {
@ -246,10 +244,10 @@ void intelEmitCopyBlit( struct intel_context *intel,
OUT_BATCH( BR13 );
OUT_BATCH( (0 << 16) | dst_x );
OUT_BATCH( (h << 16) | dst_x2 );
OUT_RELOC( dst_buffer, BM_MEM_AGP|BM_WRITE, dst_offset + dst_y * dst_pitch );
OUT_RELOC( dst_buffer, DRM_MM_TT|DRM_MM_WRITE, dst_offset + dst_y * dst_pitch );
OUT_BATCH( (0 << 16) | src_x );
OUT_BATCH( ((GLint)src_pitch&0xffff) );
OUT_RELOC( src_buffer, BM_MEM_AGP|BM_READ, src_offset + src_y * src_pitch );
OUT_RELOC( src_buffer, DRM_MM_TT|DRM_MM_READ, src_offset + src_y * src_pitch );
ADVANCE_BATCH();
}
}
@ -345,7 +343,7 @@ void intelClearWithBlit(GLcontext *ctx, GLbitfield flags, GLboolean all,
OUT_BATCH( BR13 );
OUT_BATCH( (b.y1 << 16) | b.x1 );
OUT_BATCH( (b.y2 << 16) | b.x2 );
OUT_RELOC( intel->front_region->buffer, BM_MEM_AGP|BM_WRITE, 0 );
OUT_RELOC( intel->front_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0 );
OUT_BATCH( clear_color );
ADVANCE_BATCH();
}
@ -356,7 +354,7 @@ void intelClearWithBlit(GLcontext *ctx, GLbitfield flags, GLboolean all,
OUT_BATCH( BR13 );
OUT_BATCH( (b.y1 << 16) | b.x1 );
OUT_BATCH( (b.y2 << 16) | b.x2 );
OUT_RELOC( intel->back_region->buffer, BM_MEM_AGP|BM_WRITE, 0 );
OUT_RELOC( intel->back_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0 );
OUT_BATCH( clear_color );
ADVANCE_BATCH();
}
@ -367,7 +365,7 @@ void intelClearWithBlit(GLcontext *ctx, GLbitfield flags, GLboolean all,
OUT_BATCH( BR13 );
OUT_BATCH( (b.y1 << 16) | b.x1 );
OUT_BATCH( (b.y2 << 16) | b.x2 );
OUT_RELOC( intel->depth_region->buffer, BM_MEM_AGP|BM_WRITE, 0 );
OUT_RELOC( intel->depth_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0 );
OUT_BATCH( clear_depth );
ADVANCE_BATCH();
}

View file

@ -32,7 +32,7 @@
#include "intel_context.h"
#include "intel_buffer_objects.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
/* There is some duplication between mesa's bufferobjects and our
@ -51,7 +51,7 @@ static struct gl_buffer_object *intel_bufferobj_alloc( GLcontext *ctx,
/* XXX: We generate our own handle, which is different to 'name' above.
*/
bmGenBuffers(intel->bm, 1, &obj->buffer);
bmGenBuffers(intel->bm, 1, &obj->buffer, 0);
return &obj->Base;
}

View file

@ -0,0 +1,325 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Steamboat Springs, CO.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#include "intel_bufmgr.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "hash.h"
#include "simple_list.h"
#include "mm.h"
#include "imports.h"
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm.h>
struct _mesa_HashTable;
/* The buffer manager context: maps small integer client handles
 * (generated in bmGenBuffers) to drmMMBuf objects via a Mesa hash table.
 */
struct bufmgr
{
struct intel_context *intel;
struct _mesa_HashTable *hash; /* client handle -> drmMMBuf * */
unsigned buf_nr; /* for generating ids */
drmMMPool batchPool; /* ring pool for batchbuffers, created in bm_intel_Attach */
};
/***********************************************************************
* Public functions
*/
/* The initialization functions are skewed in the fake implementation.
* This call would be to attach to an existing manager, rather than to
* create a local one.
*/
/* Attach to the DRM memory manager through this context's drm fd and
 * create the ring pool used for batchbuffer allocation.
 *
 * Returns the new buffer manager, or NULL on allocation failure.
 */
struct bufmgr *
bm_intel_Attach(struct intel_context *intel)
{
   struct bufmgr *bm = calloc(1, sizeof(*bm));
   int ret = 0;

   if (!bm)
      return NULL;

   bm->intel = intel;
   bm->hash = _mesa_NewHashTable();

   /* Pool creation must happen while holding the hardware lock.
    * Keep the drm call outside assert() so it is not compiled away
    * under NDEBUG.
    */
   drmGetLock(bm->intel->driFd, bm->intel->hHWContext, 0);
   ret = drmMMAllocBufferPool(bm->intel->driFd, mmPoolRing, 0,
                              BM_BATCHBUFFER | DRM_MM_TT |
                              DRM_MM_NO_EVICT,
                              1024 * 1024, 4096, &bm->batchPool);
   drmUnlock(bm->intel->driFd, bm->intel->hHWContext);

   assert(ret == 0);
   (void) ret;

   return bm;
}
/* Generate n buffer handles, returned in buffers[].
 *
 * 'flags' restricts the memory types the buffers may live in; zero
 * means any of TT/VRAM/SYSTEM.
 */
void
bmGenBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers, unsigned flags)
{
   unsigned i;
   unsigned bFlags =
      flags ? flags : (DRM_MM_TT | DRM_MM_VRAM | DRM_MM_SYSTEM);

   for (i = 0; i < n; i++) {
      drmMMBuf *buf = calloc(1, sizeof(*buf));
      int ret = 0;

      assert(buf != NULL);

      /* Keep the drm call outside assert() so it is not compiled away
       * under NDEBUG.  The '12' is presumably log2 page alignment
       * (4096) -- TODO confirm against drmMMInitBuffer.
       */
      ret = drmMMInitBuffer(bm->intel->driFd, bFlags, 12, buf);
      assert(ret == 0);
      (void) ret;

      /* Hand out our own small-integer handles rather than kernel ids. */
      buf->client_priv = ++bm->buf_nr;
      buffers[i] = buf->client_priv;
      _mesa_HashInsert(bm->hash, buffers[i], buf);
   }
}
/* Bind a previously generated handle to fixed memory shared with other
 * clients (the static front/back/depth buffers): pinned at 'offset'
 * with CPU mapping 'virtual', never evicted or moved.
 */
void
bmSetShared(struct bufmgr *bm, unsigned buffer, unsigned flags,
            unsigned long offset, void *virtual)
{
   drmMMBuf *buf = _mesa_HashLookup(bm->hash, buffer);
   int ret = 0;

   buf->flags = DRM_MM_NO_EVICT | DRM_MM_NO_MOVE | DRM_MM_SHARED;
   buf->flags |= flags & DRM_MM_MEMTYPE_MASK;
   buf->offset = offset;
   buf->virtual = virtual;

   /* Keep the drm call outside assert() so it is not compiled away
    * under NDEBUG.
    */
   ret = drmMMAllocBuffer(bm->intel->driFd, 0, NULL, 0, buf);
   assert(ret == 0);
   (void) ret;
}
/* Delete n buffer handles.
 *
 * NOTE(review): the assert(0) below is a deliberate "not implemented"
 * trap -- in debug builds the loop is unreachable, and with NDEBUG the
 * assert vanishes and the loop runs.  Even when it runs, the handles
 * are never removed from bm->hash and the drmMMBuf wrappers allocated
 * in bmGenBuffers are not free()d, so this leaks -- TODO before this
 * path is enabled.
 */
void
bmDeleteBuffers(struct bufmgr *bm, unsigned n, unsigned *buffers)
{
unsigned i;
assert(0);
for (i = 0; i < n; i++) {
drmMMBuf *buf = _mesa_HashLookup(bm->hash, buffers[i]);
drmMMFreeBuffer(bm->intel->driFd, buf);
}
}
/* If buffer size changes, free and reallocate. Otherwise update in
* place.
*/
/* Set buffer contents, reallocating backing storage as needed.
 *
 * Batchbuffers always cycle through the ring pool; shared (static)
 * buffers are updated in place; other buffers are reallocated when too
 * small or still busy on the hardware.  'data' may be NULL to size the
 * buffer without filling it.
 */
void
bmBufferData(struct bufmgr *bm,
             unsigned buffer, unsigned size, const void *data, unsigned flags)
{
   drmMMBuf *buf = (drmMMBuf *) _mesa_HashLookup(bm->hash, buffer);
   int ret = 0;

   DBG("bmBufferData %d sz 0x%x data: %p\n", buffer, size, data);

   assert(!buf->mapped);

   /* All drm calls are kept outside assert() so they are not compiled
    * away under NDEBUG.
    */
   if (buf->flags & BM_BATCHBUFFER) {
      ret = drmMMFreeBuffer(bm->intel->driFd, buf);
      assert(ret == 0);
      ret = drmMMAllocBuffer(bm->intel->driFd, size, &bm->batchPool, 1, buf);
      assert(ret == 0);
   }
   else if (!(buf->flags & DRM_MM_SHARED)) {
      /* Throw away the old storage rather than stalling on the GPU. */
      if (buf->size < size || drmBufIsBusy(bm->intel->driFd, buf)) {
         ret = drmMMFreeBuffer(bm->intel->driFd, buf);
         assert(ret == 0);
      }
      if (!buf->block) {
         ret = drmMMAllocBuffer(bm->intel->driFd, size, NULL, 0, buf);
         assert(ret == 0);
      }
   }
   (void) ret;

   if (data != NULL) {
      memcpy(bmMapBuffer(bm, buf->client_priv, flags), data, size);
      bmUnmapBuffer(bm, buf->client_priv);
   }
}
/* Update the buffer in place, in whatever space it is currently resident:
*/
/* Update a sub-range of the buffer in place, in whatever memory space
 * it currently resides.  Blocks until the hardware is done with it.
 */
void
bmBufferSubData(struct bufmgr *bm,
                unsigned buffer,
                unsigned offset, unsigned size, const void *data)
{
   drmMMBuf *buf = (drmMMBuf *) _mesa_HashLookup(bm->hash, buffer);

   DBG("bmBufferSubdata %d offset 0x%x sz 0x%x\n", buffer, offset, size);

   drmBufWaitBusy(bm->intel->driFd, buf);

   if (size) {
      /* Cast to char *: arithmetic on void * is a GNU extension,
       * not standard C.
       */
      memcpy((char *) bmMapBuffer(bm, buf->client_priv, 0) + offset,
             data, size);
      bmUnmapBuffer(bm, buf->client_priv);
   }
}
/* Extract data from the buffer:
*/
/* Read a sub-range of the buffer back into 'data'.  Blocks until the
 * hardware is done with the buffer.
 */
void
bmBufferGetSubData(struct bufmgr *bm,
                   unsigned buffer,
                   unsigned offset, unsigned size, void *data)
{
   drmMMBuf *buf = (drmMMBuf *) _mesa_HashLookup(bm->hash, buffer);

   /* Was a copy-paste of bmBufferSubData's message; name this function. */
   DBG("bmBufferGetSubData %d offset 0x%x sz 0x%x\n", buffer, offset, size);

   drmBufWaitBusy(bm->intel->driFd, buf);

   if (size) {
      /* Cast to char *: arithmetic on void * is a GNU extension,
       * not standard C.
       */
      memcpy(data, (char *) bmMapBuffer(bm, buf->client_priv, 0) + offset,
             size);
      bmUnmapBuffer(bm, buf->client_priv);
   }
}
/* Return a pointer to whatever space the buffer is currently resident in:
*/
/* Return a CPU pointer to whatever space the buffer currently resides
 * in.  Must be balanced with bmUnmapBuffer; nested maps are not
 * allowed (asserted).
 */
void *
bmMapBuffer(struct bufmgr *bm, unsigned buffer, unsigned flags)
{
   drmMMBuf *buf = (drmMMBuf *) _mesa_HashLookup(bm->hash, buffer);

   DBG("bmMapBuffer %d\n", buffer);
   /* Pointers must be printed with %p, not %x (wrong-size/UB on 64-bit). */
   DBG("Map: Block is %p\n", (void *) &buf->block);

   assert(!buf->mapped);
   return drmMMMapBuffer(bm->intel->driFd, buf);
}
/* Release the CPU mapping established by bmMapBuffer.  Unknown handles
 * are silently ignored.
 */
void
bmUnmapBuffer(struct bufmgr *bm, unsigned buffer)
{
   drmMMBuf *entry = (drmMMBuf *) _mesa_HashLookup(bm->hash, buffer);

   if (entry) {
      DBG("bmUnmapBuffer %d\n", buffer);
      drmMMUnmapBuffer(bm->intel->driFd, entry);
   }
}
/* Build the list of buffers to validate:
*/
/* Create an empty validate list; allocation is delegated to libdrm. */
struct _drmMMBufList *
bmNewBufferList(void)
{
   struct _drmMMBufList *head = drmMMInitListHead();
   return head;
}
/* Append 'buffer' to the validate list with the given placement flags.
 * memtype_return/offset_return (may be NULL) receive the buffer's
 * location once validated.  Returns drmMMBufListAdd's status.
 */
int
bmAddBuffer(struct bufmgr *bm,
            struct _drmMMBufList *list,
            unsigned buffer,
            unsigned flags,
            unsigned *memtype_return, unsigned long *offset_return)
{
   drmMMBuf *entry = (drmMMBuf *) _mesa_HashLookup(bm->hash, buffer);

   return drmMMBufListAdd(list, entry, 0, flags,
                          memtype_return, offset_return);
}
/* Destroy a validate list created by bmNewBufferList. */
void
bmFreeBufferList(struct _drmMMBufList *list)
{
   drmMMFreeBufList(list);
}
/* Return the index of 'buffer' on 'list', or -1 if it is not present
 * (see the caller in intel_batchbuffer_emit_reloc).
 */
int
bmScanBufferList(struct bufmgr *bm,
                 struct _drmMMBufList *list, unsigned buffer)
{
   drmMMBuf *entry = (drmMMBuf *) _mesa_HashLookup(bm->hash, buffer);

   return drmMMScanBufList(list, entry);
}
/* To be called prior to emitting commands to hardware which reference
* these buffers. The buffer_usage list provides information on where
* the buffers should be placed and whether their contents need to be
* preserved on copying. The offset and pool data elements are return
* values from this function telling the driver exactly where the
* buffers are currently located.
*/
/* Validate all buffers on the list, placing each one in memory and
 * filling in the memtype/offset return slots registered via
 * bmAddBuffer.  NOTE(review): 'flags' is currently unused -- libdrm
 * takes placement from the per-buffer list entries.
 */
int
bmValidateBufferList(struct bufmgr *bm,
                     struct _drmMMBufList *list, unsigned flags)
{
   (void) flags;
   return drmMMValidateBuffers(bm->intel->driFd, list);
}
/* After commands are emitted but before unlocking, this must be
* called so that the buffer manager can correctly age the buffers.
* The buffer manager keeps track of the list of validated buffers, so
* already knows what to apply the fence to.
*
* The buffer manager knows how to emit and test fences directly
* through the drm and without callbacks or whatever into the driver.
*/
/* After commands are emitted but before unlocking, fence the validated
 * buffers so the manager can age them correctly.  Returns the emitted
 * fence sequence number.
 */
unsigned
bmFenceBufferList(struct bufmgr *bm, struct _drmMMBufList *list)
{
   drmFence fence;
   int ret = 0;

   /* Keep the drm calls outside assert(): under NDEBUG the original
    * code would silently stop fencing altogether.
    */
   ret = drmMMFenceBuffers(bm->intel->driFd, list);
   assert(ret == 0);
   ret = drmEmitFence(bm->intel->driFd, 0, &fence);
   assert(ret == 0);
   (void) ret;

   return fence.fenceSeq;
}
/* This functionality is used by the buffer manager, not really sure
* if we need to be exposing it in this way, probably libdrm will
* offer equivalent calls.
*
* For now they can stay, but will likely change/move before final:
*/
/* Emit a fence and return its sequence number. */
unsigned
bmSetFence(struct bufmgr *bm)
{
   drmFence dFence;
   int ret = 0;

   /* Keep the drm call outside assert() so it survives NDEBUG builds. */
   ret = drmEmitFence(bm->intel->driFd, 0, &dFence);
   assert(ret == 0);
   (void) ret;

   return dFence.fenceSeq;
}
/* Non-blocking query: returns non-zero if 'fence' has retired. */
int
bmTestFence(struct bufmgr *bm, unsigned fence)
{
   drmFence dFence = { 0, fence };
   int retired = 0;   /* initialized: with NDEBUG the original returned
                       * an indeterminate value if the call was elided */
   int ret = 0;

   ret = drmTestFence(bm->intel->driFd, dFence, 0, &retired);
   assert(ret == 0);
   (void) ret;

   return retired;
}
/* Block until 'fence' has retired. */
void
bmFinishFence(struct bufmgr *bm, unsigned fence)
{
   drmFence dFence = { 0, fence };
   int ret = 0;

   /* Keep the wait outside assert(): under NDEBUG the original would
    * skip the wait entirely and break synchronization.
    */
   ret = drmWaitFence(bm->intel->driFd, dFence);
   assert(ret == 0);
   (void) ret;
}

View file

@ -0,0 +1,132 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Steamboat Springs, CO.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#ifndef BUFMGR_H
#define BUFMGR_H
/* NOTE(review): this guard name is the same one the legacy bufmgr.h
 * used; consider renaming to INTEL_BUFMGR_H to avoid any clash while
 * both headers exist in a tree.
 */
#include "intel_context.h"
/* Note that this is destined to be external to Mesa, so don't use GL
 * types like GLuint, etc.
 */
/* The buffer manager context. Opaque.
 */
struct bufmgr;
/* Attach to the DRM memory manager via the context's drm fd and create
 * the batchbuffer ring pool.
 */
struct bufmgr *bm_intel_Attach(struct intel_context *intel);
#define BM_BATCHBUFFER 0x01000000 /* bmGenBuffers flag: buffer is a
* batchbuffer, (re)allocated from the
* ring pool (see bmBufferData) */
/* Stick closely to ARB_vbo semantics - they're well defined and
 * understood, and drivers can just pass the calls through without too
 * much thunking.
 */
void bmGenBuffers(struct bufmgr *, unsigned n, unsigned *buffers,
unsigned flags);
void bmDeleteBuffers(struct bufmgr *, unsigned n, unsigned *buffers);
/* The driver has more intimate knowledge of the hardare than a GL
 * client would, so flags here is more proscriptive than the usage
 * values in the ARB_vbo interface:
 */
void bmBufferData(struct bufmgr *,
unsigned buffer,
unsigned size, const void *data, unsigned flags);
void bmBufferSubData(struct bufmgr *,
unsigned buffer,
unsigned offset, unsigned size, const void *data);
void bmBufferGetSubData(struct bufmgr *,
unsigned buffer,
unsigned offset, unsigned size, void *data);
void *bmMapBuffer(struct bufmgr *, unsigned buffer, unsigned access);
void bmUnmapBuffer(struct bufmgr *, unsigned buffer);
/* To be called prior to emitting commands to hardware which reference
 * these buffers.
 *
 * NewBufferList() and AddBuffer() build up a list of buffers to be
 * validated. The buffer list provides information on where the
 * buffers should be placed and whether their contents need to be
 * preserved on copying. The offset data elements are return values
 * from this function telling the driver exactly where the buffers are
 * currently located.
 *
 * ValidateBufferList() performs the actual validation and returns the
 * buffer pools and offsets within the pools.
 *
 * FenceBufferList() must be called to set fences and other
 * housekeeping before unlocking after a successful call to
 * ValidateBufferList(). The buffer manager knows how to emit and test
 * fences directly through the drm and without callbacks to the
 * driver.
 */
struct _drmMMBufList *bmNewBufferList(void);
int bmAddBuffer(struct bufmgr *bm,
struct _drmMMBufList *list,
unsigned buffer,
unsigned flags,
unsigned *pool_return, unsigned long *offset_return);
int bmValidateBufferList(struct bufmgr *,
struct _drmMMBufList *, unsigned flags);
unsigned bmFenceBufferList(struct bufmgr *, struct _drmMMBufList *);
void bmFreeBufferList(struct _drmMMBufList *);
/* Returns the list index of 'buffer', or -1 if not on the list. */
int bmScanBufferList(struct bufmgr *bm,
struct _drmMMBufList *list, unsigned buffer);
/* This functionality is used by the buffer manager, not really sure
 * if we need to be exposing it in this way, probably libdrm will
 * offer equivalent calls.
 *
 * For now they can stay, but will likely change/move before final:
 */
unsigned bmSetFence(struct bufmgr *);
int bmTestFence(struct bufmgr *, unsigned fence);
void bmFinishFence(struct bufmgr *, unsigned fence);
/* Pin a generated handle to fixed shared memory (static front/back/
 * depth buffers) at 'offset' with CPU mapping 'virtual'.
 */
void bmSetShared(struct bufmgr *bm, unsigned buffer,
unsigned flags, unsigned long offset, void *virtual);
extern int INTEL_DEBUG;
#define DEBUG_BUFMGR 0x2000
#define DBG(...) do { if (INTEL_DEBUG & DEBUG_BUFMGR) _mesa_printf(__VA_ARGS__); } while(0)
#endif

View file

@ -59,7 +59,7 @@
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
#include "utils.h"
#ifndef INTEL_DEBUG
@ -251,13 +251,16 @@ void intelFlush( GLcontext *ctx )
/* XXX: Need to do an MI_FLUSH here. Actually, the bufmgr_fake.c
* code will have done one already.
*/
bmFinishFence( intel->bm, intel->batch->last_fence );
}
void intelFinish( GLcontext *ctx )
{
struct intel_context *intel = intel_context( ctx );
intelFlush( ctx );
bmFinishFence( intel->bm, intel->last_fence );
bmFinishFence( intel->bm, intel->batch->last_fence );
}
@ -385,19 +388,19 @@ GLboolean intelInitContext( struct intel_context *intel,
/* Buffer manager:
*/
intel->bm = bm_fake_intel_Attach( intel );
intel->bm = bm_intel_Attach( intel );
#if 0
bmInitPool(intel->bm,
intel->intelScreen->tex.offset, /* low offset */
intel->intelScreen->tex.map, /* low virtual */
intel->intelScreen->tex.size,
BM_MEM_AGP);
DRM_MM_TT);
#endif
/* These are still static, but create regions for them.
*/
intel->front_region =
intel_region_create_static(intel,
BM_MEM_AGP,
DRM_MM_TT,
intelScreen->front.offset,
intelScreen->front.map,
intelScreen->cpp,
@ -407,7 +410,7 @@ GLboolean intelInitContext( struct intel_context *intel,
intel->back_region =
intel_region_create_static(intel,
BM_MEM_AGP,
DRM_MM_TT,
intelScreen->back.offset,
intelScreen->back.map,
intelScreen->cpp,
@ -418,7 +421,7 @@ GLboolean intelInitContext( struct intel_context *intel,
*/
intel->depth_region =
intel_region_create_static(intel,
BM_MEM_AGP,
DRM_MM_TT,
intelScreen->depth.offset,
intelScreen->depth.map,
intelScreen->cpp,

View file

@ -41,7 +41,7 @@
#include "intel_blit.h"
#include "intel_regions.h"
#include "drm.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
int intelEmitIrqLocked( struct intel_context *intel )

View file

@ -28,7 +28,7 @@
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
#include "enums.h"
static GLenum target_to_target( GLenum target )

View file

@ -39,7 +39,7 @@
#include "intel_regions.h"
#include "intel_tris.h"
#include "intel_pixel.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
static struct intel_region *copypix_src_region( struct intel_context *intel,

View file

@ -42,7 +42,7 @@
#include "intel_pixel.h"
#include "intel_buffer_objects.h"
#include "intel_tris.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"

View file

@ -42,7 +42,7 @@
#include "intel_pixel.h"
#include "intel_buffer_objects.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
/* For many applications, the new ability to pull the source buffers
* back out of the GTT and then do the packing/conversion operations

View file

@ -42,7 +42,7 @@
#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
/* XXX: Thread safety?
*/
@ -80,7 +80,7 @@ struct intel_region *intel_region_alloc( struct intel_context *intel,
region->height = height; /* needed? */
region->refcount = 1;
bmGenBuffers(intel->bm, 1, &region->buffer);
bmGenBuffers(intel->bm, 1, &region->buffer, 0);
bmBufferData(intel->bm, region->buffer, pitch * cpp * height, NULL, 0);
return region;
@ -120,9 +120,6 @@ struct intel_region *intel_region_create_static( struct intel_context *intel,
GLuint height )
{
struct intel_region *region = calloc(sizeof(*region), 1);
GLuint size = cpp * pitch * height;
GLint pool;
DBG("%s\n", __FUNCTION__);
region->cpp = cpp;
@ -130,22 +127,13 @@ struct intel_region *intel_region_create_static( struct intel_context *intel,
region->height = height; /* needed? */
region->refcount = 1;
/* Recipe for creating a static buffer - create a static pool with
* the right offset and size, generate a buffer and use a special
* call to bind it to all of the memory in that pool.
/*
* We use a "shared" buffer type to indicate buffers created and
* shared by others.
*/
pool = bmInitPool(intel->bm, offset, virtual, size,
(BM_MEM_AGP |
BM_NO_UPLOAD |
BM_NO_EVICT |
BM_NO_MOVE));
if (pool < 0) {
_mesa_printf("bmInitPool failed for static region\n");
exit(1);
}
bmGenBuffers(intel->bm, 1, &region->buffer);
bmBufferStatic(intel->bm, region->buffer, size, pool);
bmGenBuffers(intel->bm, 1, &region->buffer, DRM_MM_TT | DRM_MM_SHARED);
bmSetShared(intel->bm, region->buffer, DRM_MM_TT, offset, virtual);
return region;
}

View file

@ -29,7 +29,7 @@
#define INTEL_REGIONS_H
#include "mtypes.h"
#include "bufmgr.h" /* for DBG! */
#include "intel_bufmgr.h" /* for DBG! */
struct intel_context;
/* A layer on top of the bufmgr buffers that adds a few useful things:

View file

@ -38,7 +38,7 @@
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_pixel.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
/* Do the best we can using the blitter. A future project is to use
* the texture engine and fragment programs for these copies.

View file

@ -4,7 +4,7 @@
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
#include "bufmgr.h"
#include "intel_bufmgr.h"
/**
* Compute which mipmap levels that really need to be sent to the hardware.