intel: Remove the ttm backend

RIP ttm, it's been fun knowing you.
This commit is contained in:
Jakob Bornecrantz 2009-02-02 01:32:11 +01:00
parent 5de5ab428c
commit 56d88aece9
16 changed files with 0 additions and 4178 deletions

View file

@ -1,23 +0,0 @@
# Build the inteldrm winsys helper library for the Gallium i915 driver.
TOP = ../../../../../..
include $(TOP)/configs/current

LIBNAME = inteldrm

# Backend sources: batchbuffer/context/device glue plus the user-space
# TTM buffer/fence managers and buffer pools.
C_SOURCES = \
intel_be_batchbuffer.c \
intel_be_context.c \
intel_be_device.c \
ws_dri_bufmgr.c \
ws_dri_drmpool.c \
ws_dri_fencemgr.c \
ws_dri_mallocpool.c \
ws_dri_slabpool.c

include ./Makefile.template

# Pick up libdrm cflags; only define DRM_VBLANK_FLIP when libdrm is new
# enough (>= 2.3.1) to provide it.
DRIVER_DEFINES = $(shell pkg-config libdrm --cflags \
&& pkg-config libdrm --atleast-version=2.3.1 \
&& echo "-DDRM_VBLANK_FLIP=DRM_VBLANK_FLIP")

# No symlinks needed for this library.
symlinks:

View file

@ -1,64 +0,0 @@
# -*-makefile-*-
# Shared template for building a static winsys helper library.
# We still have a dependency on the "dri" buffer manager. Most likely
# the interface can be reused in non-dri environments, and also as a
# frontend to simpler memory managers.
#
COMMON_SOURCES =

# Object list derived from the per-driver source lists.
OBJECTS = $(C_SOURCES:.c=.o) \
$(CPP_SOURCES:.cpp=.o) \
$(ASM_SOURCES:.S=.o)

### Include directories
INCLUDES = \
-I. \
-I$(TOP)/src/gallium/include \
-I$(TOP)/src/gallium/auxiliary \
-I$(TOP)/src/gallium/drivers \
-I$(TOP)/include \
$(DRIVER_INCLUDES)

##### RULES #####
.c.o:
	$(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@
.cpp.o:
	$(CXX) -c $(INCLUDES) $(CXXFLAGS) $(DRIVER_DEFINES) $< -o $@
.S.o:
	$(CC) -c $(INCLUDES) $(CFLAGS) $(DRIVER_DEFINES) $< -o $@

##### TARGETS #####
default: depend symlinks $(LIBNAME)

# Link all objects into a single static library.
$(LIBNAME): $(OBJECTS) Makefile Makefile.template
	$(TOP)/bin/mklib -o $@ -static $(OBJECTS) $(DRIVER_LIBS)

# Regenerate the dependency file from scratch; mkdep errors are ignored.
depend: $(C_SOURCES) $(CPP_SOURCES) $(ASM_SOURCES) $(SYMLINKS)
	rm -f depend
	touch depend
	$(MKDEP) $(MKDEP_OPTIONS) $(DRIVER_DEFINES) $(INCLUDES) $(C_SOURCES) $(CPP_SOURCES) \
	$(ASM_SOURCES) 2> /dev/null

# Emacs tags
tags:
	etags `find . -name \*.[ch]` `find ../include`

# Remove .o and backup files
clean::
	-rm -f *.o */*.o *~ *.so *~ server/*.o $(SYMLINKS)
	-rm -f depend depend.bak

include depend

View file

@ -1,429 +0,0 @@
#include "intel_be_batchbuffer.h"
#include "intel_be_context.h"
#include "intel_be_device.h"
#include <errno.h>
#include "xf86drm.h"
/*
 * Grow (or shrink) the relocation buffer to hold num_relocs relocations
 * plus the fixed header.  Updates batch->reloc / batch->reloc_size.
 *
 * BUG FIX: the original did `batch->reloc = realloc(batch->reloc, size)`,
 * which loses (leaks) the old buffer and records a size we don't have when
 * realloc fails.  Keep the old buffer on failure and leave the bookkeeping
 * untouched; callers treat allocation failure as fatal (assert).
 */
static void
intel_realloc_relocs(struct intel_be_batchbuffer *batch, int num_relocs)
{
   unsigned long size = num_relocs * I915_RELOC0_STRIDE + I915_RELOC_HEADER;
   uint32_t *tmp;

   size *= sizeof(uint32_t);

   tmp = realloc(batch->reloc, size);
   if (tmp == NULL) {
      /* Old buffer (if any) is still valid; nothing sane to do on OOM. */
      assert(0);
      return;
   }

   batch->reloc = tmp;
   batch->reloc_size = num_relocs;
}
/*
 * Reset the batchbuffer for a fresh round of commands: re-allocates backing
 * storage, re-adds the batch BO to its own validate list, caches the
 * kernel's presumed offset, and clears relocation/state bookkeeping.
 */
void
intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch)
{
   /*
    * Get a new, free batchbuffer.
    */
   drmBO *bo;
   struct drm_bo_info_req *req;

   driBOUnrefUserList(batch->list);
   driBOResetList(batch->list);

   /* base.size is the size available to the i915simple driver */
   batch->base.size = batch->device->max_batch_size - BATCH_RESERVED;
   batch->base.actual_size = batch->device->max_batch_size;
   driBOData(batch->buffer, batch->base.actual_size, NULL, NULL, 0);

   /*
    * Add the batchbuffer to the validate list.
    */
   driBOAddListItem(batch->list, batch->buffer,
                    DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
                    DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
                    &batch->dest_location, &batch->node);

   req = &batch->node->bo_arg.d.req.bo_req;

   /*
    * Set up information needed for us to make relocations
    * relative to the underlying drm buffer objects.
    */
   /* Read-lock the kernel BO while we snapshot its offset/mapping. */
   driReadLockKernelBO();
   bo = driBOKernel(batch->buffer);
   req->presumed_offset = (uint64_t) bo->offset;
   req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   batch->drmBOVirtual = (uint8_t *) bo->virtual;
   driReadUnlockKernelBO();

   /*
    * Adjust the relocation buffer size back to the default if it grew,
    * or allocate it on first use.
    */
   if (batch->reloc_size > INTEL_MAX_RELOCS ||
       batch->reloc == NULL)
      intel_realloc_relocs(batch, INTEL_DEFAULT_RELOCS);

   assert(batch->reloc != NULL);
   batch->reloc[0] = 0; /* No relocs yet. */
   batch->reloc[1] = 1; /* Reloc type 1 */
   batch->reloc[2] = 0; /* Only a single relocation list. */
   batch->reloc[3] = 0; /* Only a single relocation list. */

   /* Map the batch so the driver can write commands directly. */
   batch->base.map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
   batch->poolOffset = driBOPoolOffset(batch->buffer);
   batch->base.ptr = batch->base.map;
   batch->dirty_state = ~0;
   batch->nr_relocs = 0;
   batch->flags = 0;
   batch->id = 0;//batch->intel->intelScreen->batch_id++;
}
/*======================================================================
* Public functions
*/
/*
 * Allocate and initialize a batchbuffer bound to the given context.
 * Returns NULL on allocation failure (BUG FIX: the original dereferenced
 * an unchecked calloc result).
 */
struct intel_be_batchbuffer *
intel_be_batchbuffer_alloc(struct intel_be_context *intel)
{
   struct intel_be_batchbuffer *batch = calloc(sizeof(*batch), 1);

   if (!batch)
      return NULL;

   batch->intel = intel;
   batch->device = intel->device;

   /* 4096-byte aligned, executable, TT-resident batch BO. */
   driGenBuffers(intel->device->batchPool, "batchbuffer", 1,
                 &batch->buffer, 4096,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
   batch->last_fence = NULL;
   batch->list = driBOCreateList(20);
   batch->reloc = NULL;
   intel_be_batchbuffer_reset(batch);
   return batch;
}
/*
 * Destroy a batchbuffer: wait for and drop the last fence, unmap and
 * release the batch BO, free the validate list and relocation buffer.
 */
void
intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch)
{
   if (batch->last_fence) {
      driFenceFinish(batch->last_fence,
                     DRM_FENCE_TYPE_EXE, FALSE);
      driFenceUnReference(&batch->last_fence);
   }

   if (batch->base.map) {
      driBOUnmap(batch->buffer);
      batch->base.map = NULL;
   }

   driBOUnReference(batch->buffer);
   driBOFreeList(batch->list);
   /* free(NULL) is a no-op, so no guard needed (removed redundant check). */
   free(batch->reloc);
   batch->buffer = NULL;
   free(batch);
}
/*
 * Emit a relocation: record that the dword currently being written to the
 * batch must be patched to (driBO's final offset + pre_add), and emit the
 * presumed offset now so the kernel can skip the patch if it was right.
 */
void
intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
                           unsigned pre_add,
                           struct _DriBufferObject *driBO,
                           uint64_t val_flags,
                           uint64_t val_mask)
{
   int itemLoc;
   struct _drmBONode *node;
   uint32_t *reloc;
   struct drm_bo_info_req *req;

   driBOAddListItem(batch->list, driBO, val_flags, val_mask,
                    &itemLoc, &node);
   req = &node->bo_arg.d.req.bo_req;

   if (!(req->hint &  DRM_BO_HINT_PRESUMED_OFFSET)) {

      /*
       * Stop other threads from tampering with the underlying
       * drmBO while we're reading its offset.
       */
      driReadLockKernelBO();
      req->presumed_offset = (uint64_t) driBOKernel(driBO)->offset;
      driReadUnlockKernelBO();
      req->hint = DRM_BO_HINT_PRESUMED_OFFSET;
   }

   /* Target address is relative to the BO's pool slot. */
   pre_add += driBOPoolOffset(driBO);

   if (batch->nr_relocs == batch->reloc_size)
      intel_realloc_relocs(batch, batch->reloc_size * 2);

   reloc = batch->reloc +
      (I915_RELOC_HEADER + batch->nr_relocs * I915_RELOC0_STRIDE);

   /* reloc[0] = offset of the dword within the batch being patched. */
   reloc[0] = ((uint8_t *)batch->base.ptr - batch->drmBOVirtual);
   /* Write the guess now; kernel fixes it up if the BO moved. */
   i915_batchbuffer_dword(&batch->base, req->presumed_offset + pre_add);
   reloc[1] = pre_add;
   reloc[2] = itemLoc;
   reloc[3] = batch->dest_location;
   batch->nr_relocs++;
}
/*
 * Copy the kernel's post-validate buffer info reply back into the
 * user-space drmBO so its cached state matches reality.
 */
static void
i915_drm_copy_reply(const struct drm_bo_info_rep * rep, drmBO * buf)
{
   buf->handle = rep->handle;
   buf->flags = rep->flags;
   buf->size = rep->size;
   buf->offset = rep->offset;
   buf->mapHandle = rep->arg_handle;
   buf->proposedFlags = rep->proposed_flags;
   buf->start = rep->buffer_start;
   buf->fenceFlags = rep->fence_flags;
   buf->replyFlags = rep->rep_flags;
   buf->pageAlignment = rep->page_alignment;
}
/*
 * Submit the batchbuffer through the DRM_I915_EXECBUFFER ioctl.
 *
 * Builds a chained list of per-buffer validate requests from 'list',
 * fills in *ea, retries the ioctl while the kernel returns -EAGAIN, and
 * on success copies each buffer's reply (new offset/flags) back into its
 * drmBO.  Returns 0 or a negative errno.
 */
static int
i915_execbuf(struct intel_be_batchbuffer *batch,
             unsigned int used,
             boolean ignore_cliprects,
             drmBOList *list,
             struct drm_i915_execbuffer *ea)
{
//   struct intel_be_context *intel = batch->intel;
   drmBONode *node;
   drmMMListHead *l;
   struct drm_i915_op_arg *arg, *first;
   struct drm_bo_op_req *req;
   struct drm_bo_info_rep *rep;
   uint64_t *prevNext = NULL;
   drmBO *buf;
   int ret = 0;
   uint32_t count = 0;

   first = NULL;

   /* Link each node's op_arg into a single kernel-walkable chain via
    * the 'next' pointers, filling in the validate request for each BO. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);

      arg = &node->bo_arg;
      req = &arg->d.req;

      if (!first)
         first = arg;

      if (prevNext)
         *prevNext = (unsigned long)arg;

      prevNext = &arg->next;

      req->bo_req.handle = node->buf->handle;
      req->op = drm_bo_validate;
      req->bo_req.flags = node->arg0;
      req->bo_req.mask = node->arg1;
      req->bo_req.hint |= 0;
      count++;
   }

   memset(ea, 0, sizeof(*ea));
   ea->num_buffers = count;
   ea->batch.start = batch->poolOffset;
   ea->batch.used = used;
#if 0 /* ZZZ JB: no cliprects used */
   ea->batch.cliprects = intel->pClipRects;
   ea->batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;((((GLuint) intel->drawX) & 0xffff) |
                      (((GLuint) intel->drawY) << 16));
#else
   ea->batch.cliprects = NULL;
   ea->batch.num_cliprects = 0;
   ea->batch.DR1 = 0;
   ea->batch.DR4 = 0;
#endif
   ea->fence_arg.flags = DRM_I915_FENCE_FLAG_FLUSHED;
   ea->ops_list = (unsigned long) first;
   first->reloc_ptr = (unsigned long) batch->reloc;
   /* First word of the reloc buffer carries the relocation count. */
   batch->reloc[0] = batch->nr_relocs;

   //return -EFAULT;
   do {
      ret = drmCommandWriteRead(batch->device->fd, DRM_I915_EXECBUFFER, ea,
                                sizeof(*ea));
   } while (ret == -EAGAIN);

   if (ret != 0)
      return ret;

   /* Propagate the kernel's reply into each user-space drmBO. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      arg = &node->bo_arg;
      rep = &arg->d.rep.bo_info;

      if (!arg->handled) {
         return -EFAULT;
      }
      if (arg->d.rep.ret)
         return arg->d.rep.ret;

      buf = node->buf;
      i915_drm_copy_reply(rep, buf);
   }
   return 0;
}
/* TODO: Push this whole function into bufmgr.
 *
 * Validate the buffer list, submit the batch, and fence the buffers that
 * took part.  Must be called with the hardware lock held.  Returns the
 * fence for this submission (referenced for the batch's last_fence when
 * the batch did a RW access), or NULL on a skipped/failed submission.
 */
static struct _DriFenceObject *
do_flush_locked(struct intel_be_batchbuffer *batch,
                unsigned int used,
                boolean ignore_cliprects, boolean allow_unlock)
{
   struct intel_be_context *intel = batch->intel;
   struct _DriFenceObject *fo;
   drmFence fence;
   drmBOList *boList;
   struct drm_i915_execbuffer ea;
   int ret = 0;

   driBOValidateUserList(batch->list);
   boList = driGetdrmBOList(batch->list);

#if 0 /* ZZZ JB Allways run */
   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
#else
   if (1) {
#endif
      ret = i915_execbuf(batch, used, ignore_cliprects, boList, &ea);
   } else {
      driPutdrmBOList(batch->list);
      fo = NULL;
      goto out;
   }
   driPutdrmBOList(batch->list);
   if (ret)
      abort();

   if (ea.fence_arg.error != 0) {

      /*
       * The hardware has been idled by the kernel.
       * Don't fence the driBOs.
       */

      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);

#if 0 /* ZZZ JB: no _mesa_* funcs in gallium */
      _mesa_printf("fence error\n");
#endif
      batch->last_fence = NULL;
      fo = NULL;
      goto out;
   }

   /* Wrap the kernel fence in a DriFenceObject covering the whole list. */
   fence.handle = ea.fence_arg.handle;
   fence.fence_class = ea.fence_arg.fence_class;
   fence.type = ea.fence_arg.type;
   fence.flags = ea.fence_arg.flags;
   fence.signaled = ea.fence_arg.signaled;

   fo = driBOFenceUserList(batch->device->fenceMgr, batch->list,
                           "SuperFence", &fence);

   if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
      if (batch->last_fence)
         driFenceUnReference(&batch->last_fence);
      /*
       * FIXME: Context last fence??
       */
      batch->last_fence = fo;
      driFenceReference(fo);
   }
 out:
#if 0 /* ZZZ JB: fix this */
   intel->vtbl.lost_hardware(intel);
#else
   (void)intel;
#endif
   return fo;
}
/*
 * Terminate the batch with MI_FLUSH + MI_BATCH_BUFFER_END, submit it under
 * the hardware lock, and reset the batch for reuse.  Returns a referenced
 * fence for the submission.
 *
 * NOTE(review): when nothing was emitted (used == 0) this references and
 * returns batch->last_fence, which looks like it can be NULL right after
 * allocation/reset — callers appear to assume a prior flush; confirm.
 */
struct _DriFenceObject *
intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch)
{
   struct intel_be_context *intel = batch->intel;
   unsigned int used = batch->base.ptr - batch->base.map;
   boolean was_locked = batch->intel->hardware_locked(intel);
   struct _DriFenceObject *fence;

   if (used == 0) {
      driFenceReference(batch->last_fence);
      return batch->last_fence;
   }

   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    */
#if 0 /* ZZZ JB: what should we do here? */
   if (used & 4) {
      ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->base.ptr)[1] = 0;
      ((int *) batch->base.ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->base.ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->base.ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }
#else
   /* Pad so the batch ends on an 8-byte boundary: 3 dwords when the
    * current size is only 4-byte aligned, otherwise 2 dwords. */
   if (used & 4) {
      ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
      ((int *) batch->base.ptr)[1] = 0;
      ((int *) batch->base.ptr)[2] = (0xA<<23); // MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->base.ptr)[0] = ((0<<29)|(4<<23)); // MI_FLUSH;
      ((int *) batch->base.ptr)[1] = (0xA<<23); // MI_BATCH_BUFFER_END;
      used += 8;
   }
#endif

   driBOUnmap(batch->buffer);
   batch->base.ptr = NULL;
   batch->base.map = NULL;

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      intel->hardware_lock(intel);

   fence = do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
                           FALSE);

   if (!was_locked)
      intel->hardware_unlock(intel);

   /* Reset the buffer:
    */
   intel_be_batchbuffer_reset(batch);
   return fence;
}
/*
 * Flush the batch and block until the GPU has finished executing it,
 * then drop the temporary fence reference.
 */
void
intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch)
{
   struct _DriFenceObject *fence = intel_be_batchbuffer_flush(batch);
   driFenceFinish(fence, driFenceType(fence), FALSE);
   driFenceUnReference(&fence);
}
#if 0
/* Disabled: bulk copy of dword-aligned data into the batch.  Kept for
 * reference; intel_batchbuffer_require_space has no backend equivalent. */
void
intel_be_batchbuffer_data(struct intel_be_batchbuffer *batch,
                          const void *data, unsigned int bytes, unsigned int flags)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, flags);
   memcpy(batch->base.ptr, data, bytes);
   batch->base.ptr += bytes;
}
#endif

View file

@ -1,69 +0,0 @@
#ifndef INTEL_BE_BATCHBUFFER_H
#define INTEL_BE_BATCHBUFFER_H

#include "i915simple/i915_batch.h"
#include "ws_dri_bufmgr.h"

/* Dwords kept back from base.size so the END/FLUSH epilogue always fits. */
#define BATCH_RESERVED 16

/* Initial and maximum relocation-buffer capacity (entries). */
#define INTEL_DEFAULT_RELOCS 100
#define INTEL_MAX_RELOCS 400

#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS    0x2

struct intel_be_context;
struct intel_be_device;

/**
 * Backend batchbuffer: wraps an i915simple batchbuffer around a TTM
 * buffer object plus the validate list and relocation records needed
 * to submit it through DRM_I915_EXECBUFFER.
 */
struct intel_be_batchbuffer
{
   struct i915_batchbuffer base;       /* driver-visible batch (map/ptr/size) */

   struct intel_be_context *intel;
   struct intel_be_device *device;

   struct _DriBufferObject *buffer;    /* backing batch BO */
   struct _DriFenceObject *last_fence; /* fence of last RW submission */
   uint32_t flags;

   struct _DriBufferList *list;        /* BOs to validate with this batch */
   size_t list_count;

   uint32_t *reloc;                    /* reloc records (header + entries) */
   size_t reloc_size;                  /* capacity in entries */
   size_t nr_relocs;                   /* entries used */

   uint32_t dirty_state;
   uint32_t id;

   uint32_t poolOffset;                /* batch offset inside its pool BO */
   uint8_t *drmBOVirtual;              /* kernel BO mapping base */
   struct _drmBONode *node; /* Validation list node for this buffer */
   int dest_location;       /* Validation list sequence for this buffer */
};

struct intel_be_batchbuffer *
intel_be_batchbuffer_alloc(struct intel_be_context *intel);

void
intel_be_batchbuffer_free(struct intel_be_batchbuffer *batch);

/* Flush and wait for completion. */
void
intel_be_batchbuffer_finish(struct intel_be_batchbuffer *batch);

/* Submit the batch; returns a referenced fence. */
struct _DriFenceObject *
intel_be_batchbuffer_flush(struct intel_be_batchbuffer *batch);

void
intel_be_batchbuffer_reset(struct intel_be_batchbuffer *batch);

/* Record a relocation for driBO at the batch's current write position. */
void
intel_be_offset_relocation(struct intel_be_batchbuffer *batch,
                           unsigned pre_add,
                           struct _DriBufferObject *driBO,
                           uint64_t val_flags,
                           uint64_t val_mask);

#endif

View file

@ -1,107 +0,0 @@
/*
* Authors: Jakob Bornecrantz <jakob-at-tungstengraphics.com>
*/
#include "ws_dri_fencemgr.h"
#include "intel_be_device.h"
#include "intel_be_context.h"
#include "intel_be_batchbuffer.h"
/* Downcast: an i915_winsys is the first member of intel_be_context. */
static INLINE struct intel_be_context *
intel_be_context(struct i915_winsys *sws)
{
   return (struct intel_be_context *)sws;
}
/* Simple batchbuffer interface:
 */

/* Hand the driver the context's current batchbuffer. */
static struct i915_batchbuffer*
intel_i915_batch_get(struct i915_winsys *sws)
{
   struct intel_be_context *intel = intel_be_context(sws);
   return &intel->batch->base;
}
/*
 * Translate the driver's access flags into DRM BO flags/mask and record
 * a relocation for 'buf' at the batch's current write position.
 */
static void intel_i915_batch_reloc(struct i915_winsys *sws,
                                   struct pipe_buffer *buf,
                                   unsigned access_flags,
                                   unsigned delta)
{
   struct intel_be_context *context = intel_be_context(sws);
   unsigned bo_flags = DRM_BO_FLAG_MEM_TT;
   unsigned bo_mask = DRM_BO_MASK_MEM;
   unsigned extra = 0;

   if (access_flags & I915_BUFFER_ACCESS_WRITE)
      extra |= DRM_BO_FLAG_WRITE;

   if (access_flags & I915_BUFFER_ACCESS_READ)
      extra |= DRM_BO_FLAG_READ;

   bo_flags |= extra;
   bo_mask |= extra;

   intel_be_offset_relocation(context->batch, delta, dri_bo(buf),
                              bo_flags, bo_mask);
}
/*
 * Flush the context's batchbuffer.  If 'fence' is non-NULL it receives
 * the (already referenced) submission fence; otherwise the fence is
 * dropped immediately.
 *
 * BUG FIX: the failure path unconditionally wrote `*fence = NULL`, which
 * dereferences a NULL pointer when the caller passed fence == NULL.
 */
static void intel_i915_batch_flush(struct i915_winsys *sws,
                                   struct pipe_fence_handle **fence)
{
   struct intel_be_context *intel = intel_be_context(sws);

   /* DriFenceObject and pipe_fence_handle alias through this union. */
   union {
      struct _DriFenceObject *dri;
      struct pipe_fence_handle *pipe;
   } fu;

   if (fence)
      assert(!*fence);

   fu.dri = intel_be_batchbuffer_flush(intel->batch);

   if (!fu.dri) {
      assert(0);
      if (fence)
         *fence = NULL;
      return;
   }

   if (fence)
      *fence = fu.pipe;
   else
      driFenceUnReference(&fu.dri);
}
boolean
intel_be_init_context(struct intel_be_context *intel, struct intel_be_device *device)
{
assert(intel);
assert(device);
intel->device = device;
/* TODO move framebuffer createion to the driver */
intel->base.batch_get = intel_i915_batch_get;
intel->base.batch_reloc = intel_i915_batch_reloc;
intel->base.batch_flush = intel_i915_batch_flush;
intel->batch = intel_be_batchbuffer_alloc(intel);
return true;
}
/* Tear down context-owned resources; the struct itself is freed by the
 * winsys that allocated it. */
void
intel_be_destroy_context(struct intel_be_context *intel)
{
   intel_be_batchbuffer_free(intel->batch);
}

View file

@ -1,40 +0,0 @@
/* These need to be diffrent from the intel winsys */
#ifndef INTEL_BE_CONTEXT_H
#define INTEL_BE_CONTEXT_H
#include "i915simple/i915_winsys.h"
struct intel_be_context
{
/** Interface to i915simple driver */
struct i915_winsys base;
struct intel_be_device *device;
struct intel_be_batchbuffer *batch;
/*
* Hardware lock functions.
*
* Needs to be filled in by the winsys.
*/
void (*hardware_lock)(struct intel_be_context *context);
void (*hardware_unlock)(struct intel_be_context *context);
boolean (*hardware_locked)(struct intel_be_context *context);
};
/**
* Intialize a allocated intel_be_context struct.
*
* Remember to set the hardware_* functions.
*/
boolean
intel_be_init_context(struct intel_be_context *intel,
struct intel_be_device *device);
/**
* Destroy a intel_be_context.
* Does not free the struct that is up to the winsys.
*/
void
intel_be_destroy_context(struct intel_be_context *intel);
#endif

View file

@ -1,296 +0,0 @@
/*
* Authors: Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Jakob Bornecrantz <jakob-at-tungstengraphics-dot-com>
*/
#include "intel_be_device.h"
#include "ws_dri_bufmgr.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
#include "pipe/internal/p_winsys_screen.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_inlines.h"
#include "util/u_memory.h"
#include "i915simple/i915_screen.h"
/* Turn a pipe winsys into an intel/pipe winsys:
*/
/* Downcast: a pipe_winsys is the first member of intel_be_device. */
static INLINE struct intel_be_device *
intel_be_device( struct pipe_winsys *winsys )
{
   return (struct intel_be_device *)winsys;
}
/*
* Buffer functions.
*
* Most callbacks map direcly onto dri_bufmgr operations:
*/
/*
 * Map a pipe buffer for CPU access, translating pipe usage flags into
 * the equivalent DRM BO mapping flags.
 */
static void *intel_be_buffer_map(struct pipe_winsys *winsys,
                                 struct pipe_buffer *buf,
                                 unsigned flags )
{
   unsigned access = 0;

   access |= (flags & PIPE_BUFFER_USAGE_CPU_WRITE) ? DRM_BO_FLAG_WRITE : 0;
   access |= (flags & PIPE_BUFFER_USAGE_CPU_READ) ? DRM_BO_FLAG_READ : 0;

   return driBOMap( dri_bo(buf), access, 0 );
}
/* Release the CPU mapping established by intel_be_buffer_map. */
static void intel_be_buffer_unmap(struct pipe_winsys *winsys,
                                  struct pipe_buffer *buf)
{
   driBOUnmap( dri_bo(buf) );
}
/* Drop the BO reference and free the wrapper struct. */
static void
intel_be_buffer_destroy(struct pipe_winsys *winsys,
                        struct pipe_buffer *buf)
{
   driBOUnReference( dri_bo(buf) );
   FREE(buf);
}
/*
 * Allocate a pipe buffer, picking a buffer pool and placement flags from
 * the usage bits: vertex/constant data stays in cached local memory,
 * PIPE_BUFFER_USAGE_CUSTOM uses the (larger) vertex slab pool, everything
 * else comes from the general region pool.  Returns NULL on OOM
 * (BUG FIX: the original dereferenced an unchecked CALLOC_STRUCT result).
 */
static struct pipe_buffer *
intel_be_buffer_create(struct pipe_winsys *winsys,
                       unsigned alignment,
                       unsigned usage,
                       unsigned size )
{
   struct intel_be_buffer *buffer = CALLOC_STRUCT( intel_be_buffer );
   struct intel_be_device *iws = intel_be_device(winsys);
   unsigned flags = 0;
   struct _DriBufferPool *pool;

   if (!buffer)
      return NULL;

   buffer->base.refcount = 1;
   buffer->base.alignment = alignment;
   buffer->base.usage = usage;
   buffer->base.size = size;

   if (usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_CONSTANT)) {
      flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
      pool = iws->mallocPool;
   } else if (usage & PIPE_BUFFER_USAGE_CUSTOM) {
      /* For vertex buffers */
      flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
      pool = iws->vertexPool;
   } else {
      flags |= DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
      pool = iws->regionPool;
   }

   if (usage & PIPE_BUFFER_USAGE_GPU_READ)
      flags |= DRM_BO_FLAG_READ;

   if (usage & PIPE_BUFFER_USAGE_GPU_WRITE)
      flags |= DRM_BO_FLAG_WRITE;

   /* drm complains if we don't set any read/write flags.
    */
   if ((flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) == 0)
      flags |= DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;

   buffer->pool = pool;
   driGenBuffers( buffer->pool,
                  "pipe buffer", 1, &buffer->driBO, alignment, flags, 0 );

   driBOData( buffer->driBO, size, NULL, buffer->pool, 0 );

   return &buffer->base;
}
static struct pipe_buffer *
intel_be_user_buffer_create(struct pipe_winsys *winsys, void *ptr, unsigned bytes)
{
struct intel_be_buffer *buffer = CALLOC_STRUCT( intel_be_buffer );
struct intel_be_device *iws = intel_be_device(winsys);
driGenUserBuffer( iws->regionPool,
"pipe user buffer", &buffer->driBO, ptr, bytes );
buffer->base.refcount = 1;
return &buffer->base;
}
/*
 * Create a pipe buffer that references an existing kernel BO by its
 * global handle (takes a reference on the BO).  Returns NULL on failure.
 */
struct pipe_buffer *
intel_be_buffer_from_handle(struct intel_be_device *device,
                            const char* name, unsigned handle)
{
   struct intel_be_buffer *be_buf = malloc(sizeof(*be_buf));
   struct pipe_buffer *buffer;

   if (!be_buf)
      goto err;

   memset(be_buf, 0, sizeof(*be_buf));

   driGenBuffers(device->staticPool, name, 1, &be_buf->driBO, 0, 0, 0);
   driBOSetReferenced(be_buf->driBO, handle);

   /* NOTE(review): error checking is stubbed out — the err_bo path is
    * currently unreachable. */
   if (0) /** XXX TODO check error */
      goto err_bo;

   buffer = &be_buf->base;
   buffer->refcount = 1;
   buffer->alignment = 0;
   buffer->usage = 0;
   buffer->size = driBOSize(be_buf->driBO);

   return buffer;
err_bo:
   free(be_buf);
err:
   return NULL;
}
/*
 * Allocate a buffer sized for a width x height surface of 'format',
 * rounding the row pitch up to 64 bytes.  Writes the pitch to *stride.
 */
static struct pipe_buffer *
intel_i915_surface_buffer_create(struct pipe_winsys *winsys,
                                 unsigned width, unsigned height,
                                 enum pipe_format format,
                                 unsigned usage,
                                 unsigned *stride)
{
   const unsigned alignment = 64;
   struct pipe_format_block block;
   unsigned nblocksx, nblocksy;

   pf_get_block(format, &block);
   nblocksx = pf_get_nblocksx(&block, width);
   nblocksy = pf_get_nblocksy(&block, height);
   *stride = round_up(nblocksx * block.size, alignment);

   return winsys->buffer_create(winsys, alignment,
                                usage,
                                *stride * nblocksy);
}
/*
* Fence functions
*/
/* Point *ptr at 'fence', releasing whatever it referenced before. */
static void
intel_be_fence_reference( struct pipe_winsys *sws,
                          struct pipe_fence_handle **ptr,
                          struct pipe_fence_handle *fence )
{
   if (*ptr)
      driFenceUnReference((struct _DriFenceObject **)ptr);

   if (fence)
      *ptr = (struct pipe_fence_handle *)driFenceReference((struct _DriFenceObject *)fence);
}
/* Non-blocking query: has the fence signalled for the given flag bits? */
static int
intel_be_fence_signalled( struct pipe_winsys *sws,
                          struct pipe_fence_handle *fence,
                          unsigned flag )
{
   return driFenceSignaled((struct _DriFenceObject *)fence, flag);
}
/* Block until the fence signals for the given flag bits. */
static int
intel_be_fence_finish( struct pipe_winsys *sws,
                       struct pipe_fence_handle *fence,
                       unsigned flag )
{
   return driFenceFinish((struct _DriFenceObject *)fence, flag, 0);
}
/*
* Misc functions
*/
/*
 * Initialize a caller-allocated device: install the pipe_winsys callbacks,
 * create the fence manager and the five buffer pools (malloc, static/DRM,
 * region, vertex, batch), then create the i915simple screen.
 * NOTE(review): always returns true — pool/screen creation failures are
 * not checked.
 */
boolean
intel_be_init_device(struct intel_be_device *dev, int fd, unsigned id)
{
   dev->fd = fd;
   dev->max_batch_size = 16 * 4096;
   dev->max_vertex_size = 128 * 4096;

   dev->base.buffer_create = intel_be_buffer_create;
   dev->base.user_buffer_create = intel_be_user_buffer_create;
   dev->base.buffer_map = intel_be_buffer_map;
   dev->base.buffer_unmap = intel_be_buffer_unmap;
   dev->base.buffer_destroy = intel_be_buffer_destroy;
   dev->base.surface_buffer_create = intel_i915_surface_buffer_create;
   dev->base.fence_reference = intel_be_fence_reference;
   dev->base.fence_signalled = intel_be_fence_signalled;
   dev->base.fence_finish = intel_be_fence_finish;

#if 0 /* Set by the winsys */
   dev->base.flush_frontbuffer = intel_flush_frontbuffer;
   dev->base.get_name = intel_get_name;
#endif

   dev->fMan = driInitFreeSlabManager(10, 10);
   dev->fenceMgr = driFenceMgrTTMInit(dev->fd);

   dev->mallocPool = driMallocPoolInit();
   dev->staticPool = driDRMPoolInit(dev->fd);
   /* Sizes: 64 128 256 512 1024 2048 4096 8192 16384 32768 */
   dev->regionPool = driSlabPoolInit(dev->fd,
                                     DRM_BO_FLAG_READ |
                                     DRM_BO_FLAG_WRITE |
                                     DRM_BO_FLAG_MEM_TT,
                                     DRM_BO_FLAG_READ |
                                     DRM_BO_FLAG_WRITE |
                                     DRM_BO_FLAG_MEM_TT,
                                     64,
                                     10, 120, 4096 * 64, 0,
                                     dev->fMan);

   dev->vertexPool = driSlabPoolInit(dev->fd,
                                     DRM_BO_FLAG_READ |
                                     DRM_BO_FLAG_WRITE |
                                     DRM_BO_FLAG_MEM_TT,
                                     DRM_BO_FLAG_READ |
                                     DRM_BO_FLAG_WRITE |
                                     DRM_BO_FLAG_MEM_TT,
                                     dev->max_vertex_size,
                                     1, 120, dev->max_vertex_size * 4, 0,
                                     dev->fMan);

   dev->batchPool = driSlabPoolInit(dev->fd,
                                    DRM_BO_FLAG_EXE |
                                    DRM_BO_FLAG_MEM_TT,
                                    DRM_BO_FLAG_EXE |
                                    DRM_BO_FLAG_MEM_TT,
                                    dev->max_batch_size,
                                    1, 40, dev->max_batch_size * 16, 0,
                                    dev->fMan);

   /* Fill in this struct with callbacks that i915simple will need to
    * communicate with the window system, buffer manager, etc.
    */
   dev->screen = i915_create_screen(&dev->base, id);

   return true;
}
/* Tear down the device's buffer pools (fence manager and slab manager
 * teardown is still missing — see TODO). */
void
intel_be_destroy_device(struct intel_be_device *dev)
{
   driPoolTakeDown(dev->mallocPool);
   driPoolTakeDown(dev->staticPool);
   driPoolTakeDown(dev->regionPool);
   driPoolTakeDown(dev->vertexPool);
   driPoolTakeDown(dev->batchPool);

   /** TODO takedown fenceMgr and fMan */
}

View file

@ -1,72 +0,0 @@
#ifndef INTEL_DRM_DEVICE_H
#define INTEL_DRM_DEVICE_H

#include "pipe/internal/p_winsys_screen.h"
#include "pipe/p_context.h"

/*
 * Device
 */

/**
 * Backend device: the pipe_winsys implementation plus the fence manager
 * and buffer pools shared by all contexts on one DRM fd.
 */
struct intel_be_device
{
   struct pipe_winsys base;

   /**
    * Hw level screen
    */
   struct pipe_screen *screen;

   int fd; /**< Drm file descriptor */

   size_t max_batch_size;
   size_t max_vertex_size;

   struct _DriFenceMgr *fenceMgr;

   struct _DriBufferPool *batchPool;   /**< slab pool for batchbuffers */
   struct _DriBufferPool *regionPool;  /**< general-purpose slab pool */
   struct _DriBufferPool *mallocPool;  /**< system-memory pool */
   struct _DriBufferPool *vertexPool;  /**< slab pool for vertex data */
   struct _DriBufferPool *staticPool;  /**< pool for shared/imported BOs */
   struct _DriFreeSlabManager *fMan;
};

boolean
intel_be_init_device(struct intel_be_device *device, int fd, unsigned id);

void
intel_be_destroy_device(struct intel_be_device *dev);

/*
 * Buffer
 */

/** pipe_buffer backed by a DRI buffer object from one of the pools. */
struct intel_be_buffer {
   struct pipe_buffer base;
   struct _DriBufferPool *pool;
   struct _DriBufferObject *driBO;
};

/**
 * Create a be buffer from a drm bo handle
 *
 * Takes a reference
 */
struct pipe_buffer *
intel_be_buffer_from_handle(struct intel_be_device *device,
                            const char* name, unsigned handle);

/* Downcast helpers. */
static INLINE struct intel_be_buffer *
intel_be_buffer(struct pipe_buffer *buf)
{
   return (struct intel_be_buffer *)buf;
}

static INLINE struct _DriBufferObject *
dri_bo(struct pipe_buffer *buf)
{
   return intel_be_buffer(buf)->driBO;
}

#endif

View file

@ -1,949 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <stdio.h>
#include "pipe/p_thread.h"
#include "errno.h"
#include "ws_dri_bufmgr.h"
#include "string.h"
#include "pipe/p_debug.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
/*
* This lock is here to protect drmBO structs changing underneath us during a
* validate list call, since validatelist cannot take individiual locks for
* each drmBO. Validatelist takes this lock in write mode. Any access to an
* individual drmBO should take this lock in read mode, since in that case, the
* driBufferObject mutex will protect the access. Locking order is
* driBufferObject mutex - > this rw lock.
*/
pipe_static_mutex(bmMutex);
pipe_static_condvar(bmCond);
static int kernelReaders = 0;
static int num_buffers = 0;
static int num_user_buffers = 0;
/* Return the drmBO held by the list node behind an opaque iterator. */
static drmBO *drmBOListBuf(void *iterator)
{
   drmBONode *node;
   drmMMListHead *l = (drmMMListHead *) iterator;
   node = DRMLISTENTRY(drmBONode, l, head);
   return node->buf;
}
/* Begin iteration: first node of the list, or NULL when it is empty. */
static void *drmBOListIterator(drmBOList *list)
{
   drmMMListHead *first = list->list.next;

   return (first == &list->list) ? NULL : (void *) first;
}
/* Advance the iterator; NULL once it wraps back to the list head. */
static void *drmBOListNext(drmBOList *list, void *iterator)
{
   drmMMListHead *cur = (drmMMListHead *) iterator;
   drmMMListHead *succ = cur->next;

   return (succ == &list->list) ? NULL : (void *) succ;
}
/*
 * Append 'item' to the list, recycling a node from the free list when one
 * is available, otherwise malloc'ing a new one.  Returns the node, or
 * NULL on allocation failure.
 */
static drmBONode *drmAddListItem(drmBOList *list, drmBO *item,
                                 uint64_t arg0,
                                 uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *l;

   l = list->free.next;
   if (l == &list->free) {
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         return NULL;
      }
      list->numCurrent++;
   }
   else {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
   }
   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADD(&node->head, &list->list);
   list->numOnList++;
   return node;
}
/*
 * Add 'buf' to the validate list, or — if it is already present — merge
 * the new flags/mask into the existing entry.  *newItem is set to 1 only
 * when a new node was created.  Returns 0, -ENOMEM, or -EINVAL when the
 * merged flags conflict.
 */
static int drmAddValidateItem(drmBOList *list, drmBO *buf, uint64_t flags,
                              uint64_t mask, int *newItem)
{
   drmBONode *node, *cur;
   drmMMListHead *l;

   *newItem = 0;
   cur = NULL;

   /* Linear scan for an existing entry for this BO. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      if (node->buf == buf) {
         cur = node;
         break;
      }
   }
   if (!cur) {
      cur = drmAddListItem(list, buf, flags, mask);
      if (!cur) {
         return -ENOMEM;
      }
      *newItem = 1;
      cur->arg0 = flags;
      cur->arg1 = mask;
   }
   else {
      /* Memory-placement flags are intersected; access flags are
       * unioned.  A masked non-memory flag that differs between the two
       * requests is a hard conflict. */
      uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
      uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;

      if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
         return -EINVAL;
      }

      cur->arg1 |= mask;
      cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);

      if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
          (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
         return -EINVAL;
      }
   }
   return 0;
}
/* Free every node on both the active and free lists. */
static void drmBOFreeList(drmBOList *list)
{
   drmBONode *node;
   drmMMListHead *l;

   l = list->list.next;
   while(l != &list->list) {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
      free(node);
      l = list->list.next;
      list->numCurrent--;
      list->numOnList--;
   }

   l = list->free.next;
   while(l != &list->free) {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
      free(node);
      l = list->free.next;
      list->numCurrent--;
   }
}
/*
 * Grow or shrink the node cache toward numTarget: malloc extra free nodes
 * when below target, release surplus free nodes when above.  Active nodes
 * are never touched.  Returns 0 or -ENOMEM.
 */
static int drmAdjustListNodes(drmBOList *list)
{
   drmBONode *node;
   drmMMListHead *l;
   int ret = 0;

   while(list->numCurrent < list->numTarget) {
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         ret = -ENOMEM;
         break;
      }
      list->numCurrent++;
      DRMLISTADD(&node->head, &list->free);
   }

   while(list->numCurrent > list->numTarget) {
      l = list->free.next;
      if (l == &list->free)
         break;
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
      free(node);
      list->numCurrent--;
   }
   return ret;
}
/* Initialize a BO list and pre-allocate numTarget cached nodes. */
static int drmBOCreateList(int numTarget, drmBOList *list)
{
   DRMINITLISTHEAD(&list->list);
   DRMINITLISTHEAD(&list->free);
   list->numTarget = numTarget;
   list->numCurrent = 0;
   list->numOnList = 0;
   return drmAdjustListNodes(list);
}
/* Move all active nodes back to the free list and re-trim the cache. */
static int drmBOResetList(drmBOList *list)
{
   drmMMListHead *l;
   int ret;

   ret = drmAdjustListNodes(list);
   if (ret)
      return ret;

   l = list->list.next;
   while (l != &list->list) {
      DRMLISTDEL(l);
      DRMLISTADD(l, &list->free);
      list->numOnList--;
      l = list->list.next;
   }
   return drmAdjustListNodes(list);
}
/* Take the kernel-BO lock in write mode: hold bmMutex and wait until all
 * readers have drained.  The mutex stays held until
 * driWriteUnlockKernelBO(). */
void driWriteLockKernelBO(void)
{
   pipe_mutex_lock(bmMutex);
   while(kernelReaders != 0)
      pipe_condvar_wait(bmCond, bmMutex);
}
/* Release the write-mode kernel-BO lock taken by driWriteLockKernelBO. */
void driWriteUnlockKernelBO(void)
{
   pipe_mutex_unlock(bmMutex);
}
/* Take the kernel-BO lock in read mode: bump the reader count under the
 * mutex; writers wait for the count to reach zero. */
void driReadLockKernelBO(void)
{
   pipe_mutex_lock(bmMutex);
   kernelReaders++;
   pipe_mutex_unlock(bmMutex);
}
/* Drop a read lock; wake any waiting writer when the last reader leaves. */
void driReadUnlockKernelBO(void)
{
   pipe_mutex_lock(bmMutex);
   if (--kernelReaders == 0)
      pipe_condvar_broadcast(bmCond);
   pipe_mutex_unlock(bmMutex);
}
/*
* TODO: Introduce fence pools in the same way as
* buffer object pools.
*/
/** Reference-counted buffer object handed out by a DriBufferPool. */
typedef struct _DriBufferObject
{
   DriBufferPool *pool;          /* pool that owns the storage */
   pipe_mutex mutex;             /* guards refCount and pool calls */
   int refCount;
   const char *name;
   uint64_t flags;
   unsigned hint;
   unsigned alignment;
   unsigned createdByReference;  /* unreference vs destroy on last unref */
   void *private;                /* pool-private storage handle */
   /* user-space buffer: */
   unsigned userBuffer;          /* nonzero: wraps caller memory, no BO */
   void *userData;
   unsigned userSize;
} DriBufferObject;

/** Pair of validate lists: kernel-side BOs and user-space buffers. */
typedef struct _DriBufferList {
   drmBOList drmBuffers;  /* List of kernel buffers needing validation */
   drmBOList driBuffers;  /* List of user-space buffers needing validation */
} DriBufferList;
/*
 * Report a fatal memory-manager error and abort.
 * (Cleanup: the old #ifndef NDEBUG / #else both called abort(), so the
 * preprocessor conditional was dead weight and has been removed.)
 */
void
bmError(int val, const char *file, const char *function, int line)
{
   printf("Fatal video memory manager error \"%s\".\n"
          "Check kernel logs or set the LIBGL_DEBUG\n"
          "environment variable to \"verbose\" for more info.\n"
          "Detected in file %s, line %d, function %s.\n",
          strerror(-val), file, line, function);
   abort();
}
/*
 * Return the kernel drmBO backing 'buf'.  Caller must already hold the
 * kernel-BO read lock for the pointer to stay valid; the extra read lock
 * here only covers the pool query itself.
 */
extern drmBO *
driBOKernel(struct _DriBufferObject *buf)
{
   drmBO *ret;

   driReadLockKernelBO();
   pipe_mutex_lock(buf->mutex);
   assert(buf->private != NULL);
   ret = buf->pool->kernel(buf->pool, buf->private);
   if (!ret)
      BM_CKFATAL(-EINVAL);
   pipe_mutex_unlock(buf->mutex);
   driReadUnlockKernelBO();

   return ret;
}
/* Block until the hardware is done with 'buf' (lazy = don't flush). */
void
driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
{

   /*
    * This function may block. Is it sane to keep the mutex held during
    * that time??
    */

   pipe_mutex_lock(buf->mutex);
   BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, &buf->mutex, lazy));
   pipe_mutex_unlock(buf->mutex);
}
/*
 * Map a buffer for CPU access.  User-space buffers return their
 * backing pointer directly; pool-backed buffers go through the pool's
 * map hook.  Returns NULL on failure.
 */
void *
driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
{
   void *ptr = NULL;
   int err;

   if (buf->userBuffer)
      return buf->userData;

   pipe_mutex_lock(buf->mutex);
   assert(buf->private != NULL);
   err = buf->pool->map(buf->pool, buf->private, flags, hint,
                        &buf->mutex, &ptr);
   pipe_mutex_unlock(buf->mutex);

   return (err == 0) ? ptr : NULL;
}
/* Release a CPU mapping obtained with driBOMap().  No-op for
 * user-space buffers. */
void
driBOUnmap(struct _DriBufferObject *buf)
{
   if (!buf->userBuffer) {
      assert(buf->private != NULL);
      pipe_mutex_lock(buf->mutex);
      BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
      pipe_mutex_unlock(buf->mutex);
   }
}
/* Return the buffer's current GPU offset as reported by its pool. */
unsigned long
driBOOffset(struct _DriBufferObject *buf)
{
   unsigned long gpuOffset;

   assert(buf->private != NULL);

   pipe_mutex_lock(buf->mutex);
   gpuOffset = buf->pool->offset(buf->pool, buf->private);
   pipe_mutex_unlock(buf->mutex);

   return gpuOffset;
}
/* Return the buffer's offset within its pool's backing store. */
unsigned long
driBOPoolOffset(struct _DriBufferObject *buf)
{
   unsigned long subOffset;

   assert(buf->private != NULL);

   pipe_mutex_lock(buf->mutex);
   subOffset = buf->pool->poolOffset(buf->pool, buf->private);
   pipe_mutex_unlock(buf->mutex);

   return subOffset;
}
/* Snapshot the buffer's current DRM_BO_FLAG_* flags. */
uint64_t
driBOFlags(struct _DriBufferObject *buf)
{
   uint64_t currentFlags;

   assert(buf->private != NULL);

   driReadLockKernelBO();
   pipe_mutex_lock(buf->mutex);
   currentFlags = buf->pool->flags(buf->pool, buf->private);
   pipe_mutex_unlock(buf->mutex);
   driReadUnlockKernelBO();

   return currentFlags;
}
/*
 * Take an extra reference on a buffer.  Referencing a buffer whose
 * count had already dropped to zero (i.e. mid-destruction) is a
 * fatal error.
 */
struct _DriBufferObject *
driBOReference(struct _DriBufferObject *buf)
{
   pipe_mutex_lock(buf->mutex);
   if (++buf->refCount == 1) {
      /* Count was 0: the object is dead; reviving it is a bug. */
      pipe_mutex_unlock(buf->mutex);
      BM_CKFATAL(-EINVAL);
   }
   pipe_mutex_unlock(buf->mutex);
   return buf;
}
/*
 * Drop one reference.  On the last reference, release the backing
 * storage (unreference for shared buffers, destroy otherwise),
 * adjust the global buffer counters, and free the wrapper.
 */
void
driBOUnReference(struct _DriBufferObject *buf)
{
   int tmp;

   if (!buf)
      return;

   pipe_mutex_lock(buf->mutex);
   tmp = --buf->refCount;
   if (!tmp) {
      /* Last reference: no-one else can see buf any more, so it is
       * safe to unlock before tearing it down. */
      pipe_mutex_unlock(buf->mutex);
      if (buf->private) {
         if (buf->createdByReference)
            buf->pool->unreference(buf->pool, buf->private);
         else
            buf->pool->destroy(buf->pool, buf->private);
      }
      if (buf->userBuffer)
         num_user_buffers--;
      else
         num_buffers--;
      free(buf);
   } else
      pipe_mutex_unlock(buf->mutex);
}
/*
 * (Re)allocate backing storage of `size` bytes for `buf` and
 * optionally fill it from `data`.  New storage is created when the
 * buffer has none yet, when it migrates to `newPool`, when the
 * current storage is too small, or when it is busy.  Returns 0 on
 * success or a negative errno.
 */
int
driBOData(struct _DriBufferObject *buf,
          unsigned size, const void *data,
          DriBufferPool *newPool,
          uint64_t flags)
{
   void *virtual = NULL;
   int newBuffer;
   int retval = 0;
   struct _DriBufferPool *pool;

   assert(!buf->userBuffer); /* XXX just do a memcpy? */

   pipe_mutex_lock(buf->mutex);
   pool = buf->pool;

   /* Adopt newPool when the buffer has no pool yet. */
   if (pool == NULL && newPool != NULL) {
      buf->pool = newPool;
      pool = newPool;
   }
   if (newPool == NULL)
      newPool = pool;

   if (!pool->create) {
      assert((size_t)"driBOData called on invalid buffer\n" & 0);
      BM_CKFATAL(-EINVAL);
   }

   /* Fresh allocation needed: no storage, pool change, or too small. */
   newBuffer = (!buf->private || pool != newPool ||
                pool->size(pool, buf->private) < size);

   if (!flags)
      flags = buf->flags;

   if (newBuffer) {
      /* Shared (referenced) storage cannot be reallocated. */
      if (buf->createdByReference) {
         assert((size_t)"driBOData requiring resizing called on shared buffer.\n" & 0);
         BM_CKFATAL(-EINVAL);
      }
      if (buf->private)
         buf->pool->destroy(buf->pool, buf->private);
      pool = newPool;
      buf->pool = newPool;
      buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
                                  buf->alignment);
      if (!buf->private)
         retval = -ENOMEM;
      if (retval == 0)
         retval = pool->map(pool, buf->private,
                            DRM_BO_FLAG_WRITE,
                            DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual);
   } else if (pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
                        DRM_BO_HINT_DONT_BLOCK, &buf->mutex, &virtual)) {
      /*
       * Buffer is busy. need to create a new one.
       */
      void *newBuf;
      newBuf = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
                            buf->alignment);
      if (newBuf) {
         buf->pool->destroy(buf->pool, buf->private);
         buf->private = newBuf;
      }
      /* NOTE(review): if the replacement could not be created we fall
       * back to a blocking map of the old, busy buffer. */
      retval = pool->map(pool, buf->private,
                         DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
   } else {
      uint64_t flag_diff = flags ^ buf->flags;
      /*
       * We might need to change buffer flags.
       */
      if (flag_diff){
         assert(pool->setStatus != NULL);
         BM_CKFATAL(pool->unmap(pool, buf->private));
         BM_CKFATAL(pool->setStatus(pool, buf->private, flag_diff,
                                    buf->flags));
         /* Nothing to upload: skip the trailing copy+unmap entirely. */
         if (!data)
            goto out;
         retval = pool->map(pool, buf->private,
                            DRM_BO_FLAG_WRITE, 0, &buf->mutex, &virtual);
      }
   }

   if (retval == 0) {
      if (data)
         memcpy(virtual, data, size);
      BM_CKFATAL(pool->unmap(pool, buf->private));
   }

 out:
   pipe_mutex_unlock(buf->mutex);
   return retval;
}
/* Copy `size` bytes from `data` into the buffer at byte `offset`. */
void
driBOSubData(struct _DriBufferObject *buf,
             unsigned long offset, unsigned long size, const void *data)
{
   void *mapped;

   assert(!buf->userBuffer); /* XXX just do a memcpy? */

   pipe_mutex_lock(buf->mutex);
   if (size != 0 && data != NULL) {
      BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
                                DRM_BO_FLAG_WRITE, 0, &buf->mutex,
                                &mapped));
      memcpy((unsigned char *) mapped + offset, data, size);
      BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   }
   pipe_mutex_unlock(buf->mutex);
}
/* Copy `size` bytes out of the buffer at byte `offset` into `data`. */
void
driBOGetSubData(struct _DriBufferObject *buf,
                unsigned long offset, unsigned long size, void *data)
{
   void *mapped;

   assert(!buf->userBuffer); /* XXX just do a memcpy? */

   pipe_mutex_lock(buf->mutex);
   if (size != 0 && data != NULL) {
      BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
                                DRM_BO_FLAG_READ, 0, &buf->mutex, &mapped));
      memcpy(data, (unsigned char *) mapped + offset, size);
      BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
   }
   pipe_mutex_unlock(buf->mutex);
}
/*
 * Attach `buf` to an existing kernel buffer object identified by a
 * global `handle` (buffer-sharing path).  The buffer must not already
 * have backing storage and its pool must implement reference().
 */
void
driBOSetReferenced(struct _DriBufferObject *buf,
                   unsigned long handle)
{
   pipe_mutex_lock(buf->mutex);
   if (buf->private != NULL) {
      assert((size_t)"Invalid buffer for setReferenced\n" & 0);
      BM_CKFATAL(-EINVAL);
   }
   if (buf->pool->reference == NULL) {
      assert((size_t)"Invalid buffer pool for setReferenced\n" & 0);
      BM_CKFATAL(-EINVAL);
   }
   buf->private = buf->pool->reference(buf->pool, handle);
   if (!buf->private) {
      assert((size_t)"Invalid buffer pool for setStatic\n" & 0);
      BM_CKFATAL(-ENOMEM);
   }
   buf->createdByReference = TRUE;
   /* Inherit placement/usage flags from the shared kernel object. */
   buf->flags = buf->pool->kernel(buf->pool, buf->private)->flags;
   pipe_mutex_unlock(buf->mutex);
}
/*
 * Create `n` buffer objects with no backing storage yet (storage is
 * allocated lazily by driBOData()).  Each buffer starts with one
 * reference.  Returns 0 or -ENOMEM; on failure, buffers created so
 * far remain valid in buffers[0..i-1].
 */
int
driGenBuffers(struct _DriBufferPool *pool,
              const char *name,
              unsigned n,
              struct _DriBufferObject *buffers[],
              unsigned alignment, uint64_t flags, unsigned hint)
{
   struct _DriBufferObject *buf;
   unsigned i;   /* was int: mismatched signedness against n */

   assert(pool);

   if (!flags)
      flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
              DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;

   for (i = 0; i < n; ++i) {
      buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
      if (!buf)
         return -ENOMEM;

      pipe_mutex_init(buf->mutex);
      pipe_mutex_lock(buf->mutex);
      buf->refCount = 1;
      buf->flags = flags;
      buf->hint = hint;
      buf->name = name;
      buf->alignment = alignment;
      buf->pool = pool;
      buf->createdByReference = 0;
      pipe_mutex_unlock(buf->mutex);
      buffers[i] = buf;

      /* Bug fix: num_buffers used to be bumped once per call, not once
       * per buffer, under-counting when n > 1 (driBOUnReference
       * decrements it per buffer). */
      ++num_buffers;
   }
   return 0;
}
/*
 * Wrap an application-provided memory area (`ptr`, `bytes`) in a
 * DriBufferObject.  User buffers bypass the pool: driBOMap() simply
 * returns `ptr`.
 * NOTE(review): the driGenBuffers() return value is not checked; on
 * OOM *buffers stays unset and the dereferences below would crash.
 */
void
driGenUserBuffer(struct _DriBufferPool *pool,
                 const char *name,
                 struct _DriBufferObject **buffers,
                 void *ptr, unsigned bytes)
{
   const unsigned alignment = 1, flags = 0, hint = 0;

   --num_buffers; /* JB: is inced in GenBuffes */
   driGenBuffers(pool, name, 1, buffers, alignment, flags, hint);
   ++num_user_buffers;

   (*buffers)->userBuffer = 1;
   (*buffers)->userData = ptr;
   (*buffers)->userSize = bytes;
}
/* Drop one reference on each of the `n` buffers in the array. */
void
driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
{
   unsigned i;   /* was int: mismatched signedness against n */

   for (i = 0; i < n; ++i)
      driBOUnReference(buffers[i]);
}
/*
 * Intentionally a no-op: there is no global buffer-manager state to
 * set up; all state lives in the individual pools.  Kept so callers
 * have an init entry point.
 */
void
driInitBufMgr(int fd)
{
   ;
}
/*
* Note that lists are per-context and don't need mutex protection.
*/
/*
 * Allocate a validation-list pair sized for `target` entries.
 * Fatal (via bmError) on allocation failure, matching the module's
 * BM_CKFATAL error convention.
 */
struct _DriBufferList *
driBOCreateList(int target)
{
   struct _DriBufferList *list = calloc(1, sizeof(*list));

   /* Bug fix: list was dereferenced without a NULL check. */
   if (!list)
      BM_CKFATAL(-ENOMEM);

   BM_CKFATAL(drmBOCreateList(target, &list->drmBuffers));
   BM_CKFATAL(drmBOCreateList(target, &list->driBuffers));
   return list;
}
/* Reset both validation lists; returns the first error encountered. */
int
driBOResetList(struct _DriBufferList * list)
{
   int err = drmBOResetList(&list->drmBuffers);

   if (err == 0)
      err = drmBOResetList(&list->driBuffers);

   return err;
}
/* Release both validation lists and the list wrapper itself. */
void
driBOFreeList(struct _DriBufferList * list)
{
   drmBOFreeList(&list->drmBuffers);
   drmBOFreeList(&list->driBuffers);
   free(list);
}
/*
* Copied from libdrm, because it is needed by driAddValidateItem.
*/
/*
 * Append a node for `item` to the list, recording the requested
 * flags (arg0) and mask (arg1).  Nodes are recycled from the list's
 * free pool when possible.  Returns NULL on allocation failure.
 */
static drmBONode *
driAddListItem(drmBOList * list, drmBO * item,
               uint64_t arg0, uint64_t arg1)
{
   drmBONode *node;
   drmMMListHead *l;

   l = list->free.next;
   if (l == &list->free) {
      /* Free pool exhausted: grow the list with a fresh node. */
      node = (drmBONode *) malloc(sizeof(*node));
      if (!node) {
         return NULL;
      }
      list->numCurrent++;
   } else {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(drmBONode, l, head);
   }
   /* Clear the kernel ioctl argument block before reuse. */
   memset(&node->bo_arg, 0, sizeof(node->bo_arg));
   node->buf = item;
   node->arg0 = arg0;
   node->arg1 = arg1;
   DRMLISTADDTAIL(&node->head, &list->list);
   list->numOnList++;
   return node;
}
/*
* Slightly modified version compared to the libdrm version.
* This one returns the list index of the buffer put on the list.
*/
/*
 * Add (or merge) a validation request for `buf`.  *itemLoc receives
 * the buffer's index on the list; *pnode the list node.  Returns 0,
 * -ENOMEM, or -EINVAL on conflicting flag requests.
 */
static int
driAddValidateItem(drmBOList * list, drmBO * buf, uint64_t flags,
                   uint64_t mask, int *itemLoc,
                   struct _drmBONode **pnode)
{
   drmBONode *node, *cur;
   drmMMListHead *l;
   int count = 0;

   /* Look for an existing entry for this buffer, counting its index. */
   cur = NULL;
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(drmBONode, l, head);
      if (node->buf == buf) {
         cur = node;
         break;
      }
      count++;
   }
   if (!cur) {
      cur = driAddListItem(list, buf, flags, mask);
      if (!cur)
         return -ENOMEM;
      cur->arg0 = flags;
      cur->arg1 = mask;
   } else {
      /* Merge with the existing request: memory domains intersect,
       * access flags accumulate. */
      uint64_t memFlags = cur->arg0 & flags & DRM_BO_MASK_MEM;
      uint64_t accFlags = (cur->arg0 | flags) & ~DRM_BO_MASK_MEM;

      /* Both requests insist on conflicting non-memory flags. */
      if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
         return -EINVAL;
      }
      cur->arg1 |= mask;
      cur->arg0 = (cur->arg0 & ~mask) | ((memFlags | accFlags) & mask);
      /* The intersection removed every allowed memory domain. */
      if (((cur->arg1 & DRM_BO_MASK_MEM) != 0) &&
          (cur->arg0 & DRM_BO_MASK_MEM) == 0) {
         return -EINVAL;
      }
   }
   *itemLoc = count;
   *pnode = cur;
   return 0;
}
/*
 * Enter `buf` on both sides of a validation list: its kernel drmBO on
 * the drm list and the DriBufferObject itself on the dri list.  A
 * first-time add takes a reference that driBOUnrefUserList() or
 * driBOFenceUserList() later drops.  *itemLoc receives the buffer's
 * index on the kernel list.
 */
void
driBOAddListItem(struct _DriBufferList * list, struct _DriBufferObject *buf,
                 uint64_t flags, uint64_t mask, int *itemLoc,
                 struct _drmBONode **node)
{
   int newItem;

   pipe_mutex_lock(buf->mutex);
   BM_CKFATAL(driAddValidateItem(&list->drmBuffers,
                                 buf->pool->kernel(buf->pool, buf->private),
                                 flags, mask, itemLoc, node));
   /* The dri list stores the DriBufferObject pointer cast to drmBO *. */
   BM_CKFATAL(drmAddValidateItem(&list->driBuffers, (drmBO *) buf,
                                 flags, mask, &newItem));
   if (newItem)
      buf->refCount++;
   pipe_mutex_unlock(buf->mutex);
}
/*
 * Return the kernel validation list.  Takes the kernel-BO write lock
 * and returns holding it; the caller must release it with
 * driPutdrmBOList().
 */
drmBOList *driGetdrmBOList(struct _DriBufferList *list)
{
   driWriteLockKernelBO();
   return &list->drmBuffers;
}
/* Release the write lock taken by driGetdrmBOList(). */
void driPutdrmBOList(struct _DriBufferList *list)
{
   driWriteUnlockKernelBO();
}
void
driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
{
pipe_mutex_lock(buf->mutex);
if (buf->pool->fence)
BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
pipe_mutex_unlock(buf->mutex);
}
void
driBOUnrefUserList(struct _DriBufferList *list)
{
struct _DriBufferObject *buf;
void *curBuf;
curBuf = drmBOListIterator(&list->driBuffers);
while (curBuf) {
buf = (struct _DriBufferObject *)drmBOListBuf(curBuf);
driBOUnReference(buf);
curBuf = drmBOListNext(&list->driBuffers, curBuf);
}
}
/*
 * Wrap the kernel fence `kFence` in a _DriFenceObject, attach it to
 * every buffer on the list's user-space side, drop the per-validate
 * references and reset the list.  Returns the new fence; the single
 * reference is owned by the caller.
 */
struct _DriFenceObject *
driBOFenceUserList(struct _DriFenceMgr *mgr,
                   struct _DriBufferList *list, const char *name,
                   drmFence *kFence)
{
   struct _DriFenceObject *fence;
   struct _DriBufferObject *buf;
   void *curBuf;

   /* sizeof(*kFence) trailing bytes: driFenceCreate copies the kernel
    * fence into the same allocation. */
   fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
                          kFence, sizeof(*kFence));
   curBuf = drmBOListIterator(&list->driBuffers);

   /*
    * User-space fencing callbacks.
    */
   while (curBuf) {
      buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
      driBOFence(buf, fence);
      driBOUnReference(buf);
      curBuf = drmBOListNext(&list->driBuffers, curBuf);
   }

   driBOResetList(list);
   return fence;
}
void
driBOValidateUserList(struct _DriBufferList * list)
{
void *curBuf;
struct _DriBufferObject *buf;
curBuf = drmBOListIterator(&list->driBuffers);
/*
* User-space validation callbacks.
*/
while (curBuf) {
buf = (struct _DriBufferObject *) drmBOListBuf(curBuf);
pipe_mutex_lock(buf->mutex);
if (buf->pool->validate)
BM_CKFATAL(buf->pool->validate(buf->pool, buf->private, &buf->mutex));
pipe_mutex_unlock(buf->mutex);
curBuf = drmBOListNext(&list->driBuffers, curBuf);
}
}
/* Destroy a buffer pool via its takeDown hook. */
void
driPoolTakeDown(struct _DriBufferPool *pool)
{
   pool->takeDown(pool);
}
/* Return the size in bytes of the buffer's backing storage. */
unsigned long
driBOSize(struct _DriBufferObject *buf)
{
   unsigned long nBytes;

   pipe_mutex_lock(buf->mutex);
   nBytes = buf->pool->size(buf->pool, buf->private);
   pipe_mutex_unlock(buf->mutex);

   return nBytes;
}
/* Debug accessor: the kernel-side validation list (no locking). */
drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list)
{
   return &list->drmBuffers;
}
/* Debug accessor: the user-space validation list (no locking). */
drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list)
{
   return &list->driBuffers;
}

View file

@ -1,138 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#ifndef _PSB_BUFMGR_H_
#define _PSB_BUFMGR_H_
#include <xf86mm.h>
#include "i915_drm.h"
#include "ws_dri_fencemgr.h"
/* One entry on a validation list: the buffer, its kernel ioctl
 * argument block, and the requested flags (arg0) / mask (arg1). */
typedef struct _drmBONode
{
   drmMMListHead head;             /* link in drmBOList::list or ::free */
   drmBO *buf;
   struct drm_i915_op_arg bo_arg;  /* kernel validate-op argument block */
   uint64_t arg0;
   uint64_t arg1;
} drmBONode;
/* Reusable list of drmBONodes with a free pool to recycle nodes. */
typedef struct _drmBOList {
   unsigned numTarget;   /* presumably the target pre-allocation count — confirm against libdrm */
   unsigned numCurrent;  /* nodes allocated so far */
   unsigned numOnList;   /* nodes currently on the active list */
   drmMMListHead list;   /* active entries */
   drmMMListHead free;   /* recycled entries */
} drmBOList;
struct _DriFenceObject;
struct _DriBufferObject;
struct _DriBufferPool;
struct _DriBufferList;
/*
* Return a pointer to the libdrm buffer object this DriBufferObject
* uses.
*/
extern drmBO *driBOKernel(struct _DriBufferObject *buf);
extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
unsigned hint);
extern void driBOUnmap(struct _DriBufferObject *buf);
extern unsigned long driBOOffset(struct _DriBufferObject *buf);
extern unsigned long driBOPoolOffset(struct _DriBufferObject *buf);
extern uint64_t driBOFlags(struct _DriBufferObject *buf);
extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
extern void driBOUnReference(struct _DriBufferObject *buf);
extern int driBOData(struct _DriBufferObject *r_buf,
unsigned size, const void *data,
struct _DriBufferPool *pool, uint64_t flags);
extern void driBOSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size,
const void *data);
extern void driBOGetSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size,
void *data);
extern int driGenBuffers(struct _DriBufferPool *pool,
const char *name,
unsigned n,
struct _DriBufferObject *buffers[],
unsigned alignment, uint64_t flags, unsigned hint);
extern void driGenUserBuffer(struct _DriBufferPool *pool,
const char *name,
struct _DriBufferObject *buffers[],
void *ptr, unsigned bytes);
extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
extern void driInitBufMgr(int fd);
extern struct _DriBufferList *driBOCreateList(int target);
extern int driBOResetList(struct _DriBufferList * list);
extern void driBOAddListItem(struct _DriBufferList * list,
struct _DriBufferObject *buf,
uint64_t flags, uint64_t mask, int *itemLoc,
struct _drmBONode **node);
extern void driBOValidateList(int fd, struct _DriBufferList * list);
extern void driBOFreeList(struct _DriBufferList * list);
extern struct _DriFenceObject *driBOFenceUserList(struct _DriFenceMgr *mgr,
struct _DriBufferList *list,
const char *name,
drmFence *kFence);
extern void driBOUnrefUserList(struct _DriBufferList *list);
extern void driBOValidateUserList(struct _DriBufferList * list);
extern drmBOList *driGetdrmBOList(struct _DriBufferList *list);
extern void driPutdrmBOList(struct _DriBufferList *list);
extern void driBOFence(struct _DriBufferObject *buf,
struct _DriFenceObject *fence);
extern void driPoolTakeDown(struct _DriBufferPool *pool);
extern void driBOSetReferenced(struct _DriBufferObject *buf,
unsigned long handle);
unsigned long driBOSize(struct _DriBufferObject *buf);
extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);
extern void driPoolTakeDown(struct _DriBufferPool *pool);
extern void driReadLockKernelBO(void);
extern void driReadUnlockKernelBO(void);
extern void driWriteLockKernelBO(void);
extern void driWriteUnlockKernelBO(void);
/*
* For debugging purposes.
*/
extern drmBOList *driBOGetDRMBuffers(struct _DriBufferList *list);
extern drmBOList *driBOGetDRIBuffers(struct _DriBufferList *list);
#endif

View file

@ -1,102 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _PSB_BUFPOOL_H_
#define _PSB_BUFPOOL_H_
#include <xf86drm.h>
#include "pipe/p_thread.h"
struct _DriFenceObject;
/*
 * Virtual interface a buffer pool implements.  Hooks take the pool
 * and the per-buffer `private` handle returned by create()/reference().
 * Optional hooks (fence, validate, setStatus, ...) may be NULL.
 */
typedef struct _DriBufferPool
{
   int fd;                     /* drm file descriptor */
   int (*map) (struct _DriBufferPool * pool, void *private,
               unsigned flags, int hint, pipe_mutex *mutex,
               void **virtual);
   int (*unmap) (struct _DriBufferPool * pool, void *private);
   int (*destroy) (struct _DriBufferPool * pool, void *private);
   unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
   unsigned long (*poolOffset) (struct _DriBufferPool * pool, void *private);
   uint64_t (*flags) (struct _DriBufferPool * pool, void *private);
   unsigned long (*size) (struct _DriBufferPool * pool, void *private);
   void *(*create) (struct _DriBufferPool * pool, unsigned long size,
                    uint64_t flags, unsigned hint, unsigned alignment);
   /* Share an existing kernel BO by global handle. */
   void *(*reference) (struct _DriBufferPool * pool, unsigned handle);
   int (*unreference) (struct _DriBufferPool * pool, void *private);
   int (*fence) (struct _DriBufferPool * pool, void *private,
                 struct _DriFenceObject * fence);
   /* The kernel drmBO backing this buffer. */
   drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
   int (*validate) (struct _DriBufferPool * pool, void *private, pipe_mutex *mutex);
   int (*waitIdle) (struct _DriBufferPool *pool, void *private, pipe_mutex *mutex,
                    int lazy);
   /* Change placement/usage flags in place; may be NULL if unsupported. */
   int (*setStatus) (struct _DriBufferPool *pool, void *private,
                     uint64_t flag_diff, uint64_t old_flags);
   void (*takeDown) (struct _DriBufferPool * pool);
   void *data;                 /* pool-private state */
} DriBufferPool;
extern void bmError(int val, const char *file, const char *function,
int line);
/*
 * Check a negative-errno style return value; on failure report
 * fatally via bmError().
 * Bug fix: the trailing ';' after while(0) has been removed so that
 * `BM_CKFATAL(x);` expands to a single statement and stays
 * well-formed inside unbraced if/else bodies.
 */
#define BM_CKFATAL(val)                                        \
   do {                                                        \
      int tstVal = (val);                                      \
      if (tstVal)                                              \
         bmError(tstVal, __FILE__, __FUNCTION__, __LINE__);    \
   } while(0)
/*
* Builtin pools.
*/
/*
* Kernel buffer objects. Size in multiples of page size. Page size aligned.
*/
extern struct _DriBufferPool *driDRMPoolInit(int fd);
extern struct _DriBufferPool *driMallocPoolInit(void);
struct _DriFreeSlabManager;
extern struct _DriBufferPool * driSlabPoolInit(int fd, uint64_t flags,
uint64_t validMask,
uint32_t smallestSize,
uint32_t numSizes,
uint32_t desiredNumBuffers,
uint32_t maxSlabSize,
uint32_t pageAlignment,
struct _DriFreeSlabManager *fMan);
extern void driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan);
extern struct _DriFreeSlabManager *
driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
#endif

View file

@ -1,268 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <unistd.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
#include "assert.h"
/*
* Buffer pool implementation using DRM buffer objects as DRI buffer objects.
*/
/*
 * Allocate a kernel buffer object of `size` bytes.  An alignment
 * larger than the page size must be a multiple of it; the kernel is
 * handed the alignment in whole pages.  Returns NULL on failure.
 */
static void *
pool_create(struct _DriBufferPool *pool,
            unsigned long size, uint64_t flags, unsigned hint,
            unsigned alignment)
{
   unsigned pageSize = getpagesize();
   drmBO *bo;

   if ((alignment > pageSize) && (alignment % pageSize))
      return NULL;

   bo = (drmBO *) malloc(sizeof(*bo));
   if (bo == NULL)
      return NULL;

   if (drmBOCreate(pool->fd, size, alignment / pageSize, NULL,
                   flags, hint, bo) != 0) {
      free(bo);
      return NULL;
   }
   return (void *) bo;
}
/* Wrap an existing kernel BO, identified by its global handle, in a
 * freshly allocated drmBO.  Returns NULL on failure. */
static void *
pool_reference(struct _DriBufferPool *pool, unsigned handle)
{
   drmBO *bo = (drmBO *) malloc(sizeof(*bo));

   if (bo == NULL)
      return NULL;

   if (drmBOReference(pool->fd, handle, bo) != 0) {
      free(bo);
      return NULL;
   }
   return (void *) bo;
}
/* Drop the kernel reference on a pool-created BO and free the
 * user-space wrapper. */
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
   drmBO *bo = (drmBO *) private;
   int err;

   driReadLockKernelBO();
   err = drmBOUnreference(pool->fd, bo);
   free(bo);
   driReadUnlockKernelBO();

   return err;
}
/* Release a BO obtained via pool_reference().  Identical to
 * pool_destroy(): the kernel reference counts both the same way. */
static int
pool_unreference(struct _DriBufferPool *pool, void *private)
{
   drmBO *bo = (drmBO *) private;
   int err;

   driReadLockKernelBO();
   err = drmBOUnreference(pool->fd, bo);
   free(bo);
   driReadUnlockKernelBO();

   return err;
}
/* Map the kernel BO into CPU address space.  `mutex` is unused here:
 * any blocking happens inside the drmBOMap ioctl. */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, pipe_mutex *mutex, void **virtual)
{
   drmBO *bo = (drmBO *) private;
   int err;

   driReadLockKernelBO();
   err = drmBOMap(pool->fd, bo, flags, hint, virtual);
   driReadUnlockKernelBO();

   return err;
}
/* Release a CPU mapping created by pool_map(). */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   drmBO *bo = (drmBO *) private;
   int err;

   driReadLockKernelBO();
   err = drmBOUnmap(pool->fd, bo);
   driReadUnlockKernelBO();

   return err;
}
/*
 * Return the buffer's GPU offset.  Only valid for buffers pinned with
 * DRM_BO_FLAG_NO_MOVE, since a movable buffer's offset can change.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   drmBO *buf = (drmBO *) private;
   unsigned long offset;

   driReadLockKernelBO();
   assert(buf->flags & DRM_BO_FLAG_NO_MOVE);
   offset = buf->offset;
   driReadUnlockKernelBO();

   /* Bug fix: return the snapshot taken under the read lock instead
    * of re-reading buf->offset after unlocking. */
   return offset;
}
/* DRM-pool buffers are not sub-allocated, so the offset within the
 * (one-buffer) "pool" is always zero. */
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   (void) pool;
   (void) private;
   return 0;
}
/* Snapshot the BO's current flags under the kernel-BO read lock. */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   drmBO *bo = (drmBO *) private;
   uint64_t boFlags;

   driReadLockKernelBO();
   boFlags = bo->flags;
   driReadUnlockKernelBO();

   return boFlags;
}
/* Return the BO's size in bytes. */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   drmBO *buf = (drmBO *) private;
   unsigned long size;

   driReadLockKernelBO();
   size = buf->size;
   driReadUnlockKernelBO();

   /* Bug fix: return the snapshot taken under the read lock instead
    * of re-reading buf->size after unlocking. */
   return size;
}
/*
 * Fence hook for kernel-managed buffers.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   /*
    * Noop. The kernel handles all fencing.
    */
   return 0;
}
/* In this pool, the private handle *is* the kernel drmBO. */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   (void) pool;
   return (drmBO *) private;
}
/* Block (optionally lazily) until the kernel BO has gone idle. */
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, pipe_mutex *mutex,
              int lazy)
{
   drmBO *bo = (drmBO *) private;
   unsigned waitHint = lazy ? DRM_BO_HINT_WAIT_LAZY : 0;
   int err;

   driReadLockKernelBO();
   err = drmBOWaitIdle(pool->fd, bo, waitHint);
   driReadUnlockKernelBO();

   return err;
}
/* Free the pool wrapper itself; individual buffers must already have
 * been destroyed/unreferenced separately. */
static void
pool_takedown(struct _DriBufferPool *pool)
{
   free(pool);
}
/*static int
pool_setStatus(struct _DriBufferPool *pool, void *private,
uint64_t flag_diff, uint64_t old_flags)
{
drmBO *buf = (drmBO *) private;
uint64_t new_flags = old_flags ^ flag_diff;
int ret;
driReadLockKernelBO();
ret = drmBOSetStatus(pool->fd, buf, new_flags, flag_diff,
0, 0, 0);
driReadUnlockKernelBO();
return ret;
}*/
/*
 * Create a buffer pool in which every buffer is an individual kernel
 * (TTM) buffer object.  Returns NULL on allocation failure.
 */
struct _DriBufferPool *
driDRMPoolInit(int fd)
{
   struct _DriBufferPool *pool;

   /* Bug fix: allocate with calloc so unimplemented hooks read as
    * NULL.  In particular pool->setStatus was previously left as
    * uninitialized garbage while driBOData() asserts on and calls it
    * when buffer flags change. */
   pool = (struct _DriBufferPool *) calloc(1, sizeof(*pool));

   if (!pool)
      return NULL;

   pool->fd = fd;
   pool->map = &pool_map;
   pool->unmap = &pool_unmap;
   pool->destroy = &pool_destroy;
   pool->offset = &pool_offset;
   pool->poolOffset = &pool_poolOffset;
   pool->flags = &pool_flags;
   pool->size = &pool_size;
   pool->create = &pool_create;
   pool->fence = &pool_fence;
   pool->kernel = &pool_kernel;
   pool->validate = NULL;
   pool->waitIdle = &pool_waitIdle;
   pool->takeDown = &pool_takedown;
   pool->reference = &pool_reference;
   pool->unreference = &pool_unreference;
   pool->setStatus = NULL;   /* explicit: in-place flag changes unsupported */
   pool->data = NULL;
   return pool;
}

View file

@ -1,377 +0,0 @@
#include "ws_dri_fencemgr.h"
#include "pipe/p_thread.h"
#include <xf86mm.h>
#include <string.h>
#include <unistd.h>
/*
* Note: Locking order is
* _DriFenceObject::mutex
* _DriFenceMgr::mutex
*/
/* Reference-counted manager for per-class ordered fence lists. */
struct _DriFenceMgr {
   /*
    * Constant members. Need no mutex protection.
    */
   struct _DriFenceMgrCreateInfo info;  /* backend callbacks and class count */
   void *private;                       /* backend state (TTM backend: the drm fd) */

   /*
    * These members are protected by this->mutex
    */
   pipe_mutex mutex;
   int refCount;
   drmMMListHead *heads;   /* one fence list per fence class */
   int num_fences;
};
/* A reference-counted fence, linked into its manager's class list. */
struct _DriFenceObject {
   /*
    * These members are constant and need no mutex protection.
    */
   struct _DriFenceMgr *mgr;
   uint32_t fence_class;
   uint32_t fence_type;

   /*
    * These members are protected by mgr->mutex.
    */
   drmMMListHead head;   /* position in mgr->heads[fence_class] */
   int refCount;

   /*
    * These members are protected by this->mutex.
    */
   pipe_mutex mutex;
   uint32_t signaled_type;  /* subset of fence_type already signaled */
   void *private;           /* backend fence handle (may be trailing storage) */
};
/* Return the fence-type bitmask the fence was created with
 * (constant, so no locking is needed). */
uint32_t
driFenceType(struct _DriFenceObject *fence)
{
   return fence->fence_type;
}
/*
 * Create a fence manager with one list head per fence class described
 * in `info`.  Returns NULL on allocation failure.
 */
struct _DriFenceMgr *
driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
{
   struct _DriFenceMgr *tmp;
   uint32_t i;

   tmp = calloc(1, sizeof(*tmp));
   if (!tmp)
      return NULL;

   pipe_mutex_init(tmp->mutex);
   pipe_mutex_lock(tmp->mutex);
   tmp->refCount = 1;
   tmp->info = *info;
   tmp->num_fences = 0;
   tmp->heads = calloc(tmp->info.num_classes, sizeof(*tmp->heads));
   if (!tmp->heads) {
      /* Bug fix: unlock before freeing (tmp used to be freed while
       * its mutex was still held) and drop the redundant NULL check. */
      pipe_mutex_unlock(tmp->mutex);
      free(tmp);
      return NULL;
   }

   for (i = 0; i < tmp->info.num_classes; ++i)
      DRMINITLISTHEAD(&tmp->heads[i]);

   pipe_mutex_unlock(tmp->mutex);
   return tmp;
}
/*
 * Drop one manager reference; called with mgr->mutex held.  Frees the
 * manager on the last reference (no unlock needed then: no other
 * holder can exist), otherwise unlocks.
 */
static void
driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
{
   struct _DriFenceMgr *mgr = *pMgr;

   *pMgr = NULL;
   if (--mgr->refCount == 0) {
      /* Bug fix: the per-class list heads allocated in
       * driFenceMgrCreate() were leaked on teardown. */
      free(mgr->heads);
      free(mgr);
   } else
      pipe_mutex_unlock(mgr->mutex);
}
/* Public unreference: takes the manager lock, then drops a reference
 * (the helper releases or frees as appropriate). */
void
driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
{
   pipe_mutex_lock((*pMgr)->mutex);
   driFenceMgrUnrefUnlock(pMgr);
}
/*
 * Drop one fence reference; called with mgr->mutex held.  On the last
 * reference the fence is unlinked, its backend handle released, and
 * the manager reference it held is dropped (count only — the caller
 * still owns the mgr lock).
 */
static void
driFenceUnReferenceLocked(struct _DriFenceObject **pFence)
{
   struct _DriFenceObject *fence = *pFence;
   struct _DriFenceMgr *mgr = fence->mgr;

   *pFence = NULL;
   if (--fence->refCount == 0) {
      DRMLISTDELINIT(&fence->head);
      if (fence->private)
         mgr->info.unreference(mgr, &fence->private);
      --mgr->num_fences;
      fence->mgr = NULL;
      --mgr->refCount;
      free(fence);
   }
}
/*
 * Walk backwards from `list` through the (ordered) class list,
 * marking `fence_type` signaled on every older fence.  Called with
 * mgr->mutex held; temporarily drops it to honour the fence-then-mgr
 * lock order.
 */
static void
driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
                              drmMMListHead *list,
                              uint32_t fence_class,
                              uint32_t fence_type)
{
   struct _DriFenceObject *entry;
   drmMMListHead *prev;

   while(list != &mgr->heads[fence_class]) {
      entry = DRMLISTENTRY(struct _DriFenceObject, list, head);

      /*
       * Up refcount so that entry doesn't disappear from under us
       * when we unlock-relock mgr to get the correct locking order.
       */
      ++entry->refCount;
      pipe_mutex_unlock(mgr->mutex);
      pipe_mutex_lock(entry->mutex);
      pipe_mutex_lock(mgr->mutex);

      /* Capture the predecessor while we hold mgr->mutex again. */
      prev = list->prev;

      if (list->prev == list) {
         /*
          * Somebody else removed the entry from the list.
          */
         pipe_mutex_unlock(entry->mutex);
         driFenceUnReferenceLocked(&entry);
         return;
      }

      entry->signaled_type |= (fence_type & entry->fence_type);
      if (entry->signaled_type == entry->fence_type) {
         /* Fully signaled: unlink and release the backend handle. */
         DRMLISTDELINIT(list);
         mgr->info.unreference(mgr, &entry->private);
      }
      pipe_mutex_unlock(entry->mutex);
      driFenceUnReferenceLocked(&entry);
      list = prev;
   }
}
/*
 * Block until the `fence_type` bits of the fence have signaled, then
 * propagate the signal to all older fences in the same class.
 * Returns 0 on success or the backend's error code.
 */
int
driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
               int lazy_hint)
{
   struct _DriFenceMgr *mgr = fence->mgr;
   int ret = 0;

   pipe_mutex_lock(fence->mutex);

   /* Fast path: already signaled. */
   if ((fence->signaled_type & fence_type) == fence_type)
      goto out0;

   ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
   if (ret)
      goto out0;

   /* Swap to mgr->mutex only: driSignalPreviousFencesLocked expects
    * just the manager lock held. */
   pipe_mutex_lock(mgr->mutex);
   pipe_mutex_unlock(fence->mutex);

   driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
                                 fence_type);
   pipe_mutex_unlock(mgr->mutex);
   return 0;

 out0:
   pipe_mutex_unlock(fence->mutex);
   return ret;
}
/* Return the cached signaled-type mask without querying the kernel. */
uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
{
   uint32_t cached;

   pipe_mutex_lock(fence->mutex);
   cached = fence->signaled_type;
   pipe_mutex_unlock(fence->mutex);

   return cached;
}
/*
 * Query the backend for the fence's signaled state, store it in
 * *signaled, and propagate any newly signaled bits to older fences in
 * the class.  Returns 0 on success or the backend's error code (in
 * which case *signaled holds the cached state).
 */
int
driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
                     uint32_t *signaled)
{
   int ret = 0;
   struct _DriFenceMgr *mgr;

   pipe_mutex_lock(fence->mutex);
   mgr = fence->mgr;
   *signaled = fence->signaled_type;

   /* Fast path: cache already covers the requested bits. */
   if ((fence->signaled_type & flush_type) == flush_type)
      goto out0;

   ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
   if (ret) {
      *signaled = fence->signaled_type;
      goto out0;
   }

   /* Nothing new signaled: no propagation needed. */
   if ((fence->signaled_type | *signaled) == fence->signaled_type)
      goto out0;

   /* Swap to mgr->mutex only, per the lock-order rule. */
   pipe_mutex_lock(mgr->mutex);
   pipe_mutex_unlock(fence->mutex);

   driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
                                 *signaled);
   pipe_mutex_unlock(mgr->mutex);
   return 0;

 out0:
   pipe_mutex_unlock(fence->mutex);
   return ret;
}
struct _DriFenceObject *
driFenceReference(struct _DriFenceObject *fence)
{
pipe_mutex_lock(fence->mgr->mutex);
++fence->refCount;
pipe_mutex_unlock(fence->mgr->mutex);
return fence;
}
/*
 * Drop one fence reference.  Temporarily pins the manager with an
 * extra reference so it survives the fence teardown, then releases
 * the pin (which also unlocks or frees the manager).
 */
void
driFenceUnReference(struct _DriFenceObject **pFence)
{
   struct _DriFenceMgr *mgr;

   if (*pFence == NULL)
      return;

   mgr = (*pFence)->mgr;
   pipe_mutex_lock(mgr->mutex);
   ++mgr->refCount;
   driFenceUnReferenceLocked(pFence);
   driFenceMgrUnrefUnlock(&mgr);
}
/*
 * Create a fence in class `fence_class` covering `fence_type`.  When
 * private_size != 0 the private data is copied into 16-byte-aligned
 * trailing storage of the same allocation; otherwise `private` is
 * adopted as-is.  Returns NULL on OOM (after waiting on the kernel
 * fence so the command stream still completes).
 */
struct _DriFenceObject
*driFenceCreate(struct _DriFenceMgr *mgr, uint32_t fence_class,
                uint32_t fence_type, void *private, size_t private_size)
{
   struct _DriFenceObject *fence;
   size_t fence_size = sizeof(*fence);

   /* Round up so the trailing private area is 16-byte aligned. */
   if (private_size)
      fence_size = ((fence_size + 15) & ~15);

   fence = calloc(1, fence_size + private_size);

   if (!fence) {
      /* OOM: wait on the kernel fence instead of tracking it; the
       * long sleep is a last-ditch fallback if even the wait fails. */
      int ret = mgr->info.finish(mgr, private, fence_type, 0);
      if (ret)
         usleep(10000000);
      return NULL;
   }

   pipe_mutex_init(fence->mutex);
   pipe_mutex_lock(fence->mutex);
   pipe_mutex_lock(mgr->mutex);
   fence->refCount = 1;
   DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
   fence->mgr = mgr;
   ++mgr->refCount;
   ++mgr->num_fences;
   pipe_mutex_unlock(mgr->mutex);
   fence->fence_class = fence_class;
   fence->fence_type = fence_type;
   fence->signaled_type = 0;
   fence->private = private;
   if (private_size) {
      /* Point at the trailing storage and copy the caller's data in. */
      fence->private = (void *)(((uint8_t *) fence) + fence_size);
      memcpy(fence->private, private, private_size);
   }
   pipe_mutex_unlock(fence->mutex);
   return fence;
}
/*
 * Fence-manager "signaled" callback backed by the DRM TTM fence ioctl.
 */
static int
tSignaled(struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
          uint32_t *signaled_type)
{
   drmFence *drm_fence = (drmFence *) private;
   long fd = (long) mgr->private;
   int dummy;
   int err;

   *signaled_type = 0;

   err = drmFenceSignaled((int) fd, drm_fence, flush_type, &dummy);
   if (err != 0)
      return err;

   *signaled_type = drm_fence->signaled;
   return 0;
}
/*
 * Fence-manager "finish" callback: wait on the TTM fence, optionally
 * with a lazy (sleeping) wait.
 */
static int
tFinish(struct _DriFenceMgr *mgr, void *private, uint32_t fence_type,
        int lazy_hint)
{
   long fd = (long) mgr->private;
   unsigned flags = 0;

   if (lazy_hint)
      flags |= DRM_FENCE_FLAG_WAIT_LAZY;

   return drmFenceWait((int) fd, flags, (drmFence *) private, fence_type);
}
static int
tUnref(struct _DriFenceMgr *mgr, void **private)
{
long fd = (long) mgr->private;
drmFence *fence = (drmFence *) *private;
*private = NULL;
return drmFenceUnreference(fd, fence);
}
struct _DriFenceMgr *driFenceMgrTTMInit(int fd)
{
struct _DriFenceMgrCreateInfo info;
struct _DriFenceMgr *mgr;
info.flags = DRI_FENCE_CLASS_ORDERED;
info.num_classes = 4;
info.signaled = tSignaled;
info.finish = tFinish;
info.unreference = tUnref;
mgr = driFenceMgrCreate(&info);
if (mgr == NULL)
return NULL;
mgr->private = (void *) (long) fd;
return mgr;
}

View file

@ -1,115 +0,0 @@
#ifndef DRI_FENCEMGR_H
#define DRI_FENCEMGR_H
#include <stdint.h>
#include <stdlib.h>
struct _DriFenceObject;
struct _DriFenceMgr;
/*
* Do a quick check to see if the fence manager has registered the fence
* object as signaled. Note that this function may return a false negative
* answer.
*/
extern uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence);
/*
* Check if the fence object is signaled. This function can be substantially
* more expensive to call than the above function, but will not return a false
* negative answer. The argument "flush_type" sets the types that the
* underlying mechanism must make sure will eventually signal.
*/
extern int driFenceSignaledType(struct _DriFenceObject *fence,
uint32_t flush_type, uint32_t *signaled);
/*
* Convenience functions.
*/
static inline int driFenceSignaled(struct _DriFenceObject *fence,
                                   uint32_t flush_type)
{
   uint32_t types;

   /* Treat a query failure as "not signaled". */
   if (driFenceSignaledType(fence, flush_type, &types) != 0)
      return 0;

   return (types & flush_type) == flush_type;
}
static inline int driFenceSignaledCached(struct _DriFenceObject *fence,
                                         uint32_t flush_type)
{
   /* Cheap check against the cached value only; may be a false negative. */
   uint32_t types = driFenceSignaledTypeCached(fence);

   return (types & flush_type) == flush_type;
}
/*
* Reference a fence object.
*/
extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
/*
* Unreference a fence object. The fence object pointer will be reset to NULL.
*/
extern void driFenceUnReference(struct _DriFenceObject **pFence);
/*
* Wait for a fence to signal the indicated fence_type.
* If "lazy_hint" is true, it indicates that the wait may sleep to avoid
* busy-wait polling.
*/
extern int driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
int lazy_hint);
/*
* Create a DriFenceObject for manager "mgr".
*
* "private" is a pointer that should be used for the callbacks in
* struct _DriFenceMgrCreateInfo.
*
* if private_size is nonzero, then the info stored at *private, with size
* private size will be copied and the fence manager will instead use a
* pointer to the copied data for the callbacks in
* struct _DriFenceMgrCreateInfo. In that case, the object pointed to by
* "private" may be destroyed after the call to driFenceCreate.
*/
extern struct _DriFenceObject *driFenceCreate(struct _DriFenceMgr *mgr,
uint32_t fence_class,
uint32_t fence_type,
void *private,
size_t private_size);
extern uint32_t driFenceType(struct _DriFenceObject *fence);
/*
* Fence creations are ordered. If a fence signals a fence_type,
* it is safe to assume that all fences of the same class that was
* created before that fence has signaled the same type.
*/
#define DRI_FENCE_CLASS_ORDERED (1 << 0)
/*
 * Driver callbacks and parameters used to instantiate a fence manager.
 */
struct _DriFenceMgrCreateInfo {
   uint32_t flags;        /* DRI_FENCE_CLASS_* flags */
   uint32_t num_classes;  /* number of fence classes to manage */
   /* Query which of "flush_type" have signaled; result in *signaled_type. */
   int (*signaled) (struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
                    uint32_t *signaled_type);
   /* Block until "fence_type" has signaled; lazy_hint permits a sleeping wait. */
   int (*finish) (struct _DriFenceMgr *mgr, void *private, uint32_t fence_type, int lazy_hint);
   /* Drop the driver-side fence reference; implementations clear *private. */
   int (*unreference) (struct _DriFenceMgr *mgr, void **private);
};
extern struct _DriFenceMgr *
driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info);
void
driFenceMgrUnReference(struct _DriFenceMgr **pMgr);
extern struct _DriFenceMgr *
driFenceMgrTTMInit(int fd);
#endif

View file

@ -1,161 +0,0 @@
/**************************************************************************
*
* Copyright 2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <errno.h>
#include "pipe/p_debug.h"
#include "pipe/p_thread.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_bufmgr.h"
/*
 * Allocate a buffer from system memory.  Only DRM_BO_FLAG_MEM_LOCAL
 * placement is supported.  The requested size is stored in the first
 * of two leading header words; pool_map returns the address past them.
 *
 * Fix: the original dereferenced the malloc result without a NULL
 * check, crashing on out-of-memory instead of failing the allocation.
 */
static void *
pool_create(struct _DriBufferPool *pool,
            unsigned long size, uint64_t flags, unsigned hint,
            unsigned alignment)
{
   unsigned long *private;

   if ((flags & DRM_BO_MASK_MEM) != DRM_BO_FLAG_MEM_LOCAL)
      abort();

   private = malloc(size + 2*sizeof(unsigned long));
   if (!private)
      return NULL;

   *private = size;
   return (void *)private;
}
/*
 * Release a malloc'ed buffer: the whole thing is one heap chunk.
 */
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
   free(private);

   return 0;
}
/*
 * Malloc'ed buffers are CPU-local and therefore always idle.
 */
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private,
              pipe_mutex *mutex, int lazy)
{
   (void) pool;
   (void) private;
   (void) mutex;
   (void) lazy;

   return 0;
}
/*
 * "Map" the buffer: return the address just past the two-word header.
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, pipe_mutex *mutex, void **virtual)
{
   unsigned long *header = (unsigned long *) private;

   *virtual = (void *) (header + 2);

   return 0;
}
/*
 * Nothing to do; the heap mapping is permanent.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   (void) pool;
   (void) private;

   return 0;
}
/*
 * Malloc'ed buffers have no GPU offset; reaching here is a bug.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   abort();

   return 0UL;   /* unreachable */
}
/*
 * Malloc'ed buffers are not sub-allocated, so a pool offset is
 * meaningless; reaching here is a bug.
 *
 * Fix: add an (unreachable) return after abort() — the original fell
 * off the end of a non-void function, triggering -Wreturn-type and
 * diverging from the sibling pool_offset above.
 */
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
   /*
    * BUG
    */
   abort();

   return 0UL;   /* unreachable; keeps the signature honest */
}
/*
 * All malloc'ed buffers live in cached system memory.
 */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   return DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MEM_LOCAL;
}
/*
 * The requested size is stored in the first header word.
 */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   unsigned long *header = (unsigned long *) private;

   return header[0];
}
/*
 * Malloc'ed buffers can never be fenced; reaching here is a bug.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   abort();

   return 0;   /* unreachable */
}
/*
 * There is no kernel BO behind a malloc'ed buffer; reaching here is a bug.
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   abort();

   return NULL;   /* unreachable */
}
/*
 * Destroy the pool descriptor itself; buffers are freed individually.
 */
static void
pool_takedown(struct _DriBufferPool *pool)
{
   free(pool);
}
/*
 * Create a buffer pool that services DRM_BO_FLAG_MEM_LOCAL buffers
 * straight out of malloc.  Returns NULL on allocation failure.
 */
struct _DriBufferPool *
driMallocPoolInit(void)
{
   struct _DriBufferPool *pool = malloc(sizeof *pool);

   if (pool == NULL)
      return NULL;

   pool->data = NULL;
   pool->fd = -1;

   pool->create = pool_create;
   pool->destroy = pool_destroy;
   pool->map = pool_map;
   pool->unmap = pool_unmap;
   pool->offset = pool_offset;
   pool->poolOffset = pool_poolOffset;
   pool->flags = pool_flags;
   pool->size = pool_size;
   pool->fence = pool_fence;
   pool->kernel = pool_kernel;
   pool->validate = NULL;        /* malloc'ed buffers need no validation */
   pool->waitIdle = pool_waitIdle;
   pool->takeDown = pool_takedown;

   return pool;
}

View file

@ -1,968 +0,0 @@
/**************************************************************************
*
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
*/
#include <stdint.h>
#include <sys/time.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"
#include "ws_dri_bufmgr.h"
#include "pipe/p_thread.h"
#define DRI_SLABPOOL_ALLOC_RETRIES 100
struct _DriSlab;

/*
 * One buffer handed out by the pool: either a sub-allocation of a slab
 * (isSlabBuffer != 0) or a wrapper around its own kernel BO.
 */
struct _DriSlabBuffer {
   int isSlabBuffer;
   drmBO *bo;                      /* backing kernel BO (non-slab buffers only) */
   struct _DriFenceObject *fence;  /* last GPU-use fence, or NULL */
   struct _DriSlab *parent;        /* owning slab (slab buffers only) */
   drmMMListHead head;             /* free-list / delayed-list linkage */
   uint32_t mapCount;              /* outstanding CPU maps */
   uint32_t start;                 /* byte offset into the slab's kernel BO */
   uint32_t fenceType;             /* fence types to wait for on idle */
   int unFenced;                   /* validated but fence not yet attached */
   pipe_condvar event;             /* signals unmap / fence attach */
};

/*
 * Kernel buffer object backing one slab.  Cached by the free slab
 * manager for a grace period after its slab dies.
 */
struct _DriKernelBO {
   int fd;                         /* DRM fd the BO was created on */
   drmBO bo;
   drmMMListHead timeoutHead;      /* free-manager timeout-list linkage */
   drmMMListHead head;             /* free-manager cached/unCached linkage */
   struct timeval timeFreed;       /* when the reuse grace period expires */
   uint32_t pageAlignment;
   void *virtual;                  /* CPU address obtained at creation */
};

/* A kernel BO chopped into equally sized buffers. */
struct _DriSlab{
   drmMMListHead head;             /* bucket slabs/freeSlabs linkage */
   drmMMListHead freeBuffers;      /* buffers available for allocation */
   uint32_t numBuffers;
   uint32_t numFree;
   struct _DriSlabBuffer *buffers;
   struct _DriSlabSizeHeader *header;   /* owning size bucket */
   struct _DriKernelBO *kbo;
};

/* Per-bucket state: all slabs serving one buffer size. */
struct _DriSlabSizeHeader {
   drmMMListHead slabs;            /* slabs that still have free buffers */
   drmMMListHead freeSlabs;        /* completely free slabs */
   drmMMListHead delayedBuffers;   /* freed buffers still fenced by the GPU */
   uint32_t numDelayed;
   struct _DriSlabPool *slabPool;
   uint32_t bufSize;               /* buffer size this bucket serves */
   pipe_mutex mutex;               /* protects all of the above */
};

/*
 * Caches retired slab kernel BOs for reuse and destroys them after a
 * timeout.
 */
struct _DriFreeSlabManager {
   struct timeval slabTimeout;     /* grace period before a BO is destroyed */
   struct timeval checkInterval;   /* minimum interval between expiry scans */
   struct timeval nextCheck;
   drmMMListHead timeoutList;      /* all cached BOs, oldest first */
   drmMMListHead unCached;
   drmMMListHead cached;
   pipe_mutex mutex;
};

struct _DriSlabPool {
   /*
    * The data of this structure remains constant after
    * initialization and thus needs no mutex protection.
    */
   struct _DriFreeSlabManager *fMan;
   uint64_t proposedFlags;         /* BO flags used when creating slab BOs */
   uint64_t validMask;
   uint32_t *bucketSizes;          /* ascending bucket sizes */
   uint32_t numBuckets;
   uint32_t pageSize;
   int fd;                         /* DRM file descriptor */
   int pageAlignment;
   int maxSlabSize;                /* upper bound on a slab BO's size */
   int desiredNumBuffers;          /* target buffer count per slab */
   struct _DriSlabSizeHeader *headers;  /* one per bucket */
};
/*
* FIXME: Perhaps arrange timeout slabs in size buckets for fast
* retreival??
*/
/*
 * Return nonzero if *arg1 is at or after *arg2 (i.e. arg1 >= arg2).
 *
 * Fix: the microsecond comparison used ">" so two identical timestamps
 * compared as "before", contradicting the "Eq" in the function's name;
 * use ">=" so equality counts as after-or-equal.
 */
static inline int
driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
{
   return ((arg1->tv_sec > arg2->tv_sec) ||
           ((arg1->tv_sec == arg2->tv_sec) &&
            (arg1->tv_usec >= arg2->tv_usec)));
}
/*
 * In-place *arg += *add, renormalizing so tv_usec stays below one
 * million.
 */
static inline void
driTimeAdd(struct timeval *arg, struct timeval *add)
{
   unsigned int carry;

   arg->tv_sec += add->tv_sec;
   arg->tv_usec += add->tv_usec;

   carry = arg->tv_usec / 1000000;
   arg->tv_sec += carry;
   arg->tv_usec -= carry * 1000000;
}
/*
 * Really destroy a kernel BO: drop our reference and free the wrapper.
 * NULL is tolerated.
 */
static void
driFreeKernelBO(struct _DriKernelBO *kbo)
{
   if (kbo == NULL)
      return;

   (void) drmBOUnreference(kbo->fd, &kbo->bo);
   free(kbo);
}
/*
 * Destroy every cached kernel BO whose reuse grace period has expired.
 * Called with fMan->mutex held.  Rate-limited: runs the scan at most
 * once per checkInterval, gated by fMan->nextCheck.
 */
static void
driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
                         struct timeval *time)
{
   drmMMListHead *list, *next;
   struct _DriKernelBO *kbo;

   if (!driTimeAfterEq(time, &fMan->nextCheck))
      return;

   for (list = fMan->timeoutList.next, next = list->next;
        list != &fMan->timeoutList;
        list = next, next = list->next) {

      kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);

      /* Timeout list is ordered oldest-first; stop at the first
       * unexpired BO.
       */
      if (!driTimeAfterEq(time, &kbo->timeFreed))
         break;

      /* Unlink from both the timeout and the cached/unCached lists. */
      DRMLISTDELINIT(&kbo->timeoutHead);
      DRMLISTDELINIT(&kbo->head);
      driFreeKernelBO(kbo);
   }

   fMan->nextCheck = *time;
   driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
}
/*
 * Add a _DriKernelBO to the free slab manager.
 * This means that it is available for reuse, but if it's not
 * reused in a while, it will be freed.
 */
static void
driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
                   struct _DriKernelBO *kbo)
{
   struct timeval time;

   pipe_mutex_lock(fMan->mutex);
   gettimeofday(&time, NULL);
   driTimeAdd(&time, &fMan->slabTimeout);

   /* Record when the reuse grace period ends. */
   kbo->timeFreed = time;

   /* Cached and uncached BOs live on separate reuse lists. */
   if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
      DRMLISTADD(&kbo->head, &fMan->cached);
   else
      DRMLISTADD(&kbo->head, &fMan->unCached);

   DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);

   /* Opportunistically reap already-expired BOs while we hold the lock. */
   driFreeTimeoutKBOsLocked(fMan, &time);

   pipe_mutex_unlock(fMan->mutex);
}
/*
* Get a _DriKernelBO for us to use as storage for a slab.
*
*/
static struct _DriKernelBO *
driAllocKernelBO(struct _DriSlabSizeHeader *header)
{
struct _DriSlabPool *slabPool = header->slabPool;
struct _DriFreeSlabManager *fMan = slabPool->fMan;
drmMMListHead *list, *next, *head;
uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
struct _DriKernelBO *kbo;
struct _DriKernelBO *kboTmp;
int ret;
/*
* FIXME: We should perhaps allow some variation in slabsize in order
* to efficiently reuse slabs.
*/
size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
pipe_mutex_lock(fMan->mutex);
kbo = NULL;
retry:
head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
&fMan->cached : &fMan->unCached;
for (list = head->next, next = list->next;
list != head;
list = next, next = list->next) {
kboTmp = DRMLISTENTRY(struct _DriKernelBO, list, head);
if ((kboTmp->bo.size == size) &&
(slabPool->pageAlignment == 0 ||
(kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {
if (!kbo)
kbo = kboTmp;
if ((kbo->bo.proposedFlags ^ slabPool->proposedFlags) == 0)
break;
}
}
if (kbo) {
DRMLISTDELINIT(&kbo->head);
DRMLISTDELINIT(&kbo->timeoutHead);
}
pipe_mutex_unlock(fMan->mutex);
if (kbo) {
uint64_t new_mask = kbo->bo.proposedFlags ^ slabPool->proposedFlags;
ret = 0;
if (new_mask) {
ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
new_mask, DRM_BO_HINT_DONT_FENCE, 0, 0);
}
if (ret == 0)
return kbo;
driFreeKernelBO(kbo);
kbo = NULL;
goto retry;
}
kbo = calloc(1, sizeof(struct _DriKernelBO));
if (!kbo)
return NULL;
kbo->fd = slabPool->fd;
DRMINITLISTHEAD(&kbo->head);
DRMINITLISTHEAD(&kbo->timeoutHead);
ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
slabPool->proposedFlags,
DRM_BO_HINT_DONT_FENCE, &kbo->bo);
if (ret)
goto out_err0;
ret = drmBOMap(kbo->fd, &kbo->bo,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
0, &kbo->virtual);
if (ret)
goto out_err1;
ret = drmBOUnmap(kbo->fd, &kbo->bo);
if (ret)
goto out_err1;
return kbo;
out_err1:
drmBOUnreference(kbo->fd, &kbo->bo);
out_err0:
free(kbo);
return NULL;
}
/*
 * Allocate a new slab for bucket "header": one kernel BO chopped into
 * equal buffers, all initially on the slab's free list, and link the
 * slab onto the bucket's slabs list.  Returns 0 or -ENOMEM.
 */
static int
driAllocSlab(struct _DriSlabSizeHeader *header)
{
   struct _DriSlab *slab;
   struct _DriSlabBuffer *buf;
   uint32_t numBuffers;
   int ret;
   int i;

   slab = calloc(1, sizeof(*slab));
   if (!slab)
      return -ENOMEM;

   slab->kbo = driAllocKernelBO(header);
   if (!slab->kbo) {
      ret = -ENOMEM;
      goto out_err0;
   }

   /* The kernel BO size may have been clamped/rounded; recompute the
    * actual buffer count. */
   numBuffers = slab->kbo->bo.size / header->bufSize;

   slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = -ENOMEM;
      goto out_err1;
   }

   DRMINITLISTHEAD(&slab->head);
   DRMINITLISTHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->header = header;

   buf = slab->buffers;
   for (i=0; i < numBuffers; ++i) {
      buf->parent = slab;
      buf->start = i* header->bufSize;
      buf->mapCount = 0;
      buf->isSlabBuffer = 1;
      pipe_condvar_init(buf->event);
      DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   /* Publish the slab to allocators. */
   DRMLISTADDTAIL(&slab->head, &header->slabs);

   return 0;

 out_err1:
   /* Hand the kernel BO back to the free slab manager for reuse. */
   driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
   free(slab->buffers);
 out_err0:
   free(slab);
   return ret;
}
/*
 * Delete a buffer from the slab header delayed list and put
 * it on the slab free list.
 *
 * Called with the header mutex held.  If the slab becomes completely
 * free it moves to freeSlabs; fully free slabs are then destroyed
 * (kernel BOs returned to the free slab manager) unless this slab just
 * became the bucket's only allocatable one.
 */
static void
driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
{
   struct _DriSlab *slab = buf->parent;
   struct _DriSlabSizeHeader *header = slab->header;
   drmMMListHead *list = &buf->head;

   DRMLISTDEL(list);
   DRMLISTADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   /* Slab had no free buffers before: relink it onto the bucket list. */
   if (slab->head.next == &slab->head)
      DRMLISTADDTAIL(&slab->head, &header->slabs);

   /* Slab is now entirely free: park it on freeSlabs. */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      DRMLISTDEL(list);
      DRMLISTADDTAIL(list, &header->freeSlabs);
   }

   /* Destroy free slabs unless this one is the bucket's only slab. */
   if (header->slabs.next == &header->slabs ||
       slab->numFree != slab->numBuffers) {

      drmMMListHead *next;
      struct _DriFreeSlabManager *fMan = header->slabPool->fMan;

      for (list = header->freeSlabs.next, next = list->next;
           list != &header->freeSlabs;
           list = next, next = list->next) {

         slab = DRMLISTENTRY(struct _DriSlab, list, head);

         DRMLISTDELINIT(list);
         driSetKernelBOFree(fMan, slab->kbo);
         free(slab->buffers);
         free(slab);
      }
   }
}
/*
 * Reap the bucket's delayed-free list: return to the free list every
 * buffer whose fence has signaled.  If "wait" is set, block on the
 * first fence examined.  Called with the header mutex held.
 */
static void
driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
{
   drmMMListHead *list, *prev, *first;
   struct _DriSlabBuffer *buf;
   struct _DriSlab *slab;
   int firstWasSignaled = 1;
   int signaled;
   int i;
   int ret;

   /*
    * Rerun the freeing test if the youngest tested buffer
    * was signaled, since there might be more idle buffers
    * in the delay list.
    */

   while (firstWasSignaled) {
      firstWasSignaled = 0;
      signaled = 0;
      first = header->delayedBuffers.next;

      /* Only examine the oldest 1/3 of delayed buffers:
       */
      if (header->numDelayed > 3) {
         for (i = 0; i < header->numDelayed; i += 3) {
            first = first->next;
         }
      }

      /* Walk backwards from "first", i.e. oldest buffers first. */
      for (list = first, prev = list->prev;
           list != &header->delayedBuffers;
           list = prev, prev = list->prev) {
         buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
         slab = buf->parent;

         if (!signaled) {
            if (wait) {
               ret = driFenceFinish(buf->fence, buf->fenceType, 0);
               if (ret)
                  break;
               signaled = 1;
               wait = 0;
            } else {
               signaled = driFenceSignaled(buf->fence, buf->fenceType);
            }
            if (signaled) {
               if (list == first)
                  firstWasSignaled = 1;
               driFenceUnReference(&buf->fence);
               header->numDelayed--;
               driSlabFreeBufferLocked(buf);
            }
         } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
            /* A newer fence already signaled; for older buffers the
             * cheap cached check suffices — presumably relying on
             * ordered fence classes (DRI_FENCE_CLASS_ORDERED). */
            driFenceUnReference(&buf->fence);
            header->numDelayed--;
            driSlabFreeBufferLocked(buf);
         }
      }
   }
}
/*
 * Pop a free buffer from size bucket "header", allocating a new slab
 * when all current slabs are exhausted.  Returns NULL if no slab could
 * be allocated after DRI_SLABPOOL_ALLOC_RETRIES attempts.
 *
 * Fix: "buf" was erroneously declared "static", making the function
 * non-reentrant and racy when several contexts allocate concurrently;
 * it is reassigned on every call and must be an ordinary automatic.
 */
static struct _DriSlabBuffer *
driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
{
   struct _DriSlabBuffer *buf;
   struct _DriSlab *slab;
   drmMMListHead *list;
   int count = DRI_SLABPOOL_ALLOC_RETRIES;

   pipe_mutex_lock(header->mutex);
   while(header->slabs.next == &header->slabs && count > 0) {
      /* Try to reclaim delayed-free buffers before growing the pool. */
      driSlabCheckFreeLocked(header, 0);
      if (header->slabs.next != &header->slabs)
         break;

      /* Drop the lock while backing off, then grow by one slab. */
      pipe_mutex_unlock(header->mutex);
      if (count != DRI_SLABPOOL_ALLOC_RETRIES)
         usleep(1);
      pipe_mutex_lock(header->mutex);
      (void) driAllocSlab(header);
      count--;
   }

   list = header->slabs.next;
   if (list == &header->slabs) {
      pipe_mutex_unlock(header->mutex);
      return NULL;
   }
   slab = DRMLISTENTRY(struct _DriSlab, list, head);

   /* Unlink the slab from the allocatable list when it empties. */
   if (--slab->numFree == 0)
      DRMLISTDELINIT(list);

   list = slab->freeBuffers.next;
   DRMLISTDELINIT(list);

   pipe_mutex_unlock(header->mutex);
   buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
   return buf;
}
/*
 * Allocate a buffer of "size" bytes.  Sizes fitting one of the pool's
 * buckets are served from a slab; larger requests fall back to a
 * dedicated kernel BO wrapped in a _DriSlabBuffer (isSlabBuffer = 0).
 */
static void *
pool_create(struct _DriBufferPool *driPool, unsigned long size,
            uint64_t flags, unsigned hint, unsigned alignment)
{
   struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
   struct _DriSlabSizeHeader *header;
   struct _DriSlabBuffer *buf;
   void *dummy;
   int i;
   int ret;

   /*
    * FIXME: Check for compatibility.
    */

   /* Find the smallest bucket whose buffers fit the request. */
   header = pool->headers;
   for (i=0; i<pool->numBuckets; ++i) {
      if (header->bufSize >= size)
         break;
      header++;
   }

   if (i < pool->numBuckets)
      return driSlabAllocBuffer(header);

   /*
    * Fall back to allocate a buffer object directly from DRM.
    * and wrap it in a driBO structure.
    */

   buf = calloc(1, sizeof(*buf));
   if (!buf)
      return NULL;

   buf->bo = calloc(1, sizeof(*buf->bo));
   if (!buf->bo)
      goto out_err0;

   /* Alignment must divide, or be a multiple of, the page size. */
   if (alignment) {
      if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
         goto out_err1;
      if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
         goto out_err1;
   }

   ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
                     flags, hint, buf->bo);
   if (ret)
      goto out_err1;

   /* Map and unmap once — presumably to set up buf->bo->virtual for
    * later pool_map calls; TODO confirm against libdrm semantics. */
   ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                  0, &dummy);
   if (ret)
      goto out_err2;

   ret = drmBOUnmap(pool->fd, buf->bo);
   if (ret)
      goto out_err2;

   return buf;
 out_err2:
   drmBOUnreference(pool->fd, buf->bo);
 out_err1:
   free(buf->bo);
 out_err0:
   free(buf);
   return NULL;
}
/*
 * Release a buffer.  Directly allocated (non-slab) buffers drop their
 * kernel BO immediately.  Slab buffers whose fence hasn't signaled yet
 * go on the bucket's delayed-free list; idle ones return straight to
 * the slab free list.
 */
static int
pool_destroy(struct _DriBufferPool *driPool, void *private)
{
   struct _DriSlabBuffer *buf =
      (struct _DriSlabBuffer *) private;
   struct _DriSlab *slab;
   struct _DriSlabSizeHeader *header;

   if (!buf->isSlabBuffer) {
      struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
      int ret;

      ret = drmBOUnreference(pool->fd, buf->bo);
      free(buf->bo);
      free(buf);
      return ret;
   }

   slab = buf->parent;
   header = slab->header;

   pipe_mutex_lock(header->mutex);
   buf->unFenced = 0;
   buf->mapCount = 0;

   if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
      /* Still busy on the GPU: defer the real free. */
      DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
      header->numDelayed++;
   } else {
      if (buf->fence)
         driFenceUnReference(&buf->fence);
      driSlabFreeBufferLocked(buf);
   }

   pipe_mutex_unlock(header->mutex);
   return 0;
}
/*
 * Block until the buffer is idle: first wait (on the caller-held
 * *mutex, released while sleeping on buf->event) for validation to
 * complete, then finish and drop the attached fence, if any.
 */
static int
pool_waitIdle(struct _DriBufferPool *driPool, void *private,
              pipe_mutex *mutex, int lazy)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   while(buf->unFenced)
      pipe_condvar_wait(buf->event, *mutex);

   if (!buf->fence)
      return 0;

   driFenceFinish(buf->fence, buf->fenceType, lazy);
   driFenceUnReference(&buf->fence);

   return 0;
}
/*
 * Map the buffer for CPU access.  If the buffer is busy (being
 * validated, or fenced and not yet signaled) either return -EBUSY
 * (DRM_BO_HINT_DONT_BLOCK) or wait for idle.  Slab buffers map inside
 * the slab's kernel-BO mapping; others use the drmBO's own mapping.
 */
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
         int hint, pipe_mutex *mutex, void **virtual)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   int busy;

   /* Slab buffers may use the cheap cached fence check; for others a
    * full query is done. */
   if (buf->isSlabBuffer)
      busy = buf->unFenced || (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType));
   else
      busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);


   if (busy) {
      if (hint & DRM_BO_HINT_DONT_BLOCK)
         return -EBUSY;
      else {
         (void) pool_waitIdle(pool, private, mutex, 0);
      }
   }

   ++buf->mapCount;
   *virtual = (buf->isSlabBuffer) ?
      (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
      (void *) buf->bo->virtual;

   return 0;
}
/*
 * Drop one CPU-map reference; when the last map on a slab buffer goes
 * away, wake waiters blocked in pool_validate.
 */
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (--buf->mapCount == 0 && buf->isSlabBuffer)
      pipe_condvar_broadcast(buf->event);

   return 0;
}
/*
 * Return the GPU offset of the buffer.  Only meaningful for buffers
 * the kernel will not move (DRM_BO_FLAG_NO_MOVE), which the asserts
 * check.  Slab buffers add their start offset within the slab's BO.
 */
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   struct _DriSlab *slab;
   struct _DriSlabSizeHeader *header;

   if (!buf->isSlabBuffer) {
      assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
      return buf->bo->offset;
   }

   slab = buf->parent;
   header = slab->header;

   (void) header;   /* only referenced by the assert below */
   assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);
   return slab->kbo->bo.offset + buf->start;
}
static unsigned long
pool_poolOffset(struct _DriBufferPool *pool, void *private)
{
struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
return buf->start;
}
/*
 * Report the placement flags of the backing kernel BO.
 */
static uint64_t
pool_flags(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   return buf->isSlabBuffer ? buf->parent->kbo->bo.flags : buf->bo->flags;
}
/*
 * Usable size of the buffer: the bucket's buffer size for slab
 * buffers, the kernel BO's size otherwise.
 */
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   return buf->isSlabBuffer ? buf->parent->header->bufSize : buf->bo->size;
}
/*
 * Attach "fence" to the buffer, replacing any previous fence, record
 * which fence types to wait for (from the backing kernel BO), clear
 * the unFenced flag and wake anyone blocked in pool_waitIdle.
 */
static int
pool_fence(struct _DriBufferPool *pool, void *private,
           struct _DriFenceObject *fence)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
   drmBO *bo;

   if (buf->fence)
      driFenceUnReference(&buf->fence);

   buf->fence = driFenceReference(fence);
   bo = (buf->isSlabBuffer) ?
      &buf->parent->kbo->bo:
      buf->bo;
   buf->fenceType = bo->fenceFlags;

   buf->unFenced = 0;
   pipe_condvar_broadcast(buf->event);

   return 0;
}
/*
 * Return the kernel BO backing this buffer — the whole slab's BO for
 * slab buffers.
 */
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (buf->isSlabBuffer)
      return &buf->parent->kbo->bo;

   return buf->bo;
}
/*
 * Prepare a slab buffer for submission: wait (on the caller-held
 * *mutex, released while sleeping on buf->event) until no CPU maps
 * remain, then mark the buffer unFenced until pool_fence attaches the
 * submission fence.  Non-slab buffers need no such handshake.
 */
static int
pool_validate(struct _DriBufferPool *pool, void *private,
              pipe_mutex *mutex)
{
   struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;

   if (!buf->isSlabBuffer)
      return 0;

   while(buf->mapCount != 0)
      pipe_condvar_wait(buf->event, *mutex);

   buf->unFenced = 1;
   return 0;
}
/*
 * Create a free slab manager.  Retired slab BOs are cached for reuse
 * and destroyed once unused for slabTimeoutMsec msec; expiry scans run
 * at most once every checkIntervalMsec msec.  Returns NULL on OOM.
 */
struct _DriFreeSlabManager *
driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
{
   struct _DriFreeSlabManager *tmp;

   tmp = calloc(1, sizeof(*tmp));
   if (!tmp)
      return NULL;

   pipe_mutex_init(tmp->mutex);
   pipe_mutex_lock(tmp->mutex);

   /* Convert msec arguments to normalized struct timevals. */
   tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
   tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
   tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;

   tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
   tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
   tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;

   gettimeofday(&tmp->nextCheck, NULL);
   driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);

   DRMINITLISTHEAD(&tmp->timeoutList);
   DRMINITLISTHEAD(&tmp->unCached);
   DRMINITLISTHEAD(&tmp->cached);

   pipe_mutex_unlock(tmp->mutex);

   return tmp;
}
/*
 * Destroy a free slab manager.  Runs one reaping pass dated an extra
 * check interval into the future; the asserts require that every
 * cached BO has expired by then.  NOTE(review): assumes all cached BOs
 * time out before nextCheck + checkInterval — TODO confirm.
 */
void
driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
{
   struct timeval time;

   time = fMan->nextCheck;
   driTimeAdd(&time, &fMan->checkInterval);

   pipe_mutex_lock(fMan->mutex);
   driFreeTimeoutKBOsLocked(fMan, &time);
   pipe_mutex_unlock(fMan->mutex);

   assert(fMan->timeoutList.next == &fMan->timeoutList);
   assert(fMan->unCached.next == &fMan->unCached);
   assert(fMan->cached.next == &fMan->cached);

   free(fMan);
}
/*
 * Initialize one size bucket of "pool" to serve buffers of "size"
 * bytes.
 */
static void
driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
                  struct _DriSlabSizeHeader *header)
{
   pipe_mutex_init(header->mutex);
   pipe_mutex_lock(header->mutex);

   DRMINITLISTHEAD(&header->slabs);
   DRMINITLISTHEAD(&header->freeSlabs);
   DRMINITLISTHEAD(&header->delayedBuffers);

   header->numDelayed = 0;
   header->bufSize = size;
   header->slabPool = pool;

   pipe_mutex_unlock(header->mutex);
}
/*
 * Drain one size bucket: finish and release the fence of every
 * delayed-free buffer, then move each buffer to its slab free list
 * (which also destroys fully free slabs).
 */
static void
driFinishSizeHeader(struct _DriSlabSizeHeader *header)
{
   drmMMListHead *list, *next;
   struct _DriSlabBuffer *buf;

   pipe_mutex_lock(header->mutex);
   for (list = header->delayedBuffers.next, next = list->next;
        list != &header->delayedBuffers;
        list = next, next = list->next) {

      buf = DRMLISTENTRY(struct _DriSlabBuffer, list , head);
      if (buf->fence) {
         /* Synchronous wait; shutdown path, so blocking is fine. */
         (void) driFenceFinish(buf->fence, buf->fenceType, 0);
         driFenceUnReference(&buf->fence);
      }
      header->numDelayed--;
      driSlabFreeBufferLocked(buf);
   }
   pipe_mutex_unlock(header->mutex);
}
/*
 * Destroy the slab pool: drain every size bucket, then free all
 * metadata (the free slab manager is owned by the caller).
 */
static void
pool_takedown(struct _DriBufferPool *driPool)
{
   struct _DriSlabPool *pool = driPool->data;
   int i;

   for (i = 0; i < pool->numBuckets; ++i)
      driFinishSizeHeader(&pool->headers[i]);

   free(pool->headers);
   free(pool->bucketSizes);
   free(pool);
   free(driPool);
}
/*
 * Create a slab pool on DRM fd "fd" allocating BOs with "flags"
 * (validated against "validMask").  Bucket i serves sizes up to
 * smallestSize << i, for numSizes buckets.  Each slab targets
 * desiredNumBuffers buffers but never exceeds maxSlabSize bytes.
 * Retired slab BOs are recycled through "fMan".  Returns NULL on OOM.
 */
struct _DriBufferPool *
driSlabPoolInit(int fd, uint64_t flags,
                uint64_t validMask,
                uint32_t smallestSize,
                uint32_t numSizes,
                uint32_t desiredNumBuffers,
                uint32_t maxSlabSize,
                uint32_t pageAlignment,
                struct _DriFreeSlabManager *fMan)
{
   struct _DriBufferPool *driPool;
   struct _DriSlabPool *pool;
   uint32_t i;

   driPool = calloc(1, sizeof(*driPool));
   if (!driPool)
      return NULL;

   pool = calloc(1, sizeof(*pool));
   if (!pool)
      goto out_err0;

   pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
   if (!pool->bucketSizes)
      goto out_err1;

   pool->headers = calloc(numSizes, sizeof(*pool->headers));
   if (!pool->headers)
      goto out_err2;

   pool->fMan = fMan;
   pool->proposedFlags = flags;
   pool->validMask = validMask;
   pool->numBuckets = numSizes;
   pool->pageSize = getpagesize();
   pool->fd = fd;
   pool->pageAlignment = pageAlignment;
   pool->maxSlabSize = maxSlabSize;
   pool->desiredNumBuffers = desiredNumBuffers;

   /* Power-of-two bucket sizes starting at smallestSize. */
   for (i=0; i<pool->numBuckets; ++i) {
      pool->bucketSizes[i] = (smallestSize << i);
      driInitSizeHeader(pool, pool->bucketSizes[i],
                        &pool->headers[i]);
   }

   driPool->data = (void *) pool;
   driPool->map = &pool_map;
   driPool->unmap = &pool_unmap;
   driPool->destroy = &pool_destroy;
   driPool->offset = &pool_offset;
   driPool->poolOffset = &pool_poolOffset;
   driPool->flags = &pool_flags;
   driPool->size = &pool_size;
   driPool->create = &pool_create;
   driPool->fence = &pool_fence;
   driPool->kernel = &pool_kernel;
   driPool->validate = &pool_validate;
   driPool->waitIdle = &pool_waitIdle;
   driPool->takeDown = &pool_takedown;

   return driPool;

 out_err2:
   free(pool->bucketSizes);
 out_err1:
   free(pool);
 out_err0:
   free(driPool);
   return NULL;
}