i965/drm: Devirtualize the bufmgr.

libdrm_bacon used to have a GEM-based bufmgr and a legacy fake bufmgr,
but that's long since dead (and we never imported it to i965).  So,
drop the extra layer of function pointers.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
This commit is contained in:
Kenneth Graunke 2017-03-21 17:27:25 -07:00
parent dca224a9ef
commit 2c257ff226
5 changed files with 243 additions and 646 deletions

View file

@ -131,7 +131,6 @@ i965_FILES = \
intel_buffer_objects.h \
intel_buffers.c \
intel_buffers.h \
intel_bufmgr.c \
intel_bufmgr_gem.c \
intel_bufmgr_priv.h \
intel_chipset.h \

View file

@ -34,7 +34,7 @@
#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
@ -98,56 +98,209 @@ struct _drm_bacon_bo {
#define BO_ALLOC_FOR_RENDER (1<<0)
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped
* using bo_map() or drm_bacon_gem_bo_map_gtt() to be used by the CPU.
*/
drm_bacon_bo *drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a
* render target.
*
* This is otherwise the same as bo_alloc.
*/
drm_bacon_bo *drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
bool drm_bacon_has_userptr(drm_bacon_bufmgr *bufmgr);
/**
* Allocate a buffer object from an existing user accessible
* address malloc'd with the provided size.
* Alignment is used when mapping to the gtt.
* Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED
*/
drm_bacon_bo *drm_bacon_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
const char *name,
void *addr, uint32_t tiling_mode,
uint32_t stride, unsigned long size,
unsigned long flags);
/**
* Allocate a tiled buffer object.
*
* Alignment for tiled objects is set automatically; the 'flags'
* argument provides a hint about how the object will be used initially.
*
* Valid tiling formats are:
* I915_TILING_NONE
* I915_TILING_X
* I915_TILING_Y
*
* Note the tiling format may be rejected; callers should check the
* 'tiling_mode' field on return, as well as the pitch value, which
* may have been rounded up to accommodate for tiling restrictions.
*/
drm_bacon_bo *drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
/** Takes a reference on a buffer object */
void drm_bacon_bo_reference(drm_bacon_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
void drm_bacon_bo_unreference(drm_bacon_bo *bo);
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable);
/**
* Reduces the refcount on the userspace mapping of the buffer
* object.
*/
int drm_bacon_bo_unmap(drm_bacon_bo *bo);
/** Write data into an object. */
int drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, const void *data);
/** Read data from an object. */
int drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map,
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
void drm_bacon_bo_wait_rendering(drm_bacon_bo *bo);
/**
* Tears down the buffer manager instance.
*/
void drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr);
/** Executes the command buffer pointed to by bo. */
int drm_bacon_bo_exec(drm_bacon_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4);
/** Executes the command buffer pointed to by bo on the selected ring buffer */
int drm_bacon_bo_mrb_exec(drm_bacon_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
unsigned int flags);
int drm_bacon_bufmgr_check_aperture_space(drm_bacon_bo ** bo_array, int count);
/**
* Add relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param bo Buffer to write the relocation into.
* \param offset Byte offset within reloc_bo of the pointer to
* target_bo.
* \param target_bo Buffer whose offset should be written into the
* relocation entry.
* \param target_offset Constant value to be added to target_bo's
* offset in relocation entry.
* \param read_domains GEM read domains which the buffer will be
* read into by the command that this relocation
* is part of.
* \param write_domains GEM write domains which the buffer will be
* dirtied in by the command that this
* relocation is part of.
*/
int drm_bacon_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
drm_bacon_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
int drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
*
* \param buf Buffer to get tiling mode for
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
int drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t * name);
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
int drm_bacon_bo_busy(drm_bacon_bo *bo);
/**
* Specify the volatility of the buffer.
* \param bo Buffer to create a name for
* \param madv The purgeable status
*
* Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
* reclaimed under memory pressure. If you subsequently require the buffer,
* then you must pass I915_MADV_WILLNEED to mark the buffer as required.
*
* Returns 1 if the buffer was retained, or 0 if it was discarded whilst
* marked as I915_MADV_DONTNEED.
*/
int drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv);
/**
* Set the offset at which this buffer will be softpinned
* \param bo Buffer to set the softpin offset for
* \param offset Softpin offset
*/
int drm_bacon_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset);
/**
* Disable buffer reuse for buffers which will be shared in some way,
* as with scanout buffers. When the buffer reference count goes to
* zero, it will be freed and not placed in the reuse list.
*
* \param bo Buffer to disable reuse for
*/
int drm_bacon_bo_disable_reuse(drm_bacon_bo *bo);
/**
* Query whether a buffer is reusable.
*
* \param bo Buffer to query
*/
int drm_bacon_bo_is_reusable(drm_bacon_bo *bo);
/** Returns true if target_bo is in the relocation tree rooted at bo. */
int drm_bacon_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo);
/* drm_bacon_bufmgr_gem.c */

View file

@ -1,271 +0,0 @@
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <drm.h>
#include <i915_drm.h>
#include "libdrm_macros.h"
#include "brw_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "xf86drm.h"
/** @file intel_bufmgr.c
*
* Convenience functions for buffer management methods.
*/
drm_bacon_bo *
drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
drm_bacon_bo *
drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
drm_bacon_bo *
drm_bacon_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
const char *name, void *addr,
uint32_t tiling_mode,
uint32_t stride,
unsigned long size,
unsigned long flags)
{
if (bufmgr->bo_alloc_userptr)
return bufmgr->bo_alloc_userptr(bufmgr, name, addr, tiling_mode,
stride, size, flags);
return NULL;
}
drm_bacon_bo *
drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
return bufmgr->bo_alloc_tiled(bufmgr, name, x, y, cpp,
tiling_mode, pitch, flags);
}
void
drm_bacon_bo_reference(drm_bacon_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
void
drm_bacon_bo_unreference(drm_bacon_bo *bo)
{
if (bo == NULL)
return;
bo->bufmgr->bo_unreference(bo);
}
int
drm_bacon_bo_map(drm_bacon_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
int
drm_bacon_bo_unmap(drm_bacon_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
int
drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
return bo->bufmgr->bo_subdata(bo, offset, size, data);
}
int
drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
int ret;
if (bo->bufmgr->bo_get_subdata)
return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
ret = drm_bacon_bo_map(bo, 0);
if (ret)
return ret;
memcpy(data, (unsigned char *)bo->virtual + offset, size);
drm_bacon_bo_unmap(bo);
return 0;
}
void
drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
void
drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
int
drm_bacon_bo_exec(drm_bacon_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
int
drm_bacon_bo_mrb_exec(drm_bacon_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int rings)
{
if (bo->bufmgr->bo_mrb_exec)
return bo->bufmgr->bo_mrb_exec(bo, used,
cliprects, num_cliprects, DR4,
rings);
switch (rings) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
return bo->bufmgr->bo_exec(bo, used,
cliprects, num_cliprects, DR4);
default:
return -ENODEV;
}
}
int
drm_bacon_bufmgr_check_aperture_space(drm_bacon_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
int
drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
return -ENODEV;
}
int
drm_bacon_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
drm_bacon_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
return bo->bufmgr->bo_emit_reloc(bo, offset,
target_bo, target_offset,
read_domains, write_domain);
}
int
drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
if (bo->bufmgr->bo_set_tiling)
return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
*tiling_mode = I915_TILING_NONE;
return 0;
}
int
drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
{
if (bo->bufmgr->bo_get_tiling)
return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
*tiling_mode = I915_TILING_NONE;
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
}
int
drm_bacon_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset)
{
if (bo->bufmgr->bo_set_softpin_offset)
return bo->bufmgr->bo_set_softpin_offset(bo, offset);
return -ENODEV;
}
int
drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
{
if (bo->bufmgr->bo_disable_reuse)
return bo->bufmgr->bo_disable_reuse(bo);
return 0;
}
int
drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
{
if (bo->bufmgr->bo_is_reusable)
return bo->bufmgr->bo_is_reusable(bo);
return 0;
}
int
drm_bacon_bo_busy(drm_bacon_bo *bo)
{
if (bo->bufmgr->bo_busy)
return bo->bufmgr->bo_busy(bo);
return 0;
}
int
drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv)
{
if (bo->bufmgr->bo_madvise)
return bo->bufmgr->bo_madvise(bo, madv);
return -1;
}
int
drm_bacon_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}

View file

@ -287,10 +287,6 @@ drm_bacon_gem_estimate_batch_space(drm_bacon_bo ** bo_array, int count);
static unsigned int
drm_bacon_gem_compute_batch_space(drm_bacon_bo ** bo_array, int count);
static int
drm_bacon_gem_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
static int
drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
uint32_t tiling_mode,
@ -299,8 +295,6 @@ drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
time_t time);
static void drm_bacon_gem_bo_unreference(drm_bacon_bo *bo);
static void drm_bacon_gem_bo_free(drm_bacon_bo *bo);
static inline drm_bacon_bo_gem *to_bo_gem(drm_bacon_bo *bo)
@ -416,8 +410,8 @@ drm_bacon_gem_dump_validation_list(drm_bacon_bufmgr_gem *bufmgr_gem)
}
}
static inline void
drm_bacon_gem_bo_reference(drm_bacon_bo *bo)
inline void
drm_bacon_bo_reference(drm_bacon_bo *bo)
{
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -514,8 +508,8 @@ drm_bacon_setup_reloc_list(drm_bacon_bo *bo)
return 0;
}
static int
drm_bacon_gem_bo_busy(drm_bacon_bo *bo)
int
drm_bacon_bo_busy(drm_bacon_bo *bo)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -553,8 +547,8 @@ drm_bacon_gem_bo_madvise_internal(drm_bacon_bufmgr_gem *bufmgr_gem,
return madv.retained;
}
static int
drm_bacon_gem_bo_madvise(drm_bacon_bo *bo, int madv)
int
drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv)
{
return drm_bacon_gem_bo_madvise_internal
((drm_bacon_bufmgr_gem *) bo->bufmgr,
@ -642,7 +636,7 @@ retry:
*/
bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
bucket->head.next, head);
if (!drm_bacon_gem_bo_busy(&bo_gem->bo)) {
if (!drm_bacon_bo_busy(&bo_gem->bo)) {
alloc_from_cache = true;
list_del(&bo_gem->head);
}
@ -731,11 +725,11 @@ err:
return NULL;
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
drm_bacon_bo *
drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
{
return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size,
BO_ALLOC_FOR_RENDER,
@ -743,20 +737,20 @@ drm_bacon_gem_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
alignment);
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc(drm_bacon_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
drm_bacon_bo *
drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
{
return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, 0,
I915_TILING_NONE, 0, 0);
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
drm_bacon_bo *
drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
unsigned long size, stride;
@ -801,14 +795,14 @@ drm_bacon_gem_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
tiling, stride, 0);
}
static drm_bacon_bo *
drm_bacon_gem_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
const char *name,
void *addr,
uint32_t tiling_mode,
uint32_t stride,
unsigned long size,
unsigned long flags)
drm_bacon_bo *
drm_bacon_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
const char *name,
void *addr,
uint32_t tiling_mode,
uint32_t stride,
unsigned long size,
unsigned long flags)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
drm_bacon_bo_gem *bo_gem;
@ -880,9 +874,10 @@ drm_bacon_gem_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
return &bo_gem->bo;
}
static bool
has_userptr(drm_bacon_bufmgr_gem *bufmgr_gem)
bool
drm_bacon_has_userptr(drm_bacon_bufmgr *bufmgr)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
int ret;
void *ptr;
long pgsz;
@ -926,24 +921,6 @@ retry:
return true;
}
static drm_bacon_bo *
check_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
const char *name,
void *addr,
uint32_t tiling_mode,
uint32_t stride,
unsigned long size,
unsigned long flags)
{
if (has_userptr((drm_bacon_bufmgr_gem *)bufmgr))
bufmgr->bo_alloc_userptr = drm_bacon_gem_bo_alloc_userptr;
else
bufmgr->bo_alloc_userptr = NULL;
return drm_bacon_bo_alloc_userptr(bufmgr, name, addr,
tiling_mode, stride, size, flags);
}
/**
* Returns a drm_bacon_bo wrapping the given buffer object handle.
*
@ -971,7 +948,7 @@ drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
HASH_FIND(name_hh, bufmgr_gem->name_table,
&handle, sizeof(handle), bo_gem);
if (bo_gem) {
drm_bacon_gem_bo_reference(&bo_gem->bo);
drm_bacon_bo_reference(&bo_gem->bo);
goto out;
}
@ -993,7 +970,7 @@ drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
HASH_FIND(handle_hh, bufmgr_gem->handle_table,
&open_arg.handle, sizeof(open_arg.handle), bo_gem);
if (bo_gem) {
drm_bacon_gem_bo_reference(&bo_gem->bo);
drm_bacon_bo_reference(&bo_gem->bo);
goto out;
}
@ -1277,10 +1254,14 @@ static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
drm_bacon_gem_bo_unreference_final(bo, time);
}
static void drm_bacon_gem_bo_unreference(drm_bacon_bo *bo)
void
drm_bacon_bo_unreference(drm_bacon_bo *bo)
{
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
if (bo == NULL)
return;
assert(p_atomic_read(&bo_gem->refcount) > 0);
if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
@ -1301,7 +1282,8 @@ static void drm_bacon_gem_bo_unreference(drm_bacon_bo *bo)
}
}
static int drm_bacon_gem_bo_map(drm_bacon_bo *bo, int write_enable)
int
drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -1528,7 +1510,8 @@ drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
return ret;
}
static int drm_bacon_gem_bo_unmap(drm_bacon_bo *bo)
int
drm_bacon_bo_unmap(drm_bacon_bo *bo)
{
drm_bacon_bufmgr_gem *bufmgr_gem;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -1585,9 +1568,9 @@ static int drm_bacon_gem_bo_unmap(drm_bacon_bo *bo)
return ret;
}
static int
drm_bacon_gem_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, const void *data)
int
drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -1615,9 +1598,9 @@ drm_bacon_gem_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
return ret;
}
static int
drm_bacon_gem_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, void *data)
int
drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -1646,8 +1629,8 @@ drm_bacon_gem_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
}
/** Waits for all GPU rendering with the object to have completed. */
static void
drm_bacon_gem_bo_wait_rendering(drm_bacon_bo *bo)
void
drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
{
drm_bacon_gem_bo_start_gtt_access(bo, 1);
}
@ -1691,10 +1674,10 @@ drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
DBG("%s:%d: Timed wait is not supported. Falling back to "
"infinite wait\n", __FILE__, __LINE__);
if (timeout_ns) {
drm_bacon_gem_bo_wait_rendering(bo);
drm_bacon_bo_wait_rendering(bo);
return 0;
} else {
return drm_bacon_gem_bo_busy(bo) ? -ETIME : 0;
return drm_bacon_bo_busy(bo) ? -ETIME : 0;
}
}
@ -1828,7 +1811,7 @@ do_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
if (target_bo != bo)
drm_bacon_gem_bo_reference(target_bo);
drm_bacon_bo_reference(target_bo);
bo_gem->relocs[bo_gem->reloc_count].offset = offset;
bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
@ -1848,6 +1831,7 @@ drm_bacon_gem_bo_add_softpin_target(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;
if (bo_gem->has_error)
return -ENOMEM;
@ -1874,16 +1858,16 @@ drm_bacon_gem_bo_add_softpin_target(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
bo_gem->softpin_target_size = new_size;
}
bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
drm_bacon_gem_bo_reference(target_bo);
drm_bacon_bo_reference(target_bo);
bo_gem->softpin_target_count++;
return 0;
}
static int
drm_bacon_gem_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
drm_bacon_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
int
drm_bacon_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
drm_bacon_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *)target_bo;
@ -2123,19 +2107,19 @@ skip_execution:
return ret;
}
static int
drm_bacon_gem_bo_exec2(drm_bacon_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
int
drm_bacon_bo_exec(drm_bacon_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
{
return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
-1, NULL, I915_EXEC_RENDER);
}
static int
drm_bacon_gem_bo_mrb_exec2(drm_bacon_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int flags)
int
drm_bacon_bo_mrb_exec(drm_bacon_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int flags)
{
return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
-1, NULL, flags);
@ -2197,9 +2181,9 @@ drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
return 0;
}
static int
drm_bacon_gem_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
int
drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -2225,9 +2209,9 @@ drm_bacon_gem_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
return ret;
}
static int
drm_bacon_gem_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
int
drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t *swizzle_mode)
{
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -2236,8 +2220,8 @@ drm_bacon_gem_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
return 0;
}
static int
drm_bacon_gem_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset)
int
drm_bacon_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset)
{
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -2273,7 +2257,7 @@ drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr, int prime_fd, int s
HASH_FIND(handle_hh, bufmgr_gem->handle_table,
&handle, sizeof(handle), bo_gem);
if (bo_gem) {
drm_bacon_gem_bo_reference(&bo_gem->bo);
drm_bacon_bo_reference(&bo_gem->bo);
goto out;
}
@ -2345,8 +2329,8 @@ drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
return 0;
}
static int
drm_bacon_gem_bo_flink(drm_bacon_bo *bo, uint32_t * name)
int
drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -2556,8 +2540,8 @@ drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count)
* performance. By emitting smaller batchbuffers, we eat some CPU overhead to
* get better parallelism.
*/
static int
drm_bacon_gem_check_aperture_space(drm_bacon_bo **bo_array, int count)
int
drm_bacon_bufmgr_check_aperture_space(drm_bacon_bo **bo_array, int count)
{
drm_bacon_bufmgr_gem *bufmgr_gem =
(drm_bacon_bufmgr_gem *) bo_array[0]->bufmgr;
@ -2585,8 +2569,8 @@ drm_bacon_gem_check_aperture_space(drm_bacon_bo **bo_array, int count)
* Disable buffer reuse for objects which are shared with the kernel
* as scanout buffers
*/
static int
drm_bacon_gem_bo_disable_reuse(drm_bacon_bo *bo)
int
drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
{
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -2594,8 +2578,8 @@ drm_bacon_gem_bo_disable_reuse(drm_bacon_bo *bo)
return 0;
}
static int
drm_bacon_gem_bo_is_reusable(drm_bacon_bo *bo)
int
drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
{
drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
@ -2629,8 +2613,8 @@ _drm_bacon_gem_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
}
/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_bacon_gem_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
int
drm_bacon_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
{
drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;
@ -2887,8 +2871,8 @@ drm_bacon_bufmgr_gem_find(int fd)
return NULL;
}
static void
drm_bacon_bufmgr_gem_unref(drm_bacon_bufmgr *bufmgr)
void
drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr)
{
drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
@ -3124,8 +3108,6 @@ drm_bacon_bufmgr_gem_init(int fd, int batch_size)
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_exec_async = ret == 0;
bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_wait_timeout = ret == 0;
@ -3145,11 +3127,6 @@ drm_bacon_bufmgr_gem_init(int fd, int batch_size)
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);
gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret == 0 && *gp.value > 0)
bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_bacon_gem_bo_set_softpin_offset;
/* Let's go with one relocation per every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
@ -3158,32 +3135,6 @@ drm_bacon_bufmgr_gem_init(int fd, int batch_size)
*/
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
bufmgr_gem->bufmgr.bo_alloc = drm_bacon_gem_bo_alloc;
bufmgr_gem->bufmgr.bo_alloc_for_render =
drm_bacon_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_alloc_tiled = drm_bacon_gem_bo_alloc_tiled;
bufmgr_gem->bufmgr.bo_reference = drm_bacon_gem_bo_reference;
bufmgr_gem->bufmgr.bo_unreference = drm_bacon_gem_bo_unreference;
bufmgr_gem->bufmgr.bo_map = drm_bacon_gem_bo_map;
bufmgr_gem->bufmgr.bo_unmap = drm_bacon_gem_bo_unmap;
bufmgr_gem->bufmgr.bo_subdata = drm_bacon_gem_bo_subdata;
bufmgr_gem->bufmgr.bo_get_subdata = drm_bacon_gem_bo_get_subdata;
bufmgr_gem->bufmgr.bo_wait_rendering = drm_bacon_gem_bo_wait_rendering;
bufmgr_gem->bufmgr.bo_emit_reloc = drm_bacon_gem_bo_emit_reloc;
bufmgr_gem->bufmgr.bo_get_tiling = drm_bacon_gem_bo_get_tiling;
bufmgr_gem->bufmgr.bo_set_tiling = drm_bacon_gem_bo_set_tiling;
bufmgr_gem->bufmgr.bo_flink = drm_bacon_gem_bo_flink;
bufmgr_gem->bufmgr.bo_exec = drm_bacon_gem_bo_exec2;
bufmgr_gem->bufmgr.bo_mrb_exec = drm_bacon_gem_bo_mrb_exec2;
bufmgr_gem->bufmgr.bo_busy = drm_bacon_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_bacon_gem_bo_madvise;
bufmgr_gem->bufmgr.destroy = drm_bacon_bufmgr_gem_unref;
bufmgr_gem->bufmgr.check_aperture_space =
drm_bacon_gem_check_aperture_space;
bufmgr_gem->bufmgr.bo_disable_reuse = drm_bacon_gem_bo_disable_reuse;
bufmgr_gem->bufmgr.bo_is_reusable = drm_bacon_gem_bo_is_reusable;
bufmgr_gem->bufmgr.bo_references = drm_bacon_gem_bo_references;
init_cache_buckets(bufmgr_gem);
list_inithead(&bufmgr_gem->vma_cache);

View file

@ -40,241 +40,6 @@
* Contains public methods followed by private storage for the buffer manager.
*/
struct _drm_bacon_bufmgr {
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped
* using bo_map() or drm_bacon_gem_bo_map_gtt() to be used by the CPU.
*/
drm_bacon_bo *(*bo_alloc) (drm_bacon_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a
* render target.
*
* This is otherwise the same as bo_alloc.
*/
drm_bacon_bo *(*bo_alloc_for_render) (drm_bacon_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
/**
* Allocate a buffer object from an existing user accessible
* address malloc'd with the provided size.
* Alignment is used when mapping to the gtt.
* Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED
*/
drm_bacon_bo *(*bo_alloc_userptr)(drm_bacon_bufmgr *bufmgr,
const char *name, void *addr,
uint32_t tiling_mode, uint32_t stride,
unsigned long size,
unsigned long flags);
/**
* Allocate a tiled buffer object.
*
* Alignment for tiled objects is set automatically; the 'flags'
* argument provides a hint about how the object will be used initially.
*
* Valid tiling formats are:
* I915_TILING_NONE
* I915_TILING_X
* I915_TILING_Y
*
* Note the tiling format may be rejected; callers should check the
* 'tiling_mode' field on return, as well as the pitch value, which
* may have been rounded up to accommodate for tiling restrictions.
*/
drm_bacon_bo *(*bo_alloc_tiled) (drm_bacon_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
/** Takes a reference on a buffer object */
void (*bo_reference) (drm_bacon_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
void (*bo_unreference) (drm_bacon_bo *bo);
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map) (drm_bacon_bo *bo, int write_enable);
/**
* Reduces the refcount on the userspace mapping of the buffer
* object.
*/
int (*bo_unmap) (drm_bacon_bo *bo);
/**
* Write data into an object.
*
* This is an optional function, if missing,
* drm_bacon_bo will map/memcpy/unmap.
*/
int (*bo_subdata) (drm_bacon_bo *bo, unsigned long offset,
unsigned long size, const void *data);
/**
* Read data from an object
*
* This is an optional function, if missing,
* drm_bacon_bo will map/memcpy/unmap.
*/
int (*bo_get_subdata) (drm_bacon_bo *bo, unsigned long offset,
unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map,
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
void (*bo_wait_rendering) (drm_bacon_bo *bo);
/**
* Tears down the buffer manager instance.
*/
void (*destroy) (drm_bacon_bufmgr *bufmgr);
/**
* Indicate if the buffer can be placed anywhere in the full ppgtt
* address range (2^48).
*
* Any resource used with flat/heapless (0x00000000-0xfffff000)
 * General State Heap (GSH) or Instruction State Heap (ISH) must
* be in a 32-bit range. 48-bit range will only be used when explicitly
* requested.
*
* \param bo Buffer to set the use_48b_address_range flag.
* \param enable The flag value.
*/
void (*bo_use_48b_address_range) (drm_bacon_bo *bo, uint32_t enable);
/**
* Add relocation entry in reloc_buf, which will be updated with the
 * target buffer's real offset on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param bo Buffer to write the relocation into.
* \param offset Byte offset within reloc_bo of the pointer to
* target_bo.
* \param target_bo Buffer whose offset should be written into the
* relocation entry.
* \param target_offset Constant value to be added to target_bo's
* offset in relocation entry.
* \param read_domains GEM read domains which the buffer will be
* read into by the command that this relocation
* is part of.
 * \param write_domain GEM write domain which the buffer will be
* dirtied in by the command that this
* relocation is part of.
*/
int (*bo_emit_reloc) (drm_bacon_bo *bo, uint32_t offset,
drm_bacon_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
/** Executes the command buffer pointed to by bo. */
int (*bo_exec) (drm_bacon_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
/** Executes the command buffer pointed to by bo on the selected
* ring buffer
*/
int (*bo_mrb_exec) (drm_bacon_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4, unsigned flags);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
int (*bo_set_tiling) (drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
*
* \param buf Buffer to get tiling mode for
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
int (*bo_get_tiling) (drm_bacon_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
/**
* Set the offset at which this buffer will be softpinned
* \param bo Buffer to set the softpin offset for
* \param offset Softpin offset
*/
int (*bo_set_softpin_offset) (drm_bacon_bo *bo, uint64_t offset);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*bo_flink) (drm_bacon_bo *bo, uint32_t * name);
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
int (*bo_busy) (drm_bacon_bo *bo);
/**
* Specify the volatility of the buffer.
 * \param bo Buffer to set the purgeable status of
* \param madv The purgeable status
*
* Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
* reclaimed under memory pressure. If you subsequently require the buffer,
* then you must pass I915_MADV_WILLNEED to mark the buffer as required.
*
* Returns 1 if the buffer was retained, or 0 if it was discarded whilst
* marked as I915_MADV_DONTNEED.
*/
int (*bo_madvise) (drm_bacon_bo *bo, int madv);
int (*check_aperture_space) (drm_bacon_bo ** bo_array, int count);
/**
* Disable buffer reuse for buffers which will be shared in some way,
* as with scanout buffers. When the buffer reference count goes to
* zero, it will be freed and not placed in the reuse list.
*
* \param bo Buffer to disable reuse for
*/
int (*bo_disable_reuse) (drm_bacon_bo *bo);
/**
* Query whether a buffer is reusable.
*
* \param bo Buffer to query
*/
int (*bo_is_reusable) (drm_bacon_bo *bo);
/** Returns true if target_bo is in the relocation tree rooted at bo. */
int (*bo_references) (drm_bacon_bo *bo, drm_bacon_bo *target_bo);
};
struct _drm_bacon_context {