mirror of
https://gitlab.freedesktop.org/mesa/drm.git
synced 2026-05-07 21:38:05 +02:00
Rename inappropriately named 'mask' fields to 'proposed_flags' instead.
Flags pending validation were stored in a misleadingly named field, 'mask'. As 'mask' is already used to indicate pieces of a flags field which are changing, it seems better to use a name reflecting the actual purpose of this field. I chose 'proposed_flags' as they may not actually end up in 'flags', and in any case will be modified when they are moved over. This affects the API, but not ABI of the user-mode interface.
This commit is contained in:
parent
37fb2ac407
commit
d1187641d6
14 changed files with 168 additions and 81 deletions
|
|
@ -2578,7 +2578,7 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
|
|||
buf->size = rep->size;
|
||||
buf->offset = rep->offset;
|
||||
buf->mapHandle = rep->arg_handle;
|
||||
buf->mask = rep->mask;
|
||||
buf->proposedFlags = rep->proposed_flags;
|
||||
buf->start = rep->buffer_start;
|
||||
buf->fenceFlags = rep->fence_flags;
|
||||
buf->replyFlags = rep->rep_flags;
|
||||
|
|
@ -2592,7 +2592,7 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
|
|||
|
||||
int drmBOCreate(int fd, unsigned long size,
|
||||
unsigned pageAlignment, void *user_buffer,
|
||||
uint64_t mask,
|
||||
uint64_t flags,
|
||||
unsigned hint, drmBO *buf)
|
||||
{
|
||||
struct drm_bo_create_arg arg;
|
||||
|
|
@ -2602,7 +2602,7 @@ int drmBOCreate(int fd, unsigned long size,
|
|||
|
||||
memset(buf, 0, sizeof(*buf));
|
||||
memset(&arg, 0, sizeof(arg));
|
||||
req->mask = mask;
|
||||
req->flags = flags;
|
||||
req->hint = hint;
|
||||
req->size = size;
|
||||
req->page_alignment = pageAlignment;
|
||||
|
|
|
|||
|
|
@ -110,7 +110,7 @@ typedef struct _drmBO
|
|||
unsigned handle;
|
||||
uint64_t mapHandle;
|
||||
uint64_t flags;
|
||||
uint64_t mask;
|
||||
uint64_t proposedFlags;
|
||||
unsigned mapFlags;
|
||||
unsigned long size;
|
||||
unsigned long offset;
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object *bo)
|
|||
|
||||
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
|
||||
|
||||
if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|
||||
if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|
||||
|| bo->mem.mem_type != bo->pinned_mem_type) {
|
||||
man = &bo->dev->bm.man[bo->mem.mem_type];
|
||||
list_add_tail(&bo->lru, &man->lru);
|
||||
|
|
@ -142,7 +142,7 @@ static int drm_bo_add_ttm(struct drm_buffer_object *bo)
|
|||
DRM_ASSERT_LOCKED(&bo->mutex);
|
||||
bo->ttm = NULL;
|
||||
|
||||
if (bo->mem.mask & DRM_BO_FLAG_WRITE)
|
||||
if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
|
||||
page_flags |= DRM_TTM_PAGE_WRITE;
|
||||
|
||||
switch (bo->type) {
|
||||
|
|
@ -214,11 +214,11 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
|
|||
|
||||
struct drm_bo_mem_reg *old_mem = &bo->mem;
|
||||
uint64_t save_flags = old_mem->flags;
|
||||
uint64_t save_mask = old_mem->mask;
|
||||
uint64_t save_proposed_flags = old_mem->proposed_flags;
|
||||
|
||||
*old_mem = *mem;
|
||||
mem->mm_node = NULL;
|
||||
old_mem->mask = save_mask;
|
||||
old_mem->proposed_flags = save_proposed_flags;
|
||||
DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
|
||||
|
||||
} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
|
||||
|
|
@ -708,7 +708,7 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
|
|||
evict_mem.mm_node = NULL;
|
||||
|
||||
evict_mem = bo->mem;
|
||||
evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
|
||||
evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
|
||||
ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
|
||||
|
||||
if (ret) {
|
||||
|
|
@ -872,7 +872,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
|
|||
|
||||
type_ok = drm_bo_mt_compatible(man,
|
||||
bo->type == drm_bo_type_user,
|
||||
mem_type, mem->mask,
|
||||
mem_type, mem->proposed_flags,
|
||||
&cur_flags);
|
||||
|
||||
if (!type_ok)
|
||||
|
|
@ -924,7 +924,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
|
|||
if (!drm_bo_mt_compatible(man,
|
||||
bo->type == drm_bo_type_user,
|
||||
mem_type,
|
||||
mem->mask,
|
||||
mem->proposed_flags,
|
||||
&cur_flags))
|
||||
continue;
|
||||
|
||||
|
|
@ -944,11 +944,25 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
|
|||
}
|
||||
EXPORT_SYMBOL(drm_bo_mem_space);
|
||||
|
||||
static int drm_bo_new_mask(struct drm_buffer_object *bo,
|
||||
uint64_t new_flags, uint64_t used_mask)
|
||||
/*
|
||||
* drm_bo_propose_flags:
|
||||
*
|
||||
* @bo: the buffer object getting new flags
|
||||
*
|
||||
* @new_flags: the new set of proposed flag bits
|
||||
*
|
||||
* @new_mask: the mask of bits changed in new_flags
|
||||
*
|
||||
* Modify the proposed_flag bits in @bo
|
||||
*/
|
||||
static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
|
||||
uint64_t new_flags, uint64_t new_mask)
|
||||
{
|
||||
uint32_t new_props;
|
||||
uint32_t new_access;
|
||||
|
||||
/* Copy unchanging bits from existing proposed_flags */
|
||||
DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
|
||||
|
||||
if (bo->type == drm_bo_type_user &&
|
||||
((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
|
||||
(DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
|
||||
|
|
@ -956,7 +970,7 @@ static int drm_bo_new_mask(struct drm_buffer_object *bo,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
|
||||
if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
|
||||
DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
|
@ -966,15 +980,15 @@ static int drm_bo_new_mask(struct drm_buffer_object *bo,
|
|||
return -EPERM;
|
||||
}
|
||||
|
||||
new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
|
||||
DRM_BO_FLAG_READ);
|
||||
new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
|
||||
DRM_BO_FLAG_READ);
|
||||
|
||||
if (!new_props) {
|
||||
if (new_access == 0) {
|
||||
DRM_ERROR("Invalid buffer object rwx properties\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bo->mem.mask = new_flags;
|
||||
bo->mem.proposed_flags = new_flags;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -1109,8 +1123,8 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
|
|||
|
||||
ret = 0;
|
||||
mutex_unlock(&bo->mutex);
|
||||
DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
|
||||
!drm_bo_check_unfenced(bo));
|
||||
DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
|
||||
!drm_bo_check_unfenced(bo));
|
||||
mutex_lock(&bo->mutex);
|
||||
if (ret == -EINTR)
|
||||
return -EAGAIN;
|
||||
|
|
@ -1146,7 +1160,7 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
|
|||
else
|
||||
rep->arg_handle = 0;
|
||||
|
||||
rep->mask = bo->mem.mask;
|
||||
rep->proposed_flags = bo->mem.proposed_flags;
|
||||
rep->buffer_start = bo->buffer_start;
|
||||
rep->fence_flags = bo->fence_type;
|
||||
rep->rep_flags = 0;
|
||||
|
|
@ -1292,7 +1306,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
|
|||
|
||||
/*
|
||||
* bo->mutex locked.
|
||||
* Note that new_mem_flags are NOT transferred to the bo->mem.mask.
|
||||
* Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
|
||||
*/
|
||||
|
||||
int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
|
||||
|
|
@ -1318,7 +1332,7 @@ int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
|
|||
|
||||
mem.num_pages = bo->num_pages;
|
||||
mem.size = mem.num_pages << PAGE_SHIFT;
|
||||
mem.mask = new_mem_flags;
|
||||
mem.proposed_flags = new_mem_flags;
|
||||
mem.page_alignment = bo->mem.page_alignment;
|
||||
|
||||
mutex_lock(&bm->evict_mutex);
|
||||
|
|
@ -1361,18 +1375,18 @@ out_unlock:
|
|||
|
||||
static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
|
||||
{
|
||||
uint32_t flag_diff = (mem->mask ^ mem->flags);
|
||||
uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
|
||||
|
||||
if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
|
||||
if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
|
||||
return 0;
|
||||
if ((flag_diff & DRM_BO_FLAG_CACHED) &&
|
||||
(/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
|
||||
(mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
|
||||
(/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
|
||||
(mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
|
||||
return 0;
|
||||
|
||||
if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
|
||||
((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
|
||||
(mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
|
||||
((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
|
||||
(mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
|
@ -1408,8 +1422,8 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
|
|||
uint32_t ftype;
|
||||
int ret;
|
||||
|
||||
DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
|
||||
(unsigned long long) bo->mem.mask,
|
||||
DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
|
||||
(unsigned long long) bo->mem.proposed_flags,
|
||||
(unsigned long long) bo->mem.flags);
|
||||
|
||||
ret = driver->fence_type(bo, &fence_class, &ftype);
|
||||
|
|
@ -1450,7 +1464,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
|
|||
*/
|
||||
|
||||
if (!drm_bo_mem_compat(&bo->mem)) {
|
||||
ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
|
||||
ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
|
||||
move_unfenced);
|
||||
if (ret) {
|
||||
if (ret != -EAGAIN)
|
||||
|
|
@ -1463,7 +1477,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
|
|||
* Pinned buffers.
|
||||
*/
|
||||
|
||||
if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
|
||||
if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
|
||||
bo->pinned_mem_type = bo->mem.mem_type;
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
list_del_init(&bo->pinned_lru);
|
||||
|
|
@ -1499,7 +1513,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
|
|||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
|
||||
/*
|
||||
* Validation has succeeded, move the access and other
|
||||
* non-mapping-related flag bits from the proposed flags to
|
||||
* the active flags
|
||||
*/
|
||||
|
||||
DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
|
||||
|
||||
/*
|
||||
* Finally, adjust lru to be sure.
|
||||
|
|
@ -1563,9 +1583,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
|
|||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
||||
DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
|
||||
ret = drm_bo_new_mask(bo, flags, mask);
|
||||
ret = drm_bo_modify_proposed_flags (bo, flags, mask);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
|
@ -1712,7 +1730,7 @@ out:
|
|||
int drm_buffer_object_create(struct drm_device *dev,
|
||||
unsigned long size,
|
||||
enum drm_bo_type type,
|
||||
uint64_t mask,
|
||||
uint64_t flags,
|
||||
uint32_t hint,
|
||||
uint32_t page_alignment,
|
||||
unsigned long buffer_start,
|
||||
|
|
@ -1757,12 +1775,14 @@ int drm_buffer_object_create(struct drm_device *dev,
|
|||
bo->mem.page_alignment = page_alignment;
|
||||
bo->buffer_start = buffer_start & PAGE_MASK;
|
||||
bo->priv_flags = 0;
|
||||
bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
|
||||
DRM_BO_FLAG_MAPPABLE;
|
||||
bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
|
||||
DRM_BO_FLAG_MAPPABLE;
|
||||
bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
|
||||
DRM_BO_FLAG_MAPPABLE);
|
||||
bo->mem.proposed_flags = 0;
|
||||
atomic_inc(&bm->count);
|
||||
ret = drm_bo_new_mask(bo, mask, mask);
|
||||
/*
|
||||
* Use drm_bo_modify_proposed_flags to error-check the proposed flags
|
||||
*/
|
||||
ret = drm_bo_modify_proposed_flags (bo, flags, flags);
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
|
|
@ -1831,18 +1851,21 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
|
|||
|
||||
bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
|
||||
|
||||
/*
|
||||
* User buffers cannot be shared
|
||||
*/
|
||||
if (bo_type == drm_bo_type_user)
|
||||
req->mask &= ~DRM_BO_FLAG_SHAREABLE;
|
||||
req->flags &= ~DRM_BO_FLAG_SHAREABLE;
|
||||
|
||||
ret = drm_buffer_object_create(file_priv->head->dev,
|
||||
req->size, bo_type, req->mask,
|
||||
req->size, bo_type, req->flags,
|
||||
req->hint, req->page_alignment,
|
||||
req->buffer_start, &entry);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = drm_bo_add_user_object(file_priv, entry,
|
||||
req->mask & DRM_BO_FLAG_SHAREABLE);
|
||||
req->flags & DRM_BO_FLAG_SHAREABLE);
|
||||
if (ret) {
|
||||
drm_bo_usage_deref_unlocked(&entry);
|
||||
goto out;
|
||||
|
|
@ -2034,7 +2057,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,
|
|||
DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
|
||||
"cleanup. Removing flag and evicting.\n");
|
||||
bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
|
||||
bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
|
||||
bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
|
||||
}
|
||||
|
||||
if (bo->mem.mem_type == mem_type)
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ int drm_bo_move_ttm(struct drm_buffer_object *bo,
|
|||
struct drm_ttm *ttm = bo->ttm;
|
||||
struct drm_bo_mem_reg *old_mem = &bo->mem;
|
||||
uint64_t save_flags = old_mem->flags;
|
||||
uint64_t save_mask = old_mem->mask;
|
||||
uint64_t save_proposed_flags = old_mem->proposed_flags;
|
||||
int ret;
|
||||
|
||||
if (old_mem->mem_type == DRM_BO_MEM_TT) {
|
||||
|
|
@ -78,7 +78,7 @@ int drm_bo_move_ttm(struct drm_buffer_object *bo,
|
|||
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
old_mem->mask = save_mask;
|
||||
old_mem->proposed_flags = save_proposed_flags;
|
||||
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -210,7 +210,7 @@ int drm_bo_move_memcpy(struct drm_buffer_object *bo,
|
|||
void *new_iomap;
|
||||
int ret;
|
||||
uint64_t save_flags = old_mem->flags;
|
||||
uint64_t save_mask = old_mem->mask;
|
||||
uint64_t save_proposed_flags = old_mem->proposed_flags;
|
||||
unsigned long i;
|
||||
unsigned long page;
|
||||
unsigned long add = 0;
|
||||
|
|
@ -255,7 +255,7 @@ out2:
|
|||
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
old_mem->mask = save_mask;
|
||||
old_mem->proposed_flags = save_proposed_flags;
|
||||
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
|
||||
|
||||
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
|
||||
|
|
@ -330,7 +330,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
|
|||
struct drm_bo_mem_reg *old_mem = &bo->mem;
|
||||
int ret;
|
||||
uint64_t save_flags = old_mem->flags;
|
||||
uint64_t save_mask = old_mem->mask;
|
||||
uint64_t save_proposed_flags = old_mem->proposed_flags;
|
||||
struct drm_buffer_object *old_obj;
|
||||
|
||||
if (bo->fence)
|
||||
|
|
@ -399,7 +399,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
|
|||
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
old_mem->mask = save_mask;
|
||||
old_mem->proposed_flags = save_proposed_flags;
|
||||
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -385,8 +385,20 @@ struct drm_bo_mem_reg {
|
|||
unsigned long num_pages;
|
||||
uint32_t page_alignment;
|
||||
uint32_t mem_type;
|
||||
/*
|
||||
* Current buffer status flags, indicating
|
||||
* where the buffer is located and which
|
||||
* access modes are in effect
|
||||
*/
|
||||
uint64_t flags;
|
||||
uint64_t mask;
|
||||
/**
|
||||
* These are the flags proposed for
|
||||
* a validate operation. If the
|
||||
* validate succeeds, they'll get moved
|
||||
* into the flags field
|
||||
*/
|
||||
uint64_t proposed_flags;
|
||||
|
||||
uint32_t desired_tile_stride;
|
||||
uint32_t hw_tile_stride;
|
||||
};
|
||||
|
|
@ -511,9 +523,36 @@ struct drm_bo_driver {
|
|||
int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
|
||||
int (*init_mem_type) (struct drm_device *dev, uint32_t type,
|
||||
struct drm_mem_type_manager *man);
|
||||
uint32_t(*evict_mask) (struct drm_buffer_object *bo);
|
||||
/*
|
||||
* evict_flags:
|
||||
*
|
||||
* @bo: the buffer object to be evicted
|
||||
*
|
||||
* Return the bo flags for a buffer which is not mapped to the hardware.
|
||||
* These will be placed in proposed_flags so that when the move is
|
||||
* finished, they'll end up in bo->mem.flags
|
||||
*/
|
||||
uint64_t(*evict_flags) (struct drm_buffer_object *bo);
|
||||
/*
|
||||
* move:
|
||||
*
|
||||
* @bo: the buffer to move
|
||||
*
|
||||
* @evict: whether this motion is evicting the buffer from
|
||||
* the graphics address space
|
||||
*
|
||||
* @no_wait: whether this should give up and return -EBUSY
|
||||
* if this move would require sleeping
|
||||
*
|
||||
* @new_mem: the new memory region receiving the buffer
|
||||
*
|
||||
* Move a buffer between two memory regions.
|
||||
*/
|
||||
int (*move) (struct drm_buffer_object *bo,
|
||||
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
|
||||
/*
|
||||
* ttm_cache_flush
|
||||
*/
|
||||
void (*ttm_cache_flush)(struct drm_ttm *ttm);
|
||||
};
|
||||
|
||||
|
|
@ -554,7 +593,7 @@ extern int drm_fence_buffer_objects(struct drm_device *dev,
|
|||
struct drm_fence_object **used_fence);
|
||||
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
|
||||
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
|
||||
enum drm_bo_type type, uint64_t mask,
|
||||
enum drm_bo_type type, uint64_t flags,
|
||||
uint32_t hint, uint32_t page_alignment,
|
||||
unsigned long buffer_start,
|
||||
struct drm_buffer_object **bo);
|
||||
|
|
|
|||
|
|
@ -751,10 +751,10 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
|
|||
*/
|
||||
|
||||
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
|
||||
uint32_t new_mask = bo->mem.mask |
|
||||
uint32_t new_flags = bo->mem.proposed_flags |
|
||||
DRM_BO_FLAG_MAPPABLE |
|
||||
DRM_BO_FLAG_FORCE_MAPPABLE;
|
||||
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
|
||||
err = drm_bo_move_buffer(bo, new_flags, 0, 0);
|
||||
if (err) {
|
||||
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
|
||||
goto out_unlock;
|
||||
|
|
|
|||
|
|
@ -38,11 +38,11 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
|
|||
return drm_agp_init_ttm(dev);
|
||||
}
|
||||
|
||||
int i915_fence_types(struct drm_buffer_object *bo,
|
||||
int i915_fence_type(struct drm_buffer_object *bo,
|
||||
uint32_t *fclass,
|
||||
uint32_t *type)
|
||||
{
|
||||
if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
|
||||
if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
|
||||
*type = 3;
|
||||
else
|
||||
*type = 1;
|
||||
|
|
@ -110,7 +110,16 @@ int i915_init_mem_type(struct drm_device *dev, uint32_t type,
|
|||
return 0;
|
||||
}
|
||||
|
||||
uint32_t i915_evict_mask(struct drm_buffer_object *bo)
|
||||
/*
|
||||
* i915_evict_flags:
|
||||
*
|
||||
* @bo: the buffer object to be evicted
|
||||
*
|
||||
* Return the bo flags for a buffer which is not mapped to the hardware.
|
||||
* These will be placed in proposed_flags so that when the move is
|
||||
* finished, they'll end up in bo->mem.flags
|
||||
*/
|
||||
uint64_t i915_evict_flags(struct drm_buffer_object *bo)
|
||||
{
|
||||
switch (bo->mem.mem_type) {
|
||||
case DRM_BO_MEM_LOCAL:
|
||||
|
|
|
|||
|
|
@ -61,10 +61,10 @@ static struct drm_bo_driver i915_bo_driver = {
|
|||
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
|
||||
.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
|
||||
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
|
||||
.fence_type = i915_fence_types,
|
||||
.fence_type = i915_fence_type,
|
||||
.invalidate_caches = i915_invalidate_caches,
|
||||
.init_mem_type = i915_init_mem_type,
|
||||
.evict_mask = i915_evict_mask,
|
||||
.evict_flags = i915_evict_flags,
|
||||
.move = i915_move,
|
||||
.ttm_cache_flush = i915_flush_ttm,
|
||||
};
|
||||
|
|
|
|||
|
|
@ -56,7 +56,7 @@ nouveau_bo_fence_type(struct drm_buffer_object *bo,
|
|||
{
|
||||
/* When we get called, *fclass is set to the requested fence class */
|
||||
|
||||
if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
|
||||
if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
|
||||
*type = 3;
|
||||
else
|
||||
*type = 1;
|
||||
|
|
@ -130,8 +130,8 @@ nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static uint32_t
|
||||
nouveau_bo_evict_mask(struct drm_buffer_object *bo)
|
||||
static uint64_t
|
||||
nouveau_bo_evict_flags(struct drm_buffer_object *bo)
|
||||
{
|
||||
switch (bo->mem.mem_type) {
|
||||
case DRM_BO_MEM_LOCAL:
|
||||
|
|
@ -207,8 +207,9 @@ nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
|
|||
|
||||
tmp_mem = *new_mem;
|
||||
tmp_mem.mm_node = NULL;
|
||||
tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
|
||||
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
|
||||
tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
|
||||
DRM_BO_FLAG_CACHED |
|
||||
DRM_BO_FLAG_FORCE_CACHING);
|
||||
|
||||
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
|
||||
|
||||
|
|
@ -291,7 +292,7 @@ struct drm_bo_driver nouveau_bo_driver = {
|
|||
.fence_type = nouveau_bo_fence_type,
|
||||
.invalidate_caches = nouveau_bo_invalidate_caches,
|
||||
.init_mem_type = nouveau_bo_init_mem_type,
|
||||
.evict_mask = nouveau_bo_evict_mask,
|
||||
.evict_flags = nouveau_bo_evict_flags,
|
||||
.move = nouveau_bo_move,
|
||||
.ttm_cache_flush= nouveau_bo_flush_ttm
|
||||
};
|
||||
|
|
|
|||
|
|
@ -144,7 +144,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
|
|||
return 0;
|
||||
}
|
||||
|
||||
uint32_t via_evict_mask(struct drm_buffer_object *bo)
|
||||
uint64_t via_evict_flags(struct drm_buffer_object *bo)
|
||||
{
|
||||
switch (bo->mem.mem_type) {
|
||||
case DRM_BO_MEM_LOCAL:
|
||||
|
|
|
|||
|
|
@ -661,6 +661,10 @@ struct drm_fence_arg {
|
|||
#define DRM_BO_FLAG_WRITE (1ULL << 1)
|
||||
#define DRM_BO_FLAG_EXE (1ULL << 2)
|
||||
|
||||
/*
|
||||
* All of the bits related to access mode
|
||||
*/
|
||||
#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
|
||||
/*
|
||||
* Status flags. Can be read to determine the actual state of a buffer.
|
||||
* Can also be set in the buffer mask before validation.
|
||||
|
|
@ -741,10 +745,21 @@ struct drm_fence_arg {
|
|||
#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
|
||||
/* We can add more of these now with a 64-bit flag type */
|
||||
|
||||
/* Memory flag mask */
|
||||
/*
|
||||
* This is a mask covering all of the memory type flags; easier to just
|
||||
* use a single constant than a bunch of | values. It covers
|
||||
* DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
|
||||
*/
|
||||
#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
|
||||
#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL
|
||||
|
||||
/*
|
||||
* This adds all of the CPU-mapping options in with the memory
|
||||
* type to label all bits which change how the page gets mapped
|
||||
*/
|
||||
#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
|
||||
DRM_BO_FLAG_CACHED_MAPPED | \
|
||||
DRM_BO_FLAG_CACHED | \
|
||||
DRM_BO_FLAG_MAPPABLE)
|
||||
|
||||
/* Driver-private flags */
|
||||
#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
|
||||
|
||||
|
|
@ -794,7 +809,7 @@ struct drm_bo_info_req {
|
|||
};
|
||||
|
||||
struct drm_bo_create_req {
|
||||
uint64_t mask;
|
||||
uint64_t flags;
|
||||
uint64_t size;
|
||||
uint64_t buffer_start;
|
||||
unsigned int hint;
|
||||
|
|
@ -810,7 +825,7 @@ struct drm_bo_create_req {
|
|||
|
||||
struct drm_bo_info_rep {
|
||||
uint64_t flags;
|
||||
uint64_t mask;
|
||||
uint64_t proposed_flags;
|
||||
uint64_t size;
|
||||
uint64_t offset;
|
||||
uint64_t arg_handle;
|
||||
|
|
|
|||
|
|
@ -303,12 +303,12 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f
|
|||
#ifdef I915_HAVE_BUFFER
|
||||
/* i915_buffer.c */
|
||||
extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
|
||||
extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
|
||||
uint32_t *type);
|
||||
extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
|
||||
uint32_t *type);
|
||||
extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
|
||||
extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
|
||||
struct drm_mem_type_manager *man);
|
||||
extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
|
||||
extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
|
||||
extern int i915_move(struct drm_buffer_object *bo, int evict,
|
||||
int no_wait, struct drm_bo_mem_reg *new_mem);
|
||||
void i915_flush_ttm(struct drm_ttm *ttm);
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ static struct drm_bo_driver via_bo_driver = {
|
|||
.fence_type = via_fence_types,
|
||||
.invalidate_caches = via_invalidate_caches,
|
||||
.init_mem_type = via_init_mem_type,
|
||||
.evict_mask = via_evict_mask,
|
||||
.evict_flags = via_evict_flags,
|
||||
.move = NULL,
|
||||
};
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -211,7 +211,7 @@ extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
|
|||
extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
|
||||
extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
|
||||
struct drm_mem_type_manager *man);
|
||||
extern uint32_t via_evict_mask(struct drm_buffer_object *bo);
|
||||
extern uint64_t via_evict_flags(struct drm_buffer_object *bo);
|
||||
extern int via_move(struct drm_buffer_object *bo, int evict,
|
||||
int no_wait, struct drm_bo_mem_reg *new_mem);
|
||||
#endif
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue