Mirror of https://gitlab.freedesktop.org/mesa/drm.git
commit 41c3f1ac56 (parent 5c7574555d)

    Checkpoint: Use bool for boolean.

19 changed files with 189 additions and 189 deletions
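The change is mechanical but touches every boolean-carrying declaration: parameters, locals, and struct fields move from int to bool, and the bare 0/1 arguments at call sites become false/true, as the hunks below show for ttm_bo_wait(), ttm_bo_reserve() and friends. A minimal compilable sketch of the before/after shape (toy types standing in for the real TTM structs):

    #include <stdbool.h>
    #include <stdio.h>

    struct buffer { bool reserved; };   /* toy stand-in for a TTM object */

    /* Old style: int flags, call sites pass 0/1 and are easy to misread.
     * New style: bool flags, any nonzero value normalizes to true and
     * the call site documents itself. */
    static int wait_on(struct buffer *buf, bool lazy, bool interruptible,
                       bool no_wait)
    {
        if (no_wait && buf->reserved)
            return -1;                  /* -EBUSY in the kernel */
        (void)lazy; (void)interruptible;
        return 0;
    }

    int main(void)
    {
        struct buffer buf = { .reserved = false };
        /* compare the old call style: wait_on(&buf, 0, 0, 0); */
        return wait_on(&buf, false, false, false);
    }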
@@ -128,7 +128,7 @@ uint32_t via_evict_flags(struct ttm_buffer_object * bo)
 }

 static int via_move_dmablit(struct ttm_buffer_object *bo,
-                            int evict, int no_wait, struct ttm_mem_reg *new_mem)
+                            bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
 {
     struct drm_via_private *dev_priv =
         container_of(bo->bdev, struct drm_via_private, bdev);

@@ -165,7 +165,7 @@ static int via_move_dmablit(struct ttm_buffer_object *bo,
 }

 static int via_move_vram_tt(struct ttm_buffer_object *bo,
-                            int evict, int no_wait, struct ttm_mem_reg *new_mem)
+                            bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
 {
     struct ttm_mem_reg *old_mem = &bo->mem;
     int ret;

@@ -191,7 +191,7 @@ static int via_move_vram_tt(struct ttm_buffer_object *bo,
     tmp_mem.proposed_flags |= (TTM_PL_FLAG_SYSTEM |
                                TTM_PL_FLAG_CACHED);
     tmp_mem.mem_type = TTM_PL_SYSTEM;
-    ret = via_move_dmablit(bo, 1, no_wait, &tmp_mem);
+    ret = via_move_dmablit(bo, true, no_wait, &tmp_mem);
     if (ret)
         return ret;
     return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);

@@ -212,7 +212,7 @@ static void via_move_null(struct ttm_buffer_object *bo,
 }

 int via_bo_move(struct ttm_buffer_object *bo,
-                int evict, int interruptible, int no_wait,
+                bool evict, bool interruptible, bool no_wait,
                 struct ttm_mem_reg *new_mem)
 {
     struct ttm_mem_reg *old_mem = &bo->mem;
@@ -282,8 +282,8 @@ extern int via_invalidate_caches(struct ttm_bo_device *bdev,
 extern int via_init_mem_type(struct ttm_bo_device *dev, uint32_t type,
                              struct ttm_mem_type_manager *man);
 extern uint32_t via_evict_flags(struct ttm_buffer_object *bo);
-extern int via_bo_move(struct ttm_buffer_object *bo, int evict,
-                       int interruptible, int no_wait, struct ttm_mem_reg *new_mem);
+extern int via_bo_move(struct ttm_buffer_object *bo, bool evict,
+                       bool interruptible, bool no_wait, struct ttm_mem_reg *new_mem);
 extern void via_dma_initialize(struct drm_via_private *dev_priv);
 extern void via_dma_takedown(struct drm_via_private *dev_priv);
 extern void via_wait_idle(struct drm_via_private *dev_priv);
@@ -105,7 +105,7 @@ via_placement_fence_type(struct ttm_buffer_object *bo,
     if (old_fence && ((new_fence_class != old_fence->fence_class) ||
                       ((n_fence_type ^ old_fence_types) &
                        old_fence_types))) {
-        ret = ttm_bo_wait(bo, 0, 0, 0);
+        ret = ttm_bo_wait(bo, false, false, false);
         if (unlikely(ret != 0))
             return ret;
     }

@@ -559,7 +559,7 @@ static int via_validate_buffer_list(struct drm_file *file_priv,
         if (unlikely(ret != 0))
             goto out_err;

-        ret = ttm_buffer_object_validate(bo, 1, 0);
+        ret = ttm_buffer_object_validate(bo, true, false);

         if (unlikely(ret != 0))
             goto out_err;
@@ -252,7 +252,7 @@ static int via_exec_init(struct drm_device *dev)
 {
     struct drm_via_private *dev_priv = via_priv(dev);
     int ret;
-    int dummy;
+    bool dummy;

     memset(dev_priv->barriers, 0, sizeof(dev_priv->barriers));

@@ -264,7 +264,7 @@ static int via_exec_init(struct drm_device *dev)
                                  ttm_bo_type_kernel,
                                  TTM_PL_FLAG_VRAM |
                                  TTM_PL_FLAG_NO_EVICT,
-                                 0, 0, 0, NULL, &dev_priv->fence_bo);
+                                 0, 0, false, NULL, &dev_priv->fence_bo);
     if (unlikely(ret))
         return ret;

@@ -286,7 +286,7 @@ static int via_exec_init(struct drm_device *dev)
                                  ttm_bo_type_kernel,
                                  TTM_PL_FLAG_VRAM |
                                  TTM_PL_FLAG_NO_EVICT,
-                                 0, 0, 0, NULL, &dev_priv->vq_bo);
+                                 0, 0, false, NULL, &dev_priv->vq_bo);
     if (unlikely(ret))
         goto out_err1;

@@ -300,7 +300,7 @@ static int via_exec_init(struct drm_device *dev)
                                  ttm_bo_type_kernel,
                                  TTM_PL_FLAG_TT |
                                  TTM_PL_FLAG_NO_EVICT,
-                                 0, 0, 0, NULL, &dev_priv->agpc_bo);
+                                 0, 0, false, NULL, &dev_priv->agpc_bo);
     if (unlikely(ret))
         goto out_err2;

@@ -774,7 +774,7 @@ static int via_firstopen_locked(struct drm_device *dev)
                                  ttm_bo_type_kernel,
                                  TTM_PL_FLAG_TT |
                                  TTM_PL_FLAG_NO_EVICT,
-                                 0, 0, 0, NULL, &dev_priv->agp_bo);
+                                 0, 0, false, NULL, &dev_priv->agp_bo);
     if (ret)
         return ret;

@@ -181,7 +181,7 @@ ssize_t via_ttm_write(struct file * filp, const char __user * buf,
     struct drm_file *file_priv = (struct drm_file *)filp->private_data;
     struct drm_via_private *dev_priv = via_priv(file_priv->minor->dev);

-    return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, 1);
+    return ttm_bo_io(&dev_priv->bdev, filp, buf, NULL, count, f_pos, true);
 }

 ssize_t via_ttm_read(struct file * filp, char __user * buf,

@@ -190,7 +190,7 @@ ssize_t via_ttm_read(struct file * filp, char __user * buf,
     struct drm_file *file_priv = (struct drm_file *)filp->private_data;
     struct drm_via_private *dev_priv = via_priv(file_priv->minor->dev);

-    return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, 1);
+    return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos, false);
 }

 int via_verify_access(struct ttm_buffer_object *bo, struct file *filp)
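Note the asymmetry in the two hunks above: via_ttm_write's final ttm_bo_io() argument becomes true, while via_ttm_read's becomes false, even though both previously passed the literal 1. Spelled as a bool, the direction flag is much harder to get wrong at the call site. A toy model of such a combined read/write entry point (hypothetical helper, not the kernel function):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* One entry point serving both directions, selected by a bool.
     * With an int parameter, a stray 1 in the read path does not stand
     * out; as a bool it must be written true or false. */
    static size_t toy_bo_io(char *rbuf, const char *wbuf, size_t count,
                            bool write)
    {
        if (write)
            printf("write %zu bytes from %p\n", count, (const void *)wbuf);
        else
            printf("read %zu bytes into %p\n", count, (void *)rbuf);
        return count;
    }

    int main(void)
    {
        char buf[16];
        toy_bo_io(buf, NULL, sizeof(buf), false);   /* read path  */
        toy_bo_io(NULL, buf, sizeof(buf), true);    /* write path */
        return 0;
    }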
@@ -70,12 +70,12 @@ static void ttm_bo_release_list(struct kref *list_kref)
     if (bo->destroy)
         bo->destroy(bo);
     else {
-        ttm_mem_global_free(bdev->mem_glob, bo->acc_size, 0);
+        ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
         kfree(bo);
     }
 }

-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, int interruptible)
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 {

     if (interruptible) {

@@ -139,14 +139,14 @@ static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 }

 int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
-                          int interruptible,
-                          int no_wait, int use_sequence, uint32_t sequence)
+                          bool interruptible,
+                          bool no_wait, bool use_sequence, uint32_t sequence)
 {
     struct ttm_bo_device *bdev = bo->bdev;

     while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
         if (use_sequence && bo->seq_valid &&
-                (sequence - bo->val_seq < (1 << 31))) {
+            (sequence - bo->val_seq < (1 << 31))) {
             return -EAGAIN;
         }

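The sequence test that this hunk carries over relies on unsigned wraparound arithmetic: subtracting two uint32_t sequence numbers and comparing the difference against 2^31 asks whether one is "no older" than the other, and stays correct when the counter wraps. A standalone illustration (assuming 32-bit sequence numbers, as val_seq is uint32_t):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* "newer or equal" under modular arithmetic: true when seq is within
     * 2^31 steps ahead of val_seq, correct even across wraparound. */
    static bool seq_newer_or_equal(uint32_t seq, uint32_t val_seq)
    {
        return seq - val_seq < (UINT32_C(1) << 31);
    }

    int main(void)
    {
        printf("%d\n", seq_newer_or_equal(5, 3));           /* 1 */
        printf("%d\n", seq_newer_or_equal(3, 5));           /* 0 */
        /* wraparound: 1 is "newer" than 0xffffffff */
        printf("%d\n", seq_newer_or_equal(1, 0xffffffffu)); /* 1 */
        return 0;
    }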
@@ -163,9 +163,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,

     if (use_sequence) {
         bo->val_seq = sequence;
-        bo->seq_valid = 1;
+        bo->seq_valid = true;
     } else {
-        bo->seq_valid = 0;
+        bo->seq_valid = false;
     }

     return 0;

@@ -177,8 +177,8 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
 }

 int ttm_bo_reserve(struct ttm_buffer_object *bo,
-                   int interruptible,
-                   int no_wait, int use_sequence, uint32_t sequence)
+                   bool interruptible,
+                   bool no_wait, bool use_sequence, uint32_t sequence)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     int put_count;

@@ -252,11 +252,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo)

 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                   struct ttm_mem_reg *mem,
-                                  int evict, int interruptible, int no_wait)
+                                  bool evict, bool interruptible, bool no_wait)
 {
     struct ttm_bo_device *bdev = bo->bdev;
-    int old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
-    int new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+    bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+    bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
     struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
     struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
     int ret = 0;

@@ -341,7 +341,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 }

 static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
-                                  int allow_errors)
+                                  bool allow_errors)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_bo_driver *driver = bdev->driver;

@@ -351,14 +351,14 @@ static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
         unsigned long _end = jiffies + 3 * HZ;
         int ret;
         do {
-            ret = ttm_bo_wait(bo, 0, 0, 0);
+            ret = ttm_bo_wait(bo, false, false, false);
             if (ret && allow_errors)
                 return ret;

         } while (ret && !time_after_eq(jiffies, _end));

         if (bo->sync_obj) {
-            bdev->nice_mode = 0;
+            bdev->nice_mode = false;
             printk(KERN_ERR "Detected probable GPU lockup. "
                    "Evicting buffer.\n");
         }

@@ -378,7 +378,7 @@ static int ttm_bo_expire_sync_obj(struct ttm_buffer_object *bo,
  * up the list_kref and schedule a delayed list check.
  */

-static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, int remove_all)
+static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_bo_driver *driver = bdev->driver;

@@ -392,7 +392,7 @@ static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, int remove_all)
     }

     if (bo->sync_obj && remove_all)
-        (void)ttm_bo_expire_sync_obj(bo, 0);
+        (void)ttm_bo_expire_sync_obj(bo, false);

     if (!bo->sync_obj) {
         int put_count;

@@ -441,7 +441,7 @@ static void ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, int remove_all)
  * encountered buffers.
  */

-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, int remove_all)
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
     struct ttm_buffer_object *entry, *nentry;
     struct list_head *list, *next;

@@ -494,7 +494,7 @@ static void ttm_bo_delayed_workqueue(struct work_struct *work)
     struct ttm_bo_device *bdev =
         container_of(work, struct ttm_bo_device, wq.work);

-    if (ttm_bo_delayed_delete(bdev, 0)) {
+    if (ttm_bo_delayed_delete(bdev, false)) {
         schedule_delayed_work(&bdev->wq,
                               ((HZ / 100) < 1) ? 1 : HZ / 100);
     }

@@ -511,7 +511,7 @@ static void ttm_bo_release(struct kref *kref)
         drm_mm_put_block(bo->vm_node);
     }
     write_unlock(&bdev->vm_lock);
-    ttm_bo_cleanup_refs(bo, 0);
+    ttm_bo_cleanup_refs(bo, false);
     kref_put(&bo->list_kref, ttm_bo_release_list);
     write_lock(&bdev->vm_lock);
 }

@@ -528,7 +528,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }

 static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-                        int interruptible, int no_wait)
+                        bool interruptible, bool no_wait)
 {
     int ret = 0;
     struct ttm_bo_device *bdev = bo->bdev;

@@ -537,7 +537,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
     if (bo->mem.mem_type != mem_type)
         goto out;

-    ret = ttm_bo_wait(bo, 0, interruptible, no_wait);
+    ret = ttm_bo_wait(bo, false, interruptible, no_wait);
     if (ret && ret != -ERESTART) {
         printk(KERN_ERR "Failed to expire sync object before "
                "buffer eviction.\n");

@@ -566,7 +566,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
         goto out;
     }

-    ret = ttm_bo_handle_move_mem(bo, &evict_mem, 1, interruptible, no_wait);
+    ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait);
     if (ret) {
         if (ret != -ERESTART)
             printk(KERN_ERR "Buffer eviction failed\n");

@@ -594,7 +594,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
                                   struct ttm_mem_reg *mem,
                                   uint32_t mem_type,
-                                  int interruptible, int no_wait)
+                                  bool interruptible, bool no_wait)
 {
     struct drm_mm_node *node;
     struct ttm_buffer_object *entry;

@@ -624,7 +624,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
         kref_get(&entry->list_kref);

         ret =
-            ttm_bo_reserve_locked(entry, interruptible, no_wait, 0, 0);
+            ttm_bo_reserve_locked(entry, interruptible, no_wait, false, 0);

         if (likely(ret == 0))
             put_count = ttm_bo_del_from_lru(entry);

@@ -667,21 +667,21 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
     return 0;
 }

-static int ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
-                                int disallow_fixed,
-                                uint32_t mem_type,
-                                uint32_t mask, uint32_t * res_mask)
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+                                 bool disallow_fixed,
+                                 uint32_t mem_type,
+                                 uint32_t mask, uint32_t * res_mask)
 {
     uint32_t cur_flags = ttm_bo_type_flags(mem_type);

     if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
-        return 0;
+        return false;

     if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
-        return 0;
+        return false;

     if ((mask & man->available_caching) == 0)
-        return 0;
+        return false;
     if (mask & man->default_caching)
         cur_flags |= man->default_caching;
     else if (mask & TTM_PL_FLAG_CACHED)

@@ -692,7 +692,7 @@ static int ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
         cur_flags |= TTM_PL_FLAG_UNCACHED;

     *res_mask = cur_flags;
-    return 1;
+    return true;
 }

 /**
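ttm_bo_mt_compatible() is one of the few functions whose return type changes as well: it was an int used purely as a predicate, so it becomes a bool that reports success while filling *res_mask as an out-parameter. A compilable sketch of that shape (toy flag values standing in for the TTM_PL_* placement flags):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_FIXED  0x1u            /* toy flags, not the real values */
    #define FLAG_CACHED 0x2u

    /* bool predicate that also fills an out-parameter on success */
    static bool mt_compatible(uint32_t man_flags, bool disallow_fixed,
                              uint32_t mask, uint32_t *res_mask)
    {
        uint32_t cur_flags = 0;

        if ((man_flags & FLAG_FIXED) && disallow_fixed)
            return false;               /* was: return 0; */
        if (mask & FLAG_CACHED)
            cur_flags |= FLAG_CACHED;

        *res_mask = cur_flags;
        return true;                    /* was: return 1; */
    }

    int main(void)
    {
        uint32_t res;
        if (mt_compatible(FLAG_FIXED, false, FLAG_CACHED, &res))
            printf("compatible, res_mask=0x%x\n", res);
        return 0;
    }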
@@ -704,7 +704,7 @@ static int ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  * space.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-                     struct ttm_mem_reg *mem, int interruptible, int no_wait)
+                     struct ttm_mem_reg *mem, bool interruptible, bool no_wait)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_mem_type_manager *man;

@@ -714,9 +714,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
     uint32_t i;
     uint32_t mem_type = TTM_PL_SYSTEM;
     uint32_t cur_flags = 0;
-    int type_found = 0;
-    int type_ok = 0;
-    int has_eagain = 0;
+    bool type_found = false;
+    bool type_ok = false;
+    bool has_eagain = false;
     struct drm_mm_node *node = NULL;
     int ret;

@@ -737,7 +737,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
             break;

         if (man->has_type && man->use_type) {
-            type_found = 1;
+            type_found = true;
             do {
                 ret = drm_mm_pre_get(&man->manager);
                 if (unlikely(ret))

@@ -798,7 +798,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
         }

         if (ret == -ERESTART)
-            has_eagain = 1;
+            has_eagain = true;
     }

     ret = (has_eagain) ? -ERESTART : -ENOMEM;

@@ -832,7 +832,7 @@ static int ttm_bo_busy(struct ttm_buffer_object *bo)
     return 0;
 }

-int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, int no_wait)
+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 {
     int ret = 0;

@@ -854,7 +854,7 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, int no_wait)
  */

 int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
-                       int interruptible, int no_wait)
+                       bool interruptible, bool no_wait)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     int ret = 0;

@@ -869,7 +869,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
      */

     ttm_bo_busy(bo);
-    ret = ttm_bo_wait(bo, 0, interruptible, no_wait);
+    ret = ttm_bo_wait(bo, false, interruptible, no_wait);
     if (ret)
         return ret;

@@ -886,7 +886,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, uint32_t new_mem_flags,
     if (ret)
         goto out_unlock;

-    ret = ttm_bo_handle_move_mem(bo, &mem, 0, interruptible, no_wait);
+    ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

 out_unlock:
     if (ret && mem.mm_node) {

@@ -908,7 +908,7 @@ static int ttm_bo_mem_compat(struct ttm_mem_reg *mem)
 }

 int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-                               int interruptible, int no_wait)
+                               bool interruptible, bool no_wait)
 {
     int ret;

@@ -995,7 +995,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                            uint32_t flags,
                            uint32_t page_alignment,
                            unsigned long buffer_start,
-                           int interruptible,
+                           bool interruptible,
                            struct file *persistant_swap_storage,
                            size_t acc_size,
                            void (*destroy) (struct ttm_buffer_object *))

@@ -1031,7 +1031,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
     bo->buffer_start = buffer_start & PAGE_MASK;
     bo->priv_flags = 0;
     bo->mem.flags = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
-    bo->seq_valid = 0;
+    bo->seq_valid = false;
     bo->persistant_swap_storage = persistant_swap_storage;
     bo->acc_size = acc_size;

@@ -1060,7 +1060,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
         goto out_err;
     }

-    ret = ttm_buffer_object_validate(bo, interruptible, 0);
+    ret = ttm_buffer_object_validate(bo, interruptible, false);
     if (ret)
         goto out_err;

@@ -1091,7 +1091,7 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                              uint32_t flags,
                              uint32_t page_alignment,
                              unsigned long buffer_start,
-                             int interruptible,
+                             bool interruptible,
                              struct file *persistant_swap_storage,
                              struct ttm_buffer_object **p_bo)
 {

@@ -1101,14 +1101,14 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,

     size_t acc_size =
         ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-    ret = ttm_mem_global_alloc(mem_glob, acc_size, 0, 0, 0);
+    ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
     if (unlikely(ret != 0))
         return ret;

     bo = kzalloc(sizeof(*bo), GFP_KERNEL);

     if (unlikely(bo == NULL)) {
-        ttm_mem_global_free(mem_glob, acc_size, 0);
+        ttm_mem_global_free(mem_glob, acc_size, false);
         return -ENOMEM;
     }

@@ -1123,7 +1123,7 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 }

 static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-                             uint32_t mem_type, int allow_errors)
+                             uint32_t mem_type, bool allow_errors)
 {
     int ret;

@@ -1134,7 +1134,7 @@ static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
         goto out;

     if (bo->mem.mem_type == mem_type)
-        ret = ttm_bo_evict(bo, mem_type, 0, 0);
+        ret = ttm_bo_evict(bo, mem_type, false, false);

     if (ret) {
         if (allow_errors) {

@@ -1152,7 +1152,7 @@ static int ttm_bo_leave_list(struct ttm_buffer_object *bo,

 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                    struct list_head *head,
-                                   unsigned mem_type, int allow_errors)
+                                   unsigned mem_type, bool allow_errors)
 {
     struct ttm_buffer_object *entry;
     int ret;

@@ -1167,7 +1167,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
     while (!list_empty(head)) {
         entry = list_first_entry(head, struct ttm_buffer_object, lru);
         kref_get(&entry->list_kref);
-        ret = ttm_bo_reserve_locked(entry, 0, 0, 0, 0);
+        ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
         put_count = ttm_bo_del_from_lru(entry);
         spin_unlock(&bdev->lru_lock);
         while (put_count--)

@@ -1200,12 +1200,12 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
         return ret;
     }

-    man->use_type = 0;
-    man->has_type = 0;
+    man->use_type = false;
+    man->has_type = false;

     ret = 0;
     if (mem_type > 0) {
-        ttm_bo_force_list_clean(bdev, &man->lru, mem_type, 0);
+        ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

         spin_lock(&bdev->lru_lock);
         if (drm_mm_clean(&man->manager)) {

@@ -1235,7 +1235,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
         return 0;
     }

-    return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, 1);
+    return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
 }

 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,

@@ -1272,8 +1272,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
         if (ret)
             return ret;
     }
-    man->has_type = 1;
-    man->use_type = 1;
+    man->has_type = true;
+    man->use_type = true;
     man->size = p_size;

     INIT_LIST_HEAD(&man->lru);

@@ -1290,20 +1290,20 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
     while (i--) {
         man = &bdev->man[i];
         if (man->has_type) {
-            man->use_type = 0;
+            man->use_type = false;
             if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                 ret = -EBUSY;
                 printk(KERN_ERR "DRM memory manager type %d "
                        "is not clean.\n", i);
             }
-            man->has_type = 0;
+            man->has_type = false;
         }
     }

     if (!cancel_delayed_work(&bdev->wq))
         flush_scheduled_work();

-    while (ttm_bo_delayed_delete(bdev, 1)) ;
+    while (ttm_bo_delayed_delete(bdev, true)) ;

     spin_lock(&bdev->lru_lock);
     if (list_empty(&bdev->ddestroy))

@@ -1365,7 +1365,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         goto out_err2;

     INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
-    bdev->nice_mode = 1;
+    bdev->nice_mode = true;
     INIT_LIST_HEAD(&bdev->ddestroy);
     INIT_LIST_HEAD(&bdev->swap_lru);
     bdev->dev_mapping = NULL;

@@ -1388,21 +1388,21 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
  * buffer object vm functions.
  */

-int ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
     struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

     if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
         if (mem->mem_type == TTM_PL_SYSTEM)
-            return 0;
+            return false;

         if (man->flags & TTM_MEMTYPE_FLAG_CMA)
-            return 0;
+            return false;

         if (mem->flags & TTM_PL_FLAG_CACHED)
-            return 0;
+            return false;
     }
-    return 1;
+    return true;
 }

 int ttm_bo_pci_offset(struct ttm_bo_device *bdev,

@@ -1519,7 +1519,7 @@ static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
 }

 int ttm_bo_wait(struct ttm_buffer_object *bo,
-                int lazy, int interruptible, int no_wait)
+                bool lazy, bool interruptible, bool no_wait)
 {
     struct ttm_bo_driver *driver = bo->bdev->driver;
     void *sync_obj;

@@ -1564,8 +1564,8 @@ void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
     wake_up_all(&bo->event_queue);
 }

-int ttm_bo_block_reservation(struct ttm_buffer_object *bo, int interruptible,
-                             int no_wait)
+int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
+                             bool no_wait)
 {
     int ret;

@@ -1585,7 +1585,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, int interruptible,
     return 0;
 }

-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, int no_wait)
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
     int ret = 0;

@@ -1594,11 +1594,11 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, int no_wait)
      * makes sure the lru lists are updated.
      */

-    ret = ttm_bo_reserve(bo, 1, no_wait, 0, 0);
+    ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
     if (unlikely(ret != 0))
         return ret;
     mutex_lock(&bo->mutex);
-    ret = ttm_bo_wait(bo, 0, 1, no_wait);
+    ret = ttm_bo_wait(bo, false, true, no_wait);
     if (unlikely(ret != 0))
         goto out_err0;
     atomic_inc(&bo->cpu_writers);

@@ -1645,10 +1645,10 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
          * we slept.
          */

-        ret = ttm_bo_reserve_locked(bo, 0, 1, 0, 0);
+        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
         if (unlikely(ret == -EBUSY)) {
             spin_unlock(&bdev->lru_lock);
-            ttm_bo_wait_unreserved(bo, 0);
+            ttm_bo_wait_unreserved(bo, false);
             spin_lock(&bdev->lru_lock);
         }
     }

@@ -1665,7 +1665,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
      */

     mutex_lock(&bo->mutex);
-    ret = ttm_bo_wait(bo, 0, 0, 0);
+    ret = ttm_bo_wait(bo, false, false, false);
     if (unlikely(ret != 0))
         goto out;

@@ -1679,7 +1679,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
         evict_mem.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
         evict_mem.mem_type = TTM_PL_SYSTEM;

-        ret = ttm_bo_handle_move_mem(bo, &evict_mem, 1, 0, 0);
+        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, false, false);
         if (unlikely(ret != 0))
             goto out;
     }
@@ -169,7 +169,7 @@ struct ttm_buffer_object {
     unsigned long offset;
     struct ttm_mem_reg mem;
     uint32_t val_seq;
-    int seq_valid;
+    bool seq_valid;

     struct list_head lru;
     struct list_head ddestroy;

@@ -251,8 +251,8 @@ static inline struct ttm_buffer_object *ttm_bo_reference(struct
  * Returns -EBUSY if no_wait is true and the buffer is busy.
  * Returns -ERESTART if interrupted by a signal.
  */
-extern int ttm_bo_wait(struct ttm_buffer_object *bo, int lazy,
-                       int interruptible, int no_wait);
+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+                       bool interruptible, bool no_wait);
 /**
  * ttm_buffer_object_validate
  *

@@ -269,7 +269,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, int lazy,
  * -ERESTART if interrupted by a signal.
  */
 extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-                                      int interruptible, int no_wait);
+                                      bool interruptible, bool no_wait);
 /**
  * ttm_bo_unref
  *

@@ -293,7 +293,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
  * -ERESTART if interrupted by a signal.
  */

-extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, int no_wait);
+extern int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
 /**
  * ttm_bo_synccpu_write_release:
  *

@@ -342,7 +342,7 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                                   uint32_t flags,
                                   uint32_t page_alignment,
                                   unsigned long buffer_start,
-                                  int interrubtible,
+                                  bool interrubtible,
                                   struct file *persistant_swap_storage,
                                   size_t acc_size,
                                   void (*destroy) (struct ttm_buffer_object *));

@@ -381,7 +381,7 @@ extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                                     uint32_t flags,
                                     uint32_t page_alignment,
                                     unsigned long buffer_start,
-                                    int interruptible,
+                                    bool interruptible,
                                     struct file *persistant_swap_storage,
                                     struct ttm_buffer_object **p_bo);

@@ -483,7 +483,7 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
  */

 static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
-                                         int *is_iomem)
+                                         bool *is_iomem)
 {
     *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
                  map->bo_kmap_type == ttm_bo_map_premapped);
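Changing ttm_kmap_obj_virtual()'s out-parameter from int * to bool * also forces every caller to retype its local, which is why the ttm_bo_vm.c hunks further down convert their int is_iomem locals to bool. A toy version of the out-parameter pattern:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical helper: returns a mapping and reports via an
     * out-parameter whether it is I/O memory. */
    static void *kmap_virtual(void *vaddr, bool iomap, bool *is_iomem)
    {
        *is_iomem = iomap;
        return vaddr;
    }

    int main(void)
    {
        char page[8];
        bool is_iomem;          /* was: int is_iomem; */
        void *p = kmap_virtual(page, false, &is_iomem);
        printf("%p iomem=%d\n", p, is_iomem);
        return 0;
    }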
@@ -571,7 +571,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,

 extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                          const char __user * wbuf, char __user * rbuf,
-                         size_t count, loff_t * f_pos, int write);
+                         size_t count, loff_t * f_pos, bool write);

 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

@@ -217,8 +217,8 @@ struct ttm_mem_type_manager {
      * No protection. Constant from start.
      */

-    int has_type;
-    int use_type;
+    bool has_type;
+    bool use_type;
     uint32_t flags;
     unsigned long gpu_offset;
     unsigned long io_offset;

@@ -318,8 +318,8 @@ struct ttm_bo_driver {
      * Move a buffer between two memory regions.
      */
     int (*move) (struct ttm_buffer_object * bo,
-                 int evict, int interruptible,
-                 int no_wait, struct ttm_mem_reg * new_mem);
+                 bool evict, bool interruptible,
+                 bool no_wait, struct ttm_mem_reg * new_mem);

     /**
      * struct ttm_bo_driver_member verify_access

@@ -348,7 +348,7 @@ struct ttm_bo_driver {

     int (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
     int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
-                          int lazy, int interruptible);
+                          bool lazy, bool interruptible);
     int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
     void (*sync_obj_unref) (void **sync_obj);
     void *(*sync_obj_ref) (void *sync_obj);

@@ -420,7 +420,7 @@ struct ttm_bo_device {
      * Protected by load / firstopen / lastclose /unload sync.
      */

-    int nice_mode;
+    bool nice_mode;
     struct address_space *dev_mapping;

     /*

@@ -564,11 +564,11 @@ extern int ttm_tt_swapout(struct ttm_tt *ttm,
  * @bdev: Pointer to a struct ttm_bo_device.
  * @mem: A valid struct ttm_mem_reg.
  *
- * Returns 1 if the memory described by @mem is PCI memory,
- * 0 otherwise.
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
  */
-extern int ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
-                              struct ttm_mem_reg *mem);
+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+                               struct ttm_mem_reg *mem);

 /**
  * ttm_bo_mem_space

@@ -591,7 +591,7 @@ extern int ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  */
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *mem,
-                            int interruptible, int no_wait);
+                            bool interruptible, bool no_wait);
 /**
  * ttm_bo_wait_for_cpu
  *

@@ -604,7 +604,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
  * -ERESTART: An interruptible sleep was interrupted by a signal.
  */

-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, int no_wait);
+extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);

 /**
  * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.

@@ -697,8 +697,8 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
  * a signal. Release all buffer reservations and return to user-space.
  */
 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
-                          int interruptible,
-                          int no_wait, int use_sequence, uint32_t sequence);
+                          bool interruptible,
+                          bool no_wait, bool use_sequence, uint32_t sequence);

 /**
  * ttm_bo_unreserve

@@ -719,7 +719,7 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
  * a potential deadlock condition backoff.
  */
 extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-                                  int interruptible);
+                                  bool interruptible);

 /**
  * ttm_bo_block_reservation

@@ -736,7 +736,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  * -ERESTART: If interruptible == 1 and the process received a signal while sleeping.
  */
 extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
-                                    int interruptible, int no_wait);
+                                    bool interruptible, bool no_wait);

 /**
  * ttm_bo_unblock_reservation

@@ -770,7 +770,7 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
  */

 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                           int evict, int no_wait, struct ttm_mem_reg *new_mem);
+                           bool evict, bool no_wait, struct ttm_mem_reg *new_mem);

 /**
  * ttm_bo_move_memcpy

@@ -791,8 +791,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */

 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                              int evict,
-                              int no_wait, struct ttm_mem_reg *new_mem);
+                              bool evict,
+                              bool no_wait, struct ttm_mem_reg *new_mem);

 /**
  * ttm_bo_free_old_node

@@ -825,7 +825,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                      void *sync_obj,
                                      void *sync_obj_arg,
-                                     int evict, int no_wait,
+                                     bool evict, bool no_wait,
                                      struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
@@ -51,7 +51,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }

 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                    int evict, int no_wait, struct ttm_mem_reg *new_mem)
+                    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
 {
     struct ttm_tt *ttm = bo->ttm;
     struct ttm_mem_reg *old_mem = &bo->mem;

@@ -181,7 +181,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }

 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                       int evict, int no_wait, struct ttm_mem_reg *new_mem)
+                       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
 {
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];

@@ -479,7 +479,7 @@ int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                               void *sync_obj,
                               void *sync_obj_arg,
-                              int evict, int no_wait,
+                              bool evict, bool no_wait,
                               struct ttm_mem_reg *new_mem)
 {
     struct ttm_bo_device * bdev = bo->bdev;

@@ -495,7 +495,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
     bo->sync_obj = driver->sync_obj_ref(sync_obj);
     bo->sync_obj_arg = sync_obj_arg;
     if (evict) {
-        ret = ttm_bo_wait(bo, 0, 0, 0);
+        ret = ttm_bo_wait(bo, false, false, false);
         if (ret)
             return ret;
         ttm_bo_free_old_node(bo);
@@ -91,11 +91,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
     struct page *page;
     int ret;
     int i;
-    int is_iomem;
+    bool is_iomem;
     unsigned long address = (unsigned long)vmf->virtual_address;
     int retval = VM_FAULT_NOPAGE;

-    ret = ttm_bo_reserve(bo, 1, 0, 0, 0);
+    ret = ttm_bo_reserve(bo, true, false, false, 0);
     if (unlikely(ret != 0))
         return VM_FAULT_NOPAGE;

@@ -107,7 +107,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
      */

     if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
-        ret = ttm_bo_wait(bo, 0, 1, 0);
+        ret = ttm_bo_wait(bo, false, true, false);
         if (unlikely(ret != 0)) {
             retval = (ret != -ERESTART) ?
                 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;

@@ -227,10 +227,10 @@ static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
     struct page *page;
     int ret;
     int i;
-    int is_iomem;
+    bool is_iomem;
     unsigned long retval = NOPFN_REFAULT;

-    ret = ttm_bo_reserve(bo, 1, 0, 0, 0);
+    ret = ttm_bo_reserve(bo, true, false, false, 0);
     if (unlikely(ret != 0))
         return NOPFN_REFAULT;

@@ -242,7 +242,7 @@ static unsigned long ttm_bo_vm_nopfn(struct vm_area_struct *vma,
      */

     if (bo->priv_flags & TTM_BO_PRIV_FLAG_MOVING) {
-        ret = ttm_bo_wait(bo, 0, 1, 0);
+        ret = ttm_bo_wait(bo, false, true, false);
         if (unlikely(ret != 0)) {
             retval = (ret != -ERESTART) ?
                 NOPFN_SIGBUS : NOPFN_REFAULT;

@@ -440,7 +440,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)

 ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
                   const char __user * wbuf, char __user * rbuf, size_t count,
-                  loff_t * f_pos, int write)
+                  loff_t * f_pos, bool write)
 {
     struct ttm_buffer_object *bo;
     struct ttm_bo_driver *driver;

@@ -453,8 +453,8 @@ ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
     unsigned int page_offset;
     char *virtual;
     int ret;
-    int no_wait = 0;
-    int dummy;
+    bool no_wait = false;
+    bool dummy;

     driver = bo->bdev->driver;
     read_lock(&bdev->vm_lock);

@@ -488,7 +488,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,
     kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
     kmap_num = kmap_end - kmap_offset + 1;

-    ret = ttm_bo_reserve(bo, 1, no_wait, 0, 0);
+    ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

     switch (ret) {
     case 0:

@@ -532,7 +532,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device * bdev, struct file * filp,

 ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
                         char __user * rbuf, size_t count, loff_t * f_pos,
-                        int write)
+                        bool write)
 {
     struct ttm_bo_kmap_obj map;
     unsigned long kmap_offset;

@@ -542,8 +542,8 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
     unsigned int page_offset;
     char *virtual;
     int ret;
-    int no_wait = 0;
-    int dummy;
+    bool no_wait = false;
+    bool dummy;

     kmap_offset = (*f_pos >> PAGE_SHIFT);
     if (unlikely(kmap_offset) >= bo->num_pages)

@@ -558,7 +558,7 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object * bo, const char __user * wbuf,
     kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
     kmap_num = kmap_end - kmap_offset + 1;

-    ret = ttm_bo_reserve(bo, 1, no_wait, 0, 0);
+    ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

     switch (ret) {
     case 0:
@@ -69,11 +69,11 @@ int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
         struct ttm_buffer_object *bo = entry->bo;

         entry->reserved = 0;
-        ret = ttm_bo_reserve(bo, 1, 0, 1, val_seq);
+        ret = ttm_bo_reserve(bo, true, false, true, val_seq);
         if (ret != 0) {
             ttm_eu_backoff_reservation(list);
             if (ret == -EAGAIN) {
-                ret = ttm_bo_wait_unreserved(bo, 1);
+                ret = ttm_bo_wait_unreserved(bo, true);
                 if (unlikely(ret != 0))
                     return ret;
                 goto retry;

@@ -84,7 +84,7 @@ int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
         entry->reserved = 1;
         if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
             ttm_eu_backoff_reservation(list);
-            ret = ttm_bo_wait_cpu(bo, 0);
+            ret = ttm_bo_wait_cpu(bo, false);
             if (ret)
                 return ret;
             goto retry;
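These two hunks keep the execbuf reservation protocol intact while converting its flags: roughly, ttm_bo_reserve() may fail with -EAGAIN when it loses a sequence race, in which case the code backs off all reservations, blocks in ttm_bo_wait_unreserved(), and retries the whole list. A toy sketch of that control flow (counters in place of real buffer objects):

    #include <stdio.h>

    /* The first two attempts "lose the race" and return -EAGAIN (-11). */
    static int attempts_left = 2;

    static int try_reserve(void)
    {
        return attempts_left-- > 0 ? -11 : 0;   /* -11 == -EAGAIN */
    }

    int main(void)
    {
        int ret;
    retry:
        ret = try_reserve();
        if (ret == -11) {
            puts("backoff, wait for unreserve, retry list");
            goto retry;
        }
        printf("reserved, ret=%d\n", ret);
        return ret;
    }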
@@ -220,10 +220,10 @@ static void ttm_fence_unring(struct ttm_fence_object *fence)
     write_unlock_irqrestore(&fc->lock, irq_flags);
 }

-int ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
+bool ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask)
 {
     unsigned long flags;
-    int signaled;
+    bool signaled;
     const struct ttm_fence_driver *driver = ttm_fence_driver(fence);
     struct ttm_fence_class_manager *fc = ttm_fence_fc(fence);

@@ -458,7 +458,7 @@ int ttm_fence_object_create(struct ttm_fence_device *fdev,
     struct ttm_fence_object *fence;
     int ret;

-    ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), 0, 0, 0);
+    ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*fence), false, false, false);
     if (unlikely(ret != 0)) {
         printk(KERN_ERR "Out of memory creating fence object\n");
         return ret;

@@ -467,7 +467,7 @@ int ttm_fence_object_create(struct ttm_fence_device *fdev,
     fence = kmalloc(sizeof(*fence), GFP_KERNEL);
     if (!fence) {
         printk(KERN_ERR "Out of memory creating fence object\n");
-        ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), 0);
+        ttm_mem_global_free(fdev->mem_glob, sizeof(*fence), false);
         return -ENOMEM;
     }

@@ -497,7 +497,7 @@ static void ttm_fence_object_destroy(struct kref *kref)
     if (fence->destroy)
         fence->destroy(fence);
     else {
-        ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), 0);
+        ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*fence), false);
         kfree(fence);
     }
 }

@@ -570,7 +570,7 @@ void ttm_fence_object_unref(struct ttm_fence_object **p_fence)
  * Placement / BO sync object glue.
  */

-int ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
+bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg)
 {
     struct ttm_fence_object *fence = (struct ttm_fence_object *)sync_obj;
     uint32_t fence_types = (uint32_t) (unsigned long)sync_arg;
@@ -228,7 +228,7 @@ extern void ttm_fence_object_unref(struct ttm_fence_object **p_fence);
  * an implicit fence flush.
  */

-extern int
+extern bool
 ttm_fence_object_signaled(struct ttm_fence_object *fence, uint32_t mask);

 /**

@@ -267,7 +267,7 @@ static inline uint32_t ttm_fence_types(const struct ttm_fence_object *fence)
  * a fence_type argument.
  */

-extern int ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
+extern bool ttm_fence_sync_obj_signaled(void *sync_obj, void *sync_arg);
 extern int ttm_fence_sync_obj_wait(void *sync_obj, void *sync_arg,
                                    int lazy, int interruptible);
 extern int ttm_fence_sync_obj_flush(void *sync_obj, void *sync_arg);
@@ -84,7 +84,7 @@ static void ttm_fence_user_destroy(struct ttm_fence_object *fence)
     struct ttm_fence_user_object *ufence =
         container_of(fence, struct ttm_fence_user_object, fence);

-    ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), 0);
+    ttm_mem_global_free(fence->fdev->mem_glob, sizeof(*ufence), false);
     kfree(ufence);
 }

@@ -121,13 +121,13 @@ ttm_fence_user_create(struct ttm_fence_device *fdev,
     struct ttm_fence_object *tmp;
     struct ttm_fence_user_object *ufence;

-    ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), 0, 0, 0);
+    ret = ttm_mem_global_alloc(fdev->mem_glob, sizeof(*ufence), false, false, false);
     if (unlikely(ret != 0))
         return -ENOMEM;

     ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
     if (unlikely(ufence == NULL)) {
-        ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), 0);
+        ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
         return -ENOMEM;
     }

@@ -164,7 +164,7 @@ ttm_fence_user_create(struct ttm_fence_device *fdev,
     ttm_fence_object_unref(&tmp);
     return ret;
 out_err0:
-    ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), 0);
+    ttm_mem_global_free(fdev->mem_glob, sizeof(*ufence), false);
     kfree(ufence);
     return ret;
 }
@@ -42,7 +42,7 @@
  * many threads may try to swap out at any given time.
  */

-static void ttm_shrink(struct ttm_mem_global *glob, int from_workqueue)
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue)
 {
     int ret;
     struct ttm_mem_shrink *shrink;

@@ -82,7 +82,7 @@ static void ttm_shrink_work(struct work_struct *work)
     struct ttm_mem_global *glob =
         container_of(work, struct ttm_mem_global, work);

-    ttm_shrink(glob, 1);
+    ttm_shrink(glob, true);
 }

 int ttm_mem_global_init(struct ttm_mem_global *glob)

@@ -134,7 +134,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)

 static inline void ttm_check_swapping(struct ttm_mem_global *glob)
 {
-    int needs_swapping;
+    bool needs_swapping;

     spin_lock(&glob->lock);
     needs_swapping = (glob->used_memory > glob->swap_limit ||

@@ -148,7 +148,7 @@ static inline void ttm_check_swapping(struct ttm_mem_global *glob)
 }

 void ttm_mem_global_free(struct ttm_mem_global *glob,
-                         uint64_t amount, int himem)
+                         uint64_t amount, bool himem)
 {
     spin_lock(&glob->lock);
     glob->used_total_memory -= amount;

@@ -159,7 +159,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
 }

 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
-                                  uint64_t amount, int himem, int reserve)
+                                  uint64_t amount, bool himem, bool reserve)
 {
     uint64_t limit;
     uint64_t lomem_limit;

@@ -194,16 +194,16 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 }

 int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-                         int no_wait, int interruptible, int himem)
+                         bool no_wait, bool interruptible, bool himem)
 {
     int count = TTM_MEMORY_ALLOC_RETRIES;

-    while (unlikely(ttm_mem_global_reserve(glob, memory, himem, 1) != 0)) {
+    while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true) != 0)) {
         if (no_wait)
             return -ENOMEM;
         if (unlikely(count-- == 0))
             return -ENOMEM;
-        ttm_shrink(glob, 0);
+        ttm_shrink(glob, false);
     }

     return 0;
@@ -147,8 +147,8 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
 extern int ttm_mem_global_init(struct ttm_mem_global *glob);
 extern void ttm_mem_global_release(struct ttm_mem_global *glob);
 extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-                                int no_wait, int interruptible, int himem);
+                                bool no_wait, bool interruptible, bool himem);
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
-                                uint64_t amount, int himem);
+                                uint64_t amount, bool himem);
 extern size_t ttm_round_pot(size_t size);
 #endif
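ttm_mem_global_alloc() now takes three adjacent bool parameters (no_wait, interruptible, himem), and the many 0, 0, 0 call sites in this commit become false, false, false. That makes the values self-describing, but argument order can still be transposed silently; annotating call sites is one common defensive style (an illustration only, not something this commit does):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy signature mirroring the shape of ttm_mem_global_alloc(). */
    static int mem_alloc(uint64_t memory, bool no_wait, bool interruptible,
                         bool himem)
    {
        printf("alloc %llu no_wait=%d interruptible=%d himem=%d\n",
               (unsigned long long)memory, no_wait, interruptible, himem);
        return 0;
    }

    int main(void)
    {
        return mem_alloc(4096, /*no_wait=*/false, /*interruptible=*/false,
                         /*himem=*/true);
    }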
@@ -265,12 +265,12 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
     }

     read_unlock(&tfile->lock);
-    ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), 0, 0, 0);
+    ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false, false);
     if (unlikely(ret != 0))
         return ret;
     ref = kmalloc(sizeof(*ref), GFP_KERNEL);
     if (unlikely(ref == NULL)) {
-        ttm_mem_global_free(mem_glob, sizeof(*ref), 0);
+        ttm_mem_global_free(mem_glob, sizeof(*ref), false);
         return -ENOMEM;
     }

@@ -295,7 +295,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
         write_unlock(&tfile->lock);
         BUG_ON(ret != -EINVAL);

-        ttm_mem_global_free(mem_glob, sizeof(*ref), 0);
+        ttm_mem_global_free(mem_glob, sizeof(*ref), false);
         kfree(ref);
     }

@@ -320,7 +320,7 @@ static void ttm_ref_object_release(struct kref *kref)
         base->ref_obj_release(base, ref->ref_type);

     ttm_base_object_unref(&ref->obj);
-    ttm_mem_global_free(mem_glob, sizeof(*ref), 0);
+    ttm_mem_global_free(mem_glob, sizeof(*ref), false);
     kfree(ref);
     write_lock(&tfile->lock);
 }
@@ -97,7 +97,7 @@ static void ttm_bo_user_destroy(struct ttm_buffer_object *bo)
     struct ttm_bo_user_object *user_bo =
         container_of(bo, struct ttm_bo_user_object, bo);

-    ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, 0);
+    ttm_mem_global_free(bo->bdev->mem_glob, bo->acc_size, false);
     kfree(user_bo);
 }

@@ -162,28 +162,28 @@ int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
     struct ttm_mem_global *mem_glob = bdev->mem_glob;
     size_t acc_size =
         ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-    ret = ttm_mem_global_alloc(mem_glob, acc_size, 0, 0, 0);
+    ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
     if (unlikely(ret != 0))
         return ret;

     flags = req->placement;
     user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
     if (unlikely(user_bo == NULL)) {
-        ttm_mem_global_free(mem_glob, acc_size, 0);
+        ttm_mem_global_free(mem_glob, acc_size, false);
         return -ENOMEM;
     }

     bo = &user_bo->bo;
     ret = ttm_read_lock(lock, true);
     if (unlikely(ret != 0)) {
-        ttm_mem_global_free(mem_glob, acc_size, 0);
+        ttm_mem_global_free(mem_glob, acc_size, false);
         kfree(user_bo);
         return ret;
     }

     ret = ttm_buffer_object_init(bdev, bo, req->size,
                                  ttm_bo_type_device, flags,
-                                 req->page_alignment, 0, 1,
+                                 req->page_alignment, 0, true,
                                  NULL, acc_size, &ttm_bo_user_destroy);
     ttm_read_unlock(lock);

@@ -231,19 +231,19 @@ int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
     struct ttm_mem_global *mem_glob = bdev->mem_glob;
     size_t acc_size =
         ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-    ret = ttm_mem_global_alloc(mem_glob, acc_size, 0, 0, 0);
+    ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
     if (unlikely(ret != 0))
         return ret;

     flags = req->placement;
     user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
     if (unlikely(user_bo == NULL)) {
-        ttm_mem_global_free(mem_glob, acc_size, 0);
+        ttm_mem_global_free(mem_glob, acc_size, false);
         return -ENOMEM;
     }
     ret = ttm_read_lock(lock, true);
     if (unlikely(ret != 0)) {
-        ttm_mem_global_free(mem_glob, acc_size, 0);
+        ttm_mem_global_free(mem_glob, acc_size, false);
         kfree(user_bo);
         return ret;
     }

@@ -251,7 +251,7 @@ int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
     ret = ttm_buffer_object_init(bdev, bo, req->size,
                                  ttm_bo_type_user, flags,
                                  req->page_alignment, req->user_address,
-                                 1, NULL, acc_size, &ttm_bo_user_destroy);
+                                 true, NULL, acc_size, &ttm_bo_user_destroy);

     /*
      * Note that the ttm_buffer_object_init function

@@ -389,11 +389,11 @@ int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
     if (unlikely(ret != 0))
         goto out_err0;

-    ret = ttm_bo_reserve(bo, 1, 0, 0, 0);
+    ret = ttm_bo_reserve(bo, true, false, false, 0);
     if (unlikely(ret != 0))
         goto out_err1;

-    ret = ttm_bo_wait_cpu(bo, 0);
+    ret = ttm_bo_wait_cpu(bo, false);
     if (unlikely(ret != 0))
         goto out_err2;

@@ -405,7 +405,7 @@ int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,

     bo->proposed_flags = (bo->proposed_flags | req->set_placement)
         & ~req->clr_placement;
-    ret = ttm_buffer_object_validate(bo, 1, 0);
+    ret = ttm_buffer_object_validate(bo, true, false);
     if (unlikely(ret != 0))
         goto out_err2;

@@ -433,14 +433,14 @@ int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
     }

     ret =
-        ttm_bo_block_reservation(bo, 1,
+        ttm_bo_block_reservation(bo, true,
                                  arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
     if (unlikely(ret != 0))
         goto out;
     mutex_lock(&bo->mutex);
     ret = ttm_bo_wait(bo,
                       arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
-                      1, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
+                      true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
     mutex_unlock(&bo->mutex);
     ttm_bo_unblock_reservation(bo);
 out:
@@ -155,7 +155,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
             set_page_dirty_lock(page);

         ttm->pages[i] = NULL;
-        ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, 0);
+        ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
         put_page(page);
     }
     ttm->state = tt_unpopulated;

@@ -178,13 +178,13 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)

         if (PageHighMem(p)) {
             ret =
-                ttm_mem_global_alloc(mem_glob, PAGE_SIZE, 0, 0, 1);
+                ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, true);
             if (unlikely(ret != 0))
                 goto out_err;
             ttm->pages[--ttm->first_himem_page] = p;
         } else {
             ret =
-                ttm_mem_global_alloc(mem_glob, PAGE_SIZE, 0, 0, 0);
+                ttm_mem_global_alloc(mem_glob, PAGE_SIZE, false, false, false);
             if (unlikely(ret != 0))
                 goto out_err;
             ttm->pages[++ttm->last_lomem_page] = p;

@@ -398,7 +398,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
      * Account user pages as lowmem pages for now.
      */

-    ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, 0, 0, 0);
+    ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE, false, false, false);
     if (unlikely(ret != 0))
         return ret;

@@ -409,7 +409,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,

     if (ret != num_pages && write) {
         ttm_tt_free_user_pages(ttm);
-        ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, 0);
+        ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
         return -ENOMEM;
     }
