util: remove LIST_IS_EMPTY macro
Just use the inlined function directly. The new function was introduced
in addcf410.
Reviewed-by: Eric Engestrom <eric@engestrom.ch>
parent 7f106a2b5d
commit 1909bc526d
25 changed files with 53 additions and 56 deletions
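For reference, the removed macro and its inline replacement perform the same pointer check. The snippet below is only a sketch: the struct layout and the exact signature of list_is_empty in util/list.h are assumed here (see addcf410 for the actual definition), not quoted from this diff.

#include <stdbool.h>
#include <stddef.h>

/* Doubly-linked list node/head as used throughout Mesa's util/list.h (assumed layout). */
struct list_head {
   struct list_head *prev;
   struct list_head *next;
};

/* Macro removed by this commit: a list is empty when its head points back at itself. */
#define LIST_IS_EMPTY(__list) ((__list)->next == (__list))

/* Inline helper that callers switch to (assumed body, matching the macro's check). */
static inline bool list_is_empty(const struct list_head *list)
{
   return list->next == list;
}
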
@@ -78,7 +78,7 @@ void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time)
    struct etna_bo_bucket *bucket = &cache->cache_bucket[i];
    struct etna_bo *bo;
 
-   while (!LIST_IS_EMPTY(&bucket->list)) {
+   while (!list_is_empty(&bucket->list)) {
       bo = LIST_ENTRY(struct etna_bo, bucket->list.next, list);
 
       /* keep things in cache for at least 1 second: */
@@ -125,7 +125,7 @@ static struct etna_bo *find_in_bucket(struct etna_bo_bucket *bucket, uint32_t fl
 
    pthread_mutex_lock(&etna_drm_table_lock);
 
-   if (LIST_IS_EMPTY(&bucket->list))
+   if (list_is_empty(&bucket->list))
       goto out_unlock;
 
    LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bucket->list, list) {

@@ -88,7 +88,7 @@ fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
    struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
    struct fd_bo *bo;
 
-   while (!LIST_IS_EMPTY(&bucket->list)) {
+   while (!list_is_empty(&bucket->list)) {
       bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
 
       /* keep things in cache for at least 1 second: */
@@ -141,7 +141,7 @@ static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
    * (MRU, since likely to be in GPU cache), rather than head (LRU)..
    */
   pthread_mutex_lock(&table_lock);
-  if (!LIST_IS_EMPTY(&bucket->list)) {
+  if (!list_is_empty(&bucket->list)) {
      bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
      /* TODO check for compatible flags? */
      if (is_idle(bo)) {

@@ -1431,7 +1431,7 @@ hud_parse_env_var(struct hud_context *hud, struct pipe_screen *screen,
    env += num;
 
    strip_hyphens(s);
-   if (added && !LIST_IS_EMPTY(&pane->graph_list)) {
+   if (added && !list_is_empty(&pane->graph_list)) {
       struct hud_graph *graph;
       graph = LIST_ENTRY(struct hud_graph, pane->graph_list.prev, head);
       strncpy(graph->name, s, sizeof(graph->name)-1);

@@ -391,7 +391,7 @@ pb_debug_manager_create_buffer(struct pb_manager *_mgr,
 #if 0
    mtx_lock(&mgr->mutex);
    debug_printf("%s: failed to create buffer\n", __FUNCTION__);
-   if(!LIST_IS_EMPTY(&mgr->list))
+   if(!list_is_empty(&mgr->list))
       pb_debug_manager_dump_locked(mgr);
    mtx_unlock(&mgr->mutex);
 #endif
@@ -444,7 +444,7 @@ pb_debug_manager_destroy(struct pb_manager *_mgr)
    struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
 
    mtx_lock(&mgr->mutex);
-   if(!LIST_IS_EMPTY(&mgr->list)) {
+   if(!list_is_empty(&mgr->list)) {
       debug_printf("%s: unfreed buffers\n", __FUNCTION__);
       pb_debug_manager_dump_locked(mgr);
    }

@@ -74,7 +74,7 @@ pb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
 static void
 pb_slabs_reclaim_locked(struct pb_slabs *slabs)
 {
-   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+   while (!list_is_empty(&slabs->reclaim)) {
       struct pb_slab_entry *entry =
          LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
 
@@ -114,20 +114,20 @@ pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
    /* If there is no candidate slab at all, or the first slab has no free
     * entries, try reclaiming entries.
     */
-   if (LIST_IS_EMPTY(&group->slabs) ||
-       LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
+   if (list_is_empty(&group->slabs) ||
+       list_is_empty(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
       pb_slabs_reclaim_locked(slabs);
 
    /* Remove slabs without free entries. */
-   while (!LIST_IS_EMPTY(&group->slabs)) {
+   while (!list_is_empty(&group->slabs)) {
       slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
-      if (!LIST_IS_EMPTY(&slab->free))
+      if (!list_is_empty(&slab->free))
         break;
 
       list_del(&slab->head);
    }
 
-   if (LIST_IS_EMPTY(&group->slabs)) {
+   if (list_is_empty(&group->slabs)) {
       /* Drop the mutex temporarily to prevent a deadlock where the allocation
        * calls back into slab functions (most likely to happen for
        * pb_slab_reclaim if memory is low).
@@ -241,7 +241,7 @@ pb_slabs_deinit(struct pb_slabs *slabs)
    /* Reclaim all slab entries (even those that are still in flight). This
    * implicitly calls slab_free for everything.
    */
-   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
+   while (!list_is_empty(&slabs->reclaim)) {
       struct pb_slab_entry *entry =
          LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
       pb_slab_reclaim(slabs, entry);

@@ -85,7 +85,7 @@ util_dirty_surfaces_use_levels_for_sampling(struct pipe_context *pipe, struct ut
 static inline void
 util_dirty_surfaces_use_for_sampling_with(struct pipe_context *pipe, struct util_dirty_surfaces *dss, struct pipe_sampler_view *psv, struct pipe_sampler_state *pss, util_dirty_surface_flush_t flush)
 {
-   if(!LIST_IS_EMPTY(&dss->dirty_list))
+   if(!list_is_empty(&dss->dirty_list))
       util_dirty_surfaces_use_levels_for_sampling(pipe, dss, (unsigned)pss->min_lod + psv->u.tex.first_level,
          MIN2((unsigned)ceilf(pss->max_lod) + psv->u.tex.first_level, psv->u.tex.last_level), flush);
 }
@@ -99,20 +99,20 @@ util_dirty_surface_init(struct util_dirty_surface *ds)
 static inline boolean
 util_dirty_surface_is_dirty(struct util_dirty_surface *ds)
 {
-   return !LIST_IS_EMPTY(&ds->dirty_list);
+   return !list_is_empty(&ds->dirty_list);
 }
 
 static inline void
 util_dirty_surface_set_dirty(struct util_dirty_surfaces *dss, struct util_dirty_surface *ds)
 {
-   if(LIST_IS_EMPTY(&ds->dirty_list))
+   if(list_is_empty(&ds->dirty_list))
       list_addtail(&ds->dirty_list, &dss->dirty_list);
 }
 
 static inline void
 util_dirty_surface_set_clean(struct util_dirty_surfaces *dss, struct util_dirty_surface *ds)
 {
-   if(!LIST_IS_EMPTY(&ds->dirty_list))
+   if(!list_is_empty(&ds->dirty_list))
       list_delinit(&ds->dirty_list);
 }
 

@@ -176,7 +176,7 @@ etna_hw_get_query_result(struct etna_context *ctx, struct etna_query *q,
    struct etna_resource *rsc = etna_resource(hq->prsc);
    const struct etna_hw_sample_provider *p = hq->provider;
 
-   assert(LIST_IS_EMPTY(&hq->node));
+   assert(list_is_empty(&hq->node));
 
    if (!wait) {
       int ret;

@@ -123,7 +123,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct fd_query *q,
 
    DBG("%p: wait=%d, active=%d", q, wait, q->active);
 
-   assert(LIST_IS_EMPTY(&aq->node));
+   assert(list_is_empty(&aq->node));
 
    /* if !wait, then check the last sample (the one most likely to
     * not be ready yet) and bail if it is not ready:

@@ -184,10 +184,10 @@ fd_hw_get_query_result(struct fd_context *ctx, struct fd_query *q,
 
    DBG("%p: wait=%d, active=%d", q, wait, q->active);
 
-   if (LIST_IS_EMPTY(&hq->periods))
+   if (list_is_empty(&hq->periods))
       return true;
 
-   assert(LIST_IS_EMPTY(&hq->list));
+   assert(list_is_empty(&hq->list));
    assert(!hq->period);
 
    /* if !wait, then check the last sample (the one most likely to

@@ -100,7 +100,7 @@ nouveau_fence_del(struct nouveau_fence *fence)
       }
    }
 
-   if (!LIST_IS_EMPTY(&fence->work)) {
+   if (!list_is_empty(&fence->work)) {
       debug_printf("WARNING: deleting fence with work still pending !\n");
       nouveau_fence_trigger_work(fence);
    }

@@ -181,10 +181,10 @@ nouveau_mm_allocate(struct nouveau_mman *cache,
       return NULL;
    }
 
-   if (!LIST_IS_EMPTY(&bucket->used)) {
+   if (!list_is_empty(&bucket->used)) {
       slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
    } else {
-      if (LIST_IS_EMPTY(&bucket->free)) {
+      if (list_is_empty(&bucket->free)) {
         mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);
@@ -284,8 +284,8 @@ nouveau_mm_destroy(struct nouveau_mman *cache)
      return;
 
    for (i = 0; i < MM_NUM_BUCKETS; ++i) {
-      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
-          !LIST_IS_EMPTY(&cache->bucket[i].full))
+      if (!list_is_empty(&cache->bucket[i].used) ||
+          !list_is_empty(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");
 

@@ -298,7 +298,7 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
 void r600_preflush_suspend_features(struct r600_common_context *ctx)
 {
    /* suspend queries */
-   if (!LIST_IS_EMPTY(&ctx->active_queries))
+   if (!list_is_empty(&ctx->active_queries))
       r600_suspend_queries(ctx);
 
    ctx->streamout.suspended = false;
@@ -316,7 +316,7 @@ void r600_postflush_resume_features(struct r600_common_context *ctx)
    }
 
    /* resume queries */
-   if (!LIST_IS_EMPTY(&ctx->active_queries))
+   if (!list_is_empty(&ctx->active_queries))
       r600_resume_queries(ctx);
 }
 

@@ -79,7 +79,7 @@ struct gfx10_sh_query {
 
 static void emit_shader_query(struct si_context *sctx)
 {
-   assert(!LIST_IS_EMPTY(&sctx->shader_query_buffers));
+   assert(!list_is_empty(&sctx->shader_query_buffers));
 
    struct gfx10_sh_query_buffer *qbuf = list_last_entry(&sctx->shader_query_buffers,
                                                         struct gfx10_sh_query_buffer, list);
@@ -119,7 +119,7 @@ static bool gfx10_alloc_query_buffer(struct si_context *sctx)
 
    struct gfx10_sh_query_buffer *qbuf = NULL;
 
-   if (!LIST_IS_EMPTY(&sctx->shader_query_buffers)) {
+   if (!list_is_empty(&sctx->shader_query_buffers)) {
       qbuf = list_last_entry(&sctx->shader_query_buffers,
                              struct gfx10_sh_query_buffer, list);
       if (qbuf->head + sizeof(struct gfx10_sh_query_buffer_mem) <= qbuf->buf->b.b.width0)
@@ -510,7 +510,7 @@ void gfx10_init_query(struct si_context *sctx)
 
 void gfx10_destroy_query(struct si_context *sctx)
 {
-   while (!LIST_IS_EMPTY(&sctx->shader_query_buffers)) {
+   while (!list_is_empty(&sctx->shader_query_buffers)) {
       struct gfx10_sh_query_buffer *qbuf =
         list_first_entry(&sctx->shader_query_buffers,
                          struct gfx10_sh_query_buffer, list);

@@ -154,7 +154,7 @@ void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
    }
 
    if (ctx->has_graphics) {
-      if (!LIST_IS_EMPTY(&ctx->active_queries))
+      if (!list_is_empty(&ctx->active_queries))
         si_suspend_queries(ctx);
 
       ctx->streamout.suspended = false;
@@ -426,7 +426,7 @@ void si_begin_new_gfx_cs(struct si_context *ctx)
       si_streamout_buffers_dirty(ctx);
    }
 
-   if (!LIST_IS_EMPTY(&ctx->active_queries))
+   if (!list_is_empty(&ctx->active_queries))
       si_resume_queries(ctx);
 
    assert(!ctx->gfx_cs->prev_dw);

@@ -256,7 +256,7 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
       }
    }
 
-   if (!LIST_IS_EMPTY(&cache->empty)) {
+   if (!list_is_empty(&cache->empty)) {
       /* An empty entry has no surface associated with it.
        * Use the first empty entry.
        */
@@ -266,7 +266,7 @@ svga_screen_cache_add(struct svga_screen *svgascreen,
       /* Remove from LRU list */
       list_del(&entry->head);
    }
-   else if (!LIST_IS_EMPTY(&cache->unused)) {
+   else if (!list_is_empty(&cache->unused)) {
       /* free the last used buffer and reuse its entry */
       entry = LIST_ENTRY(struct svga_host_surface_cache_entry,
                          cache->unused.prev, head);

@@ -122,7 +122,7 @@ NineBaseTexture9_SetLOD( struct NineBaseTexture9 *This,
        0 : This->base.info.last_level;
    This->managed.lod = MIN2(LODNew, max_level);
 
-   if (This->managed.lod != old && This->bind_count && LIST_IS_EMPTY(&This->list))
+   if (This->managed.lod != old && This->bind_count && list_is_empty(&This->list))
       list_add(&This->list, &This->base.base.device->update_textures);
 
    return old;

@@ -139,7 +139,7 @@ NineBindTextureToDevice( struct NineDevice9 *device,
    struct NineBaseTexture9 *old = *slot;
 
    if (tex) {
-      if ((tex->managed.dirty | tex->dirty_mip) && LIST_IS_EMPTY(&tex->list))
+      if ((tex->managed.dirty | tex->dirty_mip) && list_is_empty(&tex->list))
         list_add(&tex->list, &device->update_textures);
 
       tex->bind_count++;
@@ -163,7 +163,7 @@ NineBaseTexture9_Dump( struct NineBaseTexture9 *This ) { }
 
 #define BASETEX_REGISTER_UPDATE(t) do { \
    if (((t)->managed.dirty | ((t)->dirty_mip)) && (t)->bind_count) \
-      if (LIST_IS_EMPTY(&(t)->list)) \
+      if (list_is_empty(&(t)->list)) \
        list_add(&(t)->list, &(t)->base.base.device->update_textures); \
    } while(0)
 

@@ -246,7 +246,7 @@ NineBuffer9_Lock( struct NineBuffer9 *This,
    /* Tests on Win: READONLY doesn't wait for the upload */
    if (!(Flags & D3DLOCK_READONLY)) {
       if (!This->managed.dirty) {
-         assert(LIST_IS_EMPTY(&This->managed.list));
+         assert(list_is_empty(&This->managed.list));
          This->managed.dirty = TRUE;
          This->managed.dirty_box = box;
          if (p_atomic_read(&This->managed.pending_upload))

@@ -122,7 +122,7 @@ NineBindBufferToDevice( struct NineDevice9 *device,
    struct NineBuffer9 *old = *slot;
 
    if (buf) {
-      if ((buf->managed.dirty) && LIST_IS_EMPTY(&buf->managed.list))
+      if ((buf->managed.dirty) && list_is_empty(&buf->managed.list))
         list_add(&buf->managed.list, &device->update_buffers);
       buf->bind_count++;
    }
@@ -140,7 +140,7 @@ NineBuffer9_SetDirty( struct NineBuffer9 *This );
 
 #define BASEBUF_REGISTER_UPDATE(b) { \
    if ((b)->managed.dirty && (b)->bind_count) \
-      if (LIST_IS_EMPTY(&(b)->managed.list)) \
+      if (list_is_empty(&(b)->managed.list)) \
        list_add(&(b)->managed.list, &(b)->base.base.device->update_buffers); \
    }
 

@@ -820,7 +820,7 @@ static void enc_ClearBframes(omx_base_PortType *port, struct input_buf_private *
    vid_enc_PrivateType *priv = comp->pComponentPrivate;
    struct encode_task *task;
 
-   if (LIST_IS_EMPTY(&priv->b_frames))
+   if (list_is_empty(&priv->b_frames))
       return;
 
    task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
@@ -928,7 +928,7 @@ static OMX_ERRORTYPE vid_enc_EncodeFrame(omx_base_PortType *port, OMX_BUFFERHEAD
       enc_MoveTasks(&priv->b_frames, &inp->tasks);
    }
 
-   if (LIST_IS_EMPTY(&inp->tasks))
+   if (list_is_empty(&inp->tasks))
       return port->ReturnBufferFunction(port, buf);
    else
       return base_port_SendBufferFunction(port, buf);

@@ -268,7 +268,7 @@ static void enc_ClearBframes(vid_enc_PrivateType * priv, struct input_buf_privat
 {
    struct encode_task *task;
 
-   if (LIST_IS_EMPTY(&priv->b_frames))
+   if (list_is_empty(&priv->b_frames))
       return;
 
    task = LIST_ENTRY(struct encode_task, priv->b_frames.prev, list);
@@ -382,7 +382,7 @@ static OMX_ERRORTYPE encode_frame(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
       enc_MoveTasks(&priv->b_frames, &inp->tasks);
    }
 
-   if (LIST_IS_EMPTY(&inp->tasks)) {
+   if (list_is_empty(&inp->tasks)) {
       return h264e_buffer_emptied(priv, in_buf);
    } else {
       return h264e_manage_buffers(priv);

@@ -130,7 +130,7 @@ void vid_enc_BufferEncoded_common(vid_enc_PrivateType * priv, OMX_BUFFERHEADERTY
    unsigned size;
 
 #if ENABLE_ST_OMX_BELLAGIO
-   if (!inp || LIST_IS_EMPTY(&inp->tasks)) {
+   if (!inp || list_is_empty(&inp->tasks)) {
       input->nFilledLen = 0; /* mark buffer as empty */
       enc_MoveTasks(&priv->used_tasks, &inp->tasks);
       return;
@@ -182,7 +182,7 @@ struct encode_task *enc_NeedTask_common(vid_enc_PrivateType * priv, OMX_VIDEO_PO
    struct pipe_video_buffer templat = {};
    struct encode_task *task;
 
-   if (!LIST_IS_EMPTY(&priv->free_tasks)) {
+   if (!list_is_empty(&priv->free_tasks)) {
       task = LIST_ENTRY(struct encode_task, priv->free_tasks.next, list);
       list_del(&task->list);
       return task;

@@ -295,7 +295,7 @@ static void radeon_bomgr_free_va(const struct radeon_info *info,
    if ((va + size) == heap->start) {
       heap->start = va;
       /* Delete uppermost hole if it reaches the new top */
-      if (!LIST_IS_EMPTY(&heap->holes)) {
+      if (!list_is_empty(&heap->holes)) {
         hole = container_of(heap->holes.next, hole, list);
         if ((hole->offset + hole->size) == va) {
            heap->start = hole->offset;

@@ -353,7 +353,7 @@ free_zombie_sampler_views(struct st_context *st)
 {
    struct st_zombie_sampler_view_node *entry, *next;
 
-   if (LIST_IS_EMPTY(&st->zombie_sampler_views.list.node)) {
+   if (list_is_empty(&st->zombie_sampler_views.list.node)) {
       return;
    }
 
@@ -369,7 +369,7 @@ free_zombie_sampler_views(struct st_context *st)
       free(entry);
    }
 
-   assert(LIST_IS_EMPTY(&st->zombie_sampler_views.list.node));
+   assert(list_is_empty(&st->zombie_sampler_views.list.node));
 
    simple_mtx_unlock(&st->zombie_sampler_views.mutex);
 }
@@ -383,7 +383,7 @@ free_zombie_shaders(struct st_context *st)
 {
    struct st_zombie_shader_node *entry, *next;
 
-   if (LIST_IS_EMPTY(&st->zombie_shaders.list.node)) {
+   if (list_is_empty(&st->zombie_shaders.list.node)) {
       return;
    }
 
@@ -418,7 +418,7 @@ free_zombie_shaders(struct st_context *st)
       free(entry);
    }
 
-   assert(LIST_IS_EMPTY(&st->zombie_shaders.list.node));
+   assert(list_is_empty(&st->zombie_shaders.list.node));
 
    simple_mtx_unlock(&st->zombie_shaders.mutex);
 }

@@ -161,9 +161,6 @@ static inline void list_validate(const struct list_head *list)
 #define LIST_ENTRY(__type, __item, __field) \
    ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
 
-#define LIST_IS_EMPTY(__list) \
-   ((__list)->next == (__list))
-
 /**
  * Cast from a pointer to a member of a struct back to the containing struct.
  *