gallium/pb_bufmgr_cache: use the new pb_cache module

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Reviewed-by: Edward O'Callaghan <eocallaghan@alterapraxis.com>
Acked-by: Michel Dänzer <michel.daenzer@amd.com>
commit 3fbf250dfa
parent 2b396eeed9
Author: Marek Olšák
Date:   2015-12-07 00:23:06 +01:00
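
The change drops this manager's hand-rolled delayed-destruction list in favor
of the shared pb_cache helper. Judging from the call sites in the diff below,
the helper exposes roughly the following interface; the field and parameter
names here are inferred from those call sites and from the open-coded logic
being removed, not copied from pb_cache.h, so treat this as a sketch only:

   /* Inferred sketch of the pb_cache interface this patch switches to. */
   #include "pb_buffer.h"         /* struct pb_buffer, pb_size; list_head and
                                     pipe_mutex come from the usual gallium
                                     aux headers */

   struct pb_cache_entry {
      struct list_head head;      /* links the entry into the cache list */
      struct pb_buffer *buffer;   /* buffer this entry belongs to */
      int64_t start, end;         /* caching time interval */
   };

   struct pb_cache {
      struct list_head cache;     /* buffers waiting to be reclaimed */
      pipe_mutex mutex;
      uint64_t cache_size, max_cache_size;
      unsigned usecs, bypass_usage;
      float size_factor;
      void (*destroy_buffer)(struct pb_buffer *buf);
      bool (*can_reclaim)(struct pb_buffer *buf);
   };

   void pb_cache_init(struct pb_cache *mgr, unsigned usecs, float size_factor,
                      unsigned bypass_usage, uint64_t maximum_cache_size,
                      void (*destroy_buffer)(struct pb_buffer *buf),
                      bool (*can_reclaim)(struct pb_buffer *buf));
   void pb_cache_deinit(struct pb_cache *mgr);
   void pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
                            struct pb_buffer *buf);
   void pb_cache_add_buffer(struct pb_cache_entry *entry);
   struct pb_buffer *pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                                             unsigned alignment, unsigned usage);
   void pb_cache_release_all_buffers(struct pb_cache *mgr);

The manager embeds a struct pb_cache, each cached buffer embeds a struct
pb_cache_entry, and the destroy_buffer/can_reclaim callbacks let the shared
code call back into the manager for provider-specific work.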

@@ -43,15 +43,7 @@
#include "pb_buffer.h"
#include "pb_bufmgr.h"
/**
* Convenience macro (type safe).
*/
#define SUPER(__derived) (&(__derived)->base)
struct pb_cache_manager;
#include "pb_cache.h"
/**
@@ -60,31 +52,17 @@ struct pb_cache_manager;
struct pb_cache_buffer
{
struct pb_buffer base;
struct pb_buffer *buffer;
struct pb_cache_manager *mgr;
/** Caching time interval */
int64_t start, end;
struct list_head head;
struct pb_cache_entry cache_entry;
};
struct pb_cache_manager
{
struct pb_manager base;
struct pb_manager *provider;
unsigned usecs;
pipe_mutex mutex;
struct list_head delayed;
pb_size numDelayed;
float size_factor;
unsigned bypass_usage;
uint64_t cache_size, max_cache_size;
struct pb_cache cache;
};
@@ -104,76 +82,29 @@ pb_cache_manager(struct pb_manager *mgr)
}
static void
_pb_cache_manager_remove_buffer_locked(struct pb_cache_buffer *buf)
{
struct pb_cache_manager *mgr = buf->mgr;
if (buf->head.next) {
LIST_DEL(&buf->head);
assert(mgr->numDelayed);
--mgr->numDelayed;
mgr->cache_size -= buf->base.size;
}
buf->mgr = NULL;
}
void
pb_cache_manager_remove_buffer(struct pb_buffer *pb_buf)
{
struct pb_cache_buffer *buf = (struct pb_cache_buffer*)pb_buf;
struct pb_cache_manager *mgr = buf->mgr;
struct pb_cache_buffer *buf = pb_cache_buffer(pb_buf);
if (!mgr)
return;
pipe_mutex_lock(mgr->mutex);
_pb_cache_manager_remove_buffer_locked(buf);
pipe_mutex_unlock(mgr->mutex);
/* the buffer won't be added if mgr is NULL */
buf->mgr = NULL;
}
/**
* Actually destroy the buffer.
*/
static inline void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
static void
_pb_cache_buffer_destroy(struct pb_buffer *pb_buf)
{
if (buf->mgr)
_pb_cache_manager_remove_buffer_locked(buf);
struct pb_cache_buffer *buf = pb_cache_buffer(pb_buf);
assert(!pipe_is_referenced(&buf->base.reference));
pb_reference(&buf->buffer, NULL);
FREE(buf);
}
/**
* Free as many cache buffers from the list head as possible.
*/
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
struct list_head *curr, *next;
struct pb_cache_buffer *buf;
int64_t now;
now = os_time_get();
curr = mgr->delayed.next;
next = curr->next;
while(curr != &mgr->delayed) {
buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
if(!os_time_timeout(buf->start, buf->end, now))
break;
_pb_cache_buffer_destroy(buf);
curr = next;
next = curr->next;
}
}
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
@@ -186,25 +117,7 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
return;
}
pipe_mutex_lock(mgr->mutex);
assert(!pipe_is_referenced(&buf->base.reference));
_pb_cache_buffer_list_check_free(mgr);
/* Directly release any buffer that exceeds the limit. */
if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
pb_reference(&buf->buffer, NULL);
FREE(buf);
pipe_mutex_unlock(mgr->mutex);
return;
}
buf->start = os_time_get();
buf->end = buf->start + mgr->usecs;
LIST_ADDTAIL(&buf->head, &mgr->delayed);
++mgr->numDelayed;
mgr->cache_size += buf->base.size;
pipe_mutex_unlock(mgr->mutex);
pb_cache_add_buffer(&buf->cache_entry);
}
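
The timestamping, the append to the delayed list, and the total-size cap that
used to be open-coded in this destroy path are now expected to happen inside
pb_cache_add_buffer(). Assuming it keeps the behavior of the removed code, the
call amounts to roughly the following; free_expired_entries_locked() and the
explicit cache argument are illustrative only (the real function reaches the
cache through the entry set up by pb_cache_init_entry()):

   static void
   add_buffer_sketch(struct pb_cache *cache, struct pb_cache_entry *entry)
   {
      pipe_mutex_lock(cache->mutex);
      free_expired_entries_locked(cache);   /* hypothetical: drop timed-out buffers */

      if (cache->cache_size + entry->buffer->size > cache->max_cache_size) {
         /* cache already at its limit: destroy the buffer right away */
         cache->destroy_buffer(entry->buffer);
      } else {
         /* otherwise keep it cached for "usecs" before it may be freed */
         entry->start = os_time_get();
         entry->end = entry->start + cache->usecs;
         LIST_ADDTAIL(&entry->head, &cache->cache);
         cache->cache_size += entry->buffer->size;
      }
      pipe_mutex_unlock(cache->mutex);
   }
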
@@ -265,40 +178,24 @@ pb_cache_buffer_vtbl = {
};
static inline int
pb_cache_is_buffer_compat(struct pb_cache_buffer *buf,
pb_size size,
const struct pb_desc *desc)
static bool
pb_cache_can_reclaim_buffer(struct pb_buffer *_buf)
{
if (desc->usage & buf->mgr->bypass_usage)
return 0;
if(buf->base.size < size)
return 0;
/* be lenient with size */
if(buf->base.size > (unsigned) (buf->mgr->size_factor * size))
return 0;
if(!pb_check_alignment(desc->alignment, buf->base.alignment))
return 0;
if(!pb_check_usage(desc->usage, buf->base.usage))
return 0;
struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
if (buf->mgr->provider->is_buffer_busy) {
if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer))
return -1;
return false;
} else {
void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL);
if (!ptr)
return -1;
return false;
pb_unmap(buf->buffer);
}
return 1;
return true;
}
@@ -309,63 +206,15 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
{
struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
struct pb_cache_buffer *buf;
struct pb_cache_buffer *curr_buf;
struct list_head *curr, *next;
int64_t now;
int ret = 0;
pipe_mutex_lock(mgr->mutex);
buf = NULL;
curr = mgr->delayed.next;
next = curr->next;
/* search in the expired buffers, freeing them in the process */
now = os_time_get();
while(curr != &mgr->delayed) {
curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
if(!buf && (ret = pb_cache_is_buffer_compat(curr_buf, size, desc) > 0))
buf = curr_buf;
else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
_pb_cache_buffer_destroy(curr_buf);
else
/* This buffer (and all hereafter) are still hot in cache */
break;
if (ret == -1)
break;
curr = next;
next = curr->next;
}
/* keep searching in the hot buffers */
if(!buf && ret != -1) {
while(curr != &mgr->delayed) {
curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
ret = pb_cache_is_buffer_compat(curr_buf, size, desc);
if (ret > 0) {
buf = curr_buf;
break;
}
if (ret == -1)
break;
/* no need to check the timeout here */
curr = next;
next = curr->next;
}
}
if (buf) {
mgr->cache_size -= buf->base.size;
LIST_DEL(&buf->head);
--mgr->numDelayed;
pipe_mutex_unlock(mgr->mutex);
/* Increase refcount */
pipe_reference_init(&buf->base.reference, 1);
/* get a buffer from the cache */
buf = (struct pb_cache_buffer *)
pb_cache_reclaim_buffer(&mgr->cache, size, desc->alignment,
desc->usage);
if (buf)
return &buf->base;
}
pipe_mutex_unlock(mgr->mutex);
/* create a new one */
buf = CALLOC_STRUCT(pb_cache_buffer);
if (!buf)
return NULL;
@@ -374,7 +223,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
/* Empty the cache and try again. */
if (!buf->buffer) {
mgr->base.flush(&mgr->base);
pb_cache_release_all_buffers(&mgr->cache);
buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
}
@@ -385,7 +234,6 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
assert(pipe_is_referenced(&buf->buffer->reference));
assert(pb_check_alignment(desc->alignment, buf->buffer->alignment));
assert(pb_check_usage(desc->usage & ~mgr->bypass_usage, buf->buffer->usage));
assert(buf->buffer->size >= size);
pipe_reference_init(&buf->base.reference, 1);
@@ -395,6 +243,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
buf->base.vtbl = &pb_cache_buffer_vtbl;
buf->mgr = mgr;
pb_cache_init_entry(&mgr->cache, &buf->cache_entry, &buf->base);
return &buf->base;
}
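
On the allocation side, the compatibility checks and the walk over cached
buffers that used to live here are now expected to be internal to
pb_cache_reclaim_buffer(). Assuming it mirrors the removed loops, the call
behaves roughly as sketched below; entry_is_compatible() stands in for the
size, size_factor, alignment, usage and bypass_usage checks and is not a real
pb_cache function:

   static struct pb_buffer *
   reclaim_sketch(struct pb_cache *cache, pb_size size, unsigned alignment,
                  unsigned usage)
   {
      struct pb_cache_entry *entry, *next;
      struct pb_buffer *buf = NULL;
      int64_t now = os_time_get();

      pipe_mutex_lock(cache->mutex);
      LIST_FOR_EACH_ENTRY_SAFE(entry, next, &cache->cache, head) {
         if (entry_is_compatible(cache, entry, size, alignment, usage) &&
             cache->can_reclaim(entry->buffer)) {
            /* reusable buffer found: unlink it and return it with one ref */
            LIST_DEL(&entry->head);
            cache->cache_size -= entry->buffer->size;
            buf = entry->buffer;
            pipe_reference_init(&buf->reference, 1);
            break;
         }
         /* while searching, destroy entries that sat past their timeout */
         if (os_time_timeout(entry->start, entry->end, now)) {
            LIST_DEL(&entry->head);
            cache->cache_size -= entry->buffer->size;
            cache->destroy_buffer(entry->buffer);
         }
      }
      pipe_mutex_unlock(cache->mutex);
      return buf;   /* NULL lets the caller fall back to provider->create_buffer() */
   }

A NULL result is what sends pb_cache_manager_create_buffer() down the
CALLOC_STRUCT path above, and pb_cache_release_all_buffers() (used in the
flush and out-of-memory paths) presumably just destroys every cached entry
unconditionally.
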
@@ -404,19 +253,8 @@ static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
struct list_head *curr, *next;
struct pb_cache_buffer *buf;
pipe_mutex_lock(mgr->mutex);
curr = mgr->delayed.next;
next = curr->next;
while(curr != &mgr->delayed) {
buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
_pb_cache_buffer_destroy(buf);
curr = next;
next = curr->next;
}
pipe_mutex_unlock(mgr->mutex);
pb_cache_release_all_buffers(&mgr->cache);
assert(mgr->provider->flush);
if(mgr->provider->flush)
@@ -425,9 +263,11 @@ pb_cache_manager_flush(struct pb_manager *_mgr)
static void
pb_cache_manager_destroy(struct pb_manager *mgr)
pb_cache_manager_destroy(struct pb_manager *_mgr)
{
pb_cache_manager_flush(mgr);
struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
pb_cache_deinit(&mgr->cache);
FREE(mgr);
}
@@ -465,13 +305,9 @@ pb_cache_manager_create(struct pb_manager *provider,
mgr->base.create_buffer = pb_cache_manager_create_buffer;
mgr->base.flush = pb_cache_manager_flush;
mgr->provider = provider;
mgr->usecs = usecs;
mgr->size_factor = size_factor;
mgr->bypass_usage = bypass_usage;
LIST_INITHEAD(&mgr->delayed);
mgr->numDelayed = 0;
mgr->max_cache_size = maximum_cache_size;
pipe_mutex_init(mgr->mutex);
pb_cache_init(&mgr->cache, usecs, size_factor, bypass_usage,
maximum_cache_size,
_pb_cache_buffer_destroy,
pb_cache_can_reclaim_buffer);
return &mgr->base;
}