fix double deletion

plus, if the current hash is bigger than the max size, make sure
we delete enough from it
Zack Rusin 2008-03-10 22:10:18 -04:00
parent d9d2ca7a07
commit be9a245738
2 changed files with 4 additions and 8 deletions

@@ -207,8 +207,11 @@ static INLINE void sanitize_hash(struct cso_hash *hash, enum cso_cache_type type
 {
    /* if we're approaching the maximum size, remove a fourth of the entries,
     * otherwise every subsequent call will go through the same */
-   int max_entries = (max_size > cso_hash_size(hash)) ? max_size : cso_hash_size(hash);
+   int hash_size = cso_hash_size(hash);
+   int max_entries = (max_size > hash_size) ? max_size : hash_size;
    int to_remove = (max_size < max_entries) * max_entries/4;
+   if (hash_size > max_size)
+      to_remove += hash_size - max_size;
    while (to_remove) {
       /* remove elements until we're good */
       /* fixme: currently we pick the nodes to remove at random */
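
In plain terms: once the hash has grown past max_size, evict a quarter of its entries, and additionally evict at least the overflow, so that a single call always brings the hash back under the cap (the pre-commit code could leave a badly oversized hash still over max_size). A minimal sketch of that arithmetic, assuming a hypothetical standalone helper; in the real code this lives inline in sanitize_hash:

   /* Hypothetical helper restating the eviction count computed in the
    * hunk above. */
   static int eviction_count(int hash_size, int max_size)
   {
      int max_entries = (max_size > hash_size) ? max_size : hash_size;
      /* a quarter of the entries once the hash exceeds the cap */
      int to_remove = (max_size < max_entries) * max_entries / 4;
      /* plus the overflow, so one call always gets back under max_size */
      if (hash_size > max_size)
         to_remove += hash_size - max_size;
      return to_remove;
   }

For example, eviction_count(200, 80) is 200/4 + 120 = 170, leaving 30 entries; the old code would remove only 50, leaving the hash at 150, still well over the cap.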

@@ -101,13 +101,6 @@ static void *cso_data_allocate_node(struct cso_hash_data *hash)
 static void cso_data_free_node(struct cso_node *node)
 {
-   /* XXX still a leak here.
-    * Need to cast value ptr to original cso type, then free the
-    * driver-specific data hanging off of it. For example:
-   struct cso_sampler *csamp = (struct cso_sampler *) node->value;
-   FREE(csamp->data);
-   */
-   FREE(node->value);
    FREE(node);
 }
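
This is the double deletion from the commit message: the hash node does not own node->value, which is evidently released by the layer that removes the entry, so the extra FREE(node->value) here freed the same pointer a second time. A minimal sketch of the ownership rule the fix enforces, using plain malloc/free and hypothetical names rather than the Mesa helpers:

   #include <stdlib.h>

   struct node {
      void *value;
   };

   /* After the fix: the hash frees only its own bookkeeping;
    * the stored value belongs to whoever inserted it. */
   static void free_node(struct node *n)
   {
      free(n);
   }

   /* The owner releases the value exactly once when it removes the
    * entry; a second free(n->value) inside free_node() would hit the
    * same pointer again, i.e. the double free this commit removes. */
   static void remove_entry(struct node *n)
   {
      free(n->value);
      free_node(n);
   }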