disk_cache: use block size rather than file size

The majority of cache files are less than 1 KB, which resulted in us
greatly underestimating the amount of disk space used by the cache.

Using the number of blocks allocated to the file is more
conservative and less likely to cause issues.
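
For illustration (not part of this patch), a minimal standalone sketch,
assuming a POSIX system and a hypothetical file ./cache_entry, that prints
both accountings. For a typical small cache entry on a filesystem with
4 KB blocks, st_size reports only the logical length while st_blocks * 512
reports the space actually allocated:

/* Illustration only, not part of the patch: compare the two ways of
 * accounting for a file's size.  The path "./cache_entry" is a
 * hypothetical example. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

int
main(void)
{
   struct stat sb;

   if (stat("./cache_entry", &sb) == -1) {
      perror("stat");
      return EXIT_FAILURE;
   }

   /* Logical file length: a 200-byte cache entry reports 200 here. */
   printf("st_size:         %lld bytes\n", (long long) sb.st_size);

   /* Space actually allocated on disk, counted in 512-byte units: the
    * same entry typically reports 4096 on a filesystem with 4 KB blocks. */
   printf("st_blocks * 512: %lld bytes\n", (long long) sb.st_blocks * 512);

   return EXIT_SUCCESS;
}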

This change will result in cache sizes being miscalculated further
until old items added with the previous calculation have all been
removed. However, I don't see any way around that; the previous
patch should help limit that problem.

Cc: "17.1" <mesa-stable@lists.freedesktop.org>
Reviewed-and-Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Author: Timothy Arceri, 2017-04-27 11:15:30 +10:00
Parent: ce41237151
Commit: 4e1f3afea9

@@ -532,7 +532,7 @@ unlink_lru_file_from_directory(const char *path)
    unlink(filename);
    free (filename);
 
-   return sb.st_size;
+   return sb.st_blocks * 512;
 }
 
 /* Is entry a directory with a two-character name, (and not the
@@ -637,8 +637,8 @@ disk_cache_remove(struct disk_cache *cache, const cache_key key)
    unlink(filename);
    free(filename);
 
-   if (sb.st_size)
-      p_atomic_add(cache->size, - (uint64_t)sb.st_size);
+   if (sb.st_blocks)
+      p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512);
 }
 
 static ssize_t
@@ -880,8 +880,14 @@ cache_put(void *job, int thread_index)
       goto done;
    }
 
-   file_size += cf_data_size + dc_job->cache->driver_keys_blob_size;
-   p_atomic_add(dc_job->cache->size, file_size);
+   struct stat sb;
+   if (stat(filename, &sb) == -1) {
+      /* Something went wrong remove the file */
+      unlink(filename);
+      goto done;
+   }
+
+   p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);
 
  done:
    if (fd_final != -1)