mesa/src/util/mesa_cache_db_multipart.c
Dmitry Osipenko fd9f7b748e util/mesa-db: Introduce multipart mesa-db cache
Whenever a single-file mesa-db cache hits its maximum size limit, half of
the cache is evicted and the cache file is defragmented. The downside of this
eviction strategy is that it causes high disk I/O usage during eviction when
the mesa-db cache file is large.

In order to mitigate this downside, we split mesa-db into multiple
parts such that only one part is evicted at a time. Each part is
an individual single-file mesa-db cache, like a DB shard. The new multipart
mesa-db cache merges the parts into a single virtual cache.

This patch introduces two new environment variables:

1. MESA_DISK_CACHE_DATABASE_NUM_PARTS:
Controls number of mesa-db cache file parts. By default 50 parts will be
created. The old pre-multipart mesa-db cache files will be auto-removed
if they exist, i.e. Mesa will switch to the new DB version automatically.

2. MESA_DISK_CACHE_DATABASE_EVICTION_SCORE_2X_PERIOD:
Controls the eviction score doubling time period. The evicted DB part
selection is based on cache entries size weighted by 'last_access_time' of
the entries. By default the cache eviction score is doubled for each month
of cache entry age, i.e. for two equally sized entries where one entry is
older by one month than the other, the older entry will have x2 eviction
score than the newer entry. The database part with the highest total
eviction score is selected for eviction.

This patch brings a 40x performance improvement in cache eviction time for
the multipart cache vs. a single-file cache, due to smaller eviction portions
and a more optimized eviction algorithm.

Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20256>
2023-02-01 02:47:30 +00:00

160 lines
4.1 KiB
C

/*
* Copyright © 2022 Collabora, Ltd.
*
* SPDX-License-Identifier: MIT
*/
#include <errno.h>
#include <sys/stat.h>
#include "detect_os.h"
#include "string.h"
#include "mesa_cache_db_multipart.h"
#include "u_debug.h"
/**
 * Open (creating if necessary) all parts of a multipart cache DB.
 *
 * The number of parts is taken from MESA_DISK_CACHE_DATABASE_NUM_PARTS
 * (default 50). Each part lives in "<cache_path>/part<i>" and is an
 * independent single-file mesa-db cache. On success, any pre-multipart
 * cache files directly under cache_path are wiped.
 *
 * Returns false on Windows (unsupported), on allocation failure, or if any
 * part fails to open; in that case all previously opened parts are closed
 * and no resources are leaked.
 */
bool
mesa_cache_db_multipart_open(struct mesa_cache_db_multipart *db,
                             const char *cache_path)
{
#if DETECT_OS_WINDOWS
   return false;
#else
   char *part_path = NULL;
   unsigned int i;

   db->num_parts = debug_get_num_option("MESA_DISK_CACHE_DATABASE_NUM_PARTS", 50);

   db->parts = calloc(db->num_parts, sizeof(*db->parts));
   if (!db->parts)
      return false;

   for (i = 0; i < db->num_parts; i++) {
      bool db_opened = false;

      if (asprintf(&part_path, "%s/part%u", cache_path, i) == -1)
         goto close_db;

      /* The part directory may already exist from a previous run. */
      if (mkdir(part_path, 0755) == -1 && errno != EEXIST)
         goto free_path;

      /* DB opening may fail only in a case of a severe problem,
       * like IO error.
       */
      db_opened = mesa_cache_db_open(&db->parts[i], part_path);
      if (!db_opened)
         goto free_path;

      free(part_path);
   }

   /* remove old pre multi-part cache */
   mesa_db_wipe_path(cache_path);

   return true;

free_path:
   free(part_path);
close_db:
   /* Unwind only the parts opened so far (indices 0..i-1). */
   while (i--)
      mesa_cache_db_close(&db->parts[i]);

   free(db->parts);
   /* Fix: clear the pointer so a later close() can't double-free it. */
   db->parts = NULL;
   return false;
#endif
}
/**
 * Close every part of a multipart cache DB and release the part array.
 *
 * Fix: the original "while (db->num_parts--)" loop underflowed num_parts
 * to UINT_MAX and left db->parts dangling after free(); reset both fields
 * so the descriptor is in a clean state and a repeated close is harmless.
 */
void
mesa_cache_db_multipart_close(struct mesa_cache_db_multipart *db)
{
   /* Close in reverse order of opening, as the original code did. */
   while (db->num_parts)
      mesa_cache_db_close(&db->parts[--db->num_parts]);

   free(db->parts);
   db->parts = NULL;
}
/**
 * Distribute the total cache size budget equally between all DB parts.
 */
void
mesa_cache_db_multipart_set_size_limit(struct mesa_cache_db_multipart *db,
                                       uint64_t max_cache_size)
{
   unsigned int part;

   for (part = 0; part < db->num_parts; part++) {
      uint64_t part_limit = max_cache_size / db->num_parts;

      mesa_cache_db_set_size_limit(&db->parts[part], part_limit);
   }
}
/**
 * Look up a cache entry across all DB parts.
 *
 * Probing starts from the part that satisfied the previous lookup, since
 * consecutive lookups tend to hit the same part. Returns a heap-allocated
 * blob (caller frees) with its size in *size, or NULL if no part holds
 * the key.
 */
void *
mesa_cache_db_multipart_read_entry(struct mesa_cache_db_multipart *db,
                                   const uint8_t *cache_key_160bit,
                                   size_t *size)
{
   const unsigned int start = db->last_read_part;
   unsigned int probe;

   for (probe = 0; probe < db->num_parts; probe++) {
      unsigned int idx = (start + probe) % db->num_parts;
      void *blob;

      blob = mesa_cache_db_read_entry(&db->parts[idx], cache_key_160bit, size);
      if (!blob)
         continue;

      /* Remember the hit so the next lookup starts here. */
      db->last_read_part = idx;
      return blob;
   }

   return NULL;
}
/**
 * Pick the DB part to evict from: the one with the highest eviction score
 * (entry sizes weighted by age). Defaults to part 0 when every part
 * scores zero or the score never rises above the running maximum.
 */
static unsigned
mesa_cache_db_multipart_select_victim_part(struct mesa_cache_db_multipart *db)
{
   unsigned int best_part = 0;
   double best_score = 0;
   unsigned int part;

   for (part = 0; part < db->num_parts; part++) {
      double part_score = mesa_cache_db_eviction_score(&db->parts[part]);

      if (part_score > best_score) {
         best_part = part;
         best_score = part_score;
      }
   }

   return best_part;
}
/**
 * Write a cache entry into one of the DB parts.
 *
 * Preference order: the most recently written part first, then the
 * remaining parts in round-robin order, picking the first with enough
 * free space (each part does its own locking). When every part is full,
 * the part with the most LRU content is chosen; writing into a full part
 * auto-triggers eviction of its LRU entries.
 */
bool
mesa_cache_db_multipart_entry_write(struct mesa_cache_db_multipart *db,
                                    const uint8_t *cache_key_160bit,
                                    const void *blob, size_t blob_size)
{
   const unsigned int start = db->last_written_part;
   int target = -1;
   unsigned int probe;

   for (probe = 0; probe < db->num_parts && target < 0; probe++) {
      unsigned int idx = (start + probe) % db->num_parts;

      if (mesa_cache_db_has_space(&db->parts[idx], blob_size))
         target = idx;
   }

   if (target < 0)
      target = mesa_cache_db_multipart_select_victim_part(db);

   db->last_written_part = target;

   return mesa_cache_db_entry_write(&db->parts[target], cache_key_160bit,
                                    blob, blob_size);
}
/**
 * Remove a cache entry from the multipart DB. Which part (if any) holds
 * the key is unknown, so every part is asked to drop it.
 */
void
mesa_cache_db_multipart_entry_remove(struct mesa_cache_db_multipart *db,
                                     const uint8_t *cache_key_160bit)
{
   unsigned int part = 0;

   while (part < db->num_parts)
      mesa_cache_db_entry_remove(&db->parts[part++], cache_key_160bit);
}