etnaviv: Add valgrind support

Add Valgrind support for etnaviv to track BO leaks.

Signed-off-by: Marek Vasut <marex@denx.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
Reviewed-by: Lucas Stach <l.stach@pengutronix.de>
This commit is contained in:
Marek Vasut 2019-06-09 00:18:29 +02:00 committed by Lucas Stach
parent cf92074277
commit 6bb4b6d078
3 changed files with 64 additions and 1 deletion

View file

@@ -44,6 +44,8 @@ static void set_name(struct etna_bo *bo, uint32_t name)
/* Called under etna_drm_table_lock */
void _etna_bo_del(struct etna_bo *bo)
{
VG_BO_FREE(bo);
if (bo->map)
os_munmap(bo->map, bo->size);
@@ -132,6 +134,8 @@ struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
bo->reuse = 1;
pthread_mutex_unlock(&etna_drm_table_lock);
VG_BO_ALLOC(bo);
return bo;
}
@@ -188,8 +192,10 @@ struct etna_bo *etna_bo_from_name(struct etna_device *dev,
goto out_unlock;
bo = bo_from_handle(dev, req.size, req.handle, 0);
if (bo)
if (bo) {
set_name(bo, name);
VG_BO_ALLOC(bo);
}
out_unlock:
pthread_mutex_unlock(&etna_drm_table_lock);
@@ -229,6 +235,8 @@ struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
bo = bo_from_handle(dev, size, handle, 0);
VG_BO_ALLOC(bo);
out_unlock:
pthread_mutex_unlock(&etna_drm_table_lock);

View file

@@ -85,6 +85,7 @@ void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time)
if (time && ((time - bo->free_time) <= 1))
break;
VG_BO_OBTAIN(bo);
list_del(&bo->list);
_etna_bo_del(bo);
}
@@ -169,6 +170,7 @@ struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache, uint32_t *size,
*size = bucket->size;
bo = find_in_bucket(bucket, flags);
if (bo) {
VG_BO_OBTAIN(bo);
p_atomic_set(&bo->refcnt, 1);
etna_device_ref(bo->dev);
return bo;
@@ -189,6 +191,7 @@ int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo)
clock_gettime(CLOCK_MONOTONIC, &time);
bo->free_time = time.tv_sec;
VG_BO_RELEASE(bo);
list_addtail(&bo->list, &bucket->list);
etna_bo_cache_cleanup(cache, time.tv_sec);

View file

@@ -205,4 +205,56 @@ static inline void get_abs_timeout(struct drm_etnaviv_timespec *tv, uint64_t ns)
tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
}
#if HAVE_VALGRIND
# include <valgrind/memcheck.h>
/*
 * Register the BO's backing memory with Valgrind as a heap-like block.
 * Under Valgrind we deliberately force a mapping (via etna_bo_map()) so
 * there is a CPU address for the backing store to be tracked at.
 */
static inline void VG_BO_ALLOC(struct etna_bo *bo)
{
	if (!bo || !RUNNING_ON_VALGRIND)
		return;

	VALGRIND_MALLOCLIKE_BLOCK(etna_bo_map(bo), bo->size, 0, 1);
}
static inline void VG_BO_FREE(struct etna_bo *bo)
{
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}
/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in etna_bo is used to track the buffers in
 * cache, so error reporting is disabled on the struct's range while it
 * sits in the cache; otherwise valgrind would squawk about the cache's
 * list traversal touching memory we marked noaccess below.
 */
static inline void VG_BO_RELEASE(struct etna_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		/* Order matters: disable error reporting on the struct FIRST,
		 * so that the bo->map read below (and later list traversal by
		 * the cache) doesn't trip the noaccess marking. */
		VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
		VALGRIND_MAKE_MEM_NOACCESS(bo, sizeof(*bo));
		/* Release the backing-store tracking registered by VG_BO_ALLOC;
		 * reading bo->map here is safe only because reporting on the
		 * range was disabled above. */
		VALGRIND_FREELIKE_BLOCK(bo->map, 0);
	}
}
/* Inverse of VG_BO_RELEASE: a bo is being handed back out of the cache,
 * so make its struct addressable again and re-register its backing
 * memory with Valgrind under the new owner. */
static inline void VG_BO_OBTAIN(struct etna_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		/* Mirror-image ordering of VG_BO_RELEASE: mark the struct
		 * defined BEFORE re-enabling address error reporting on it. */
		VALGRIND_MAKE_MEM_DEFINED(bo, sizeof(*bo));
		VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
		VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
	}
}
#else
/* Valgrind support compiled out: all tracking hooks become no-ops so
 * callers need no conditional compilation of their own. */
static inline void VG_BO_ALLOC(struct etna_bo *bo) {}
static inline void VG_BO_FREE(struct etna_bo *bo) {}
static inline void VG_BO_RELEASE(struct etna_bo *bo) {}
static inline void VG_BO_OBTAIN(struct etna_bo *bo) {}
#endif
#endif /* ETNAVIV_PRIV_H_ */