winsys/svga: Optionally avoid caching buffer maps

Mapping of graphics kernel buffers is quite costly. Therefore the svga
drm winsys caches all kernel buffer maps. However, that may lead to
reduced test coverage of the unmap paths and (possibly) processes running
out of virtual memory space. Introduce the possibility of avoiding that
caching by setting the environment variable SVGA_FORCE_KERNEL_UNMAPS to 1.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Reviewed-by: Matthew McClure <mcclurem@vmware.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4804>
This commit is contained in:
Thomas Hellstrom 2020-04-22 15:03:15 +02:00 committed by Marge Bot
parent 422148de52
commit 298e247776
4 changed files with 22 additions and 12 deletions

View file

@@ -63,6 +63,7 @@ struct vmw_gmr_buffer
struct vmw_region *region;
void *map;
unsigned map_flags;
unsigned map_count;
};
@@ -104,8 +105,12 @@ vmw_gmr_buffer_destroy(struct pb_buffer *_buf)
{
struct vmw_gmr_buffer *buf = vmw_gmr_buffer(_buf);
vmw_ioctl_region_unmap(buf->region);
assert(buf->map_count == 0);
if (buf->map) {
assert(buf->mgr->vws->cache_maps);
vmw_ioctl_region_unmap(buf->region);
}
vmw_ioctl_region_destroy(buf->region);
FREE(buf);
@@ -126,7 +131,6 @@ vmw_gmr_buffer_map(struct pb_buffer *_buf,
if (!buf->map)
return NULL;
if ((_buf->usage & VMW_BUFFER_USAGE_SYNC) &&
!(flags & PB_USAGE_UNSYNCHRONIZED)) {
ret = vmw_ioctl_syncforcpu(buf->region,
@@ -137,6 +141,7 @@ vmw_gmr_buffer_map(struct pb_buffer *_buf,
return NULL;
}
buf->map_count++;
return buf->map;
}
@@ -153,6 +158,12 @@ vmw_gmr_buffer_unmap(struct pb_buffer *_buf)
!(flags & PB_USAGE_CPU_WRITE),
FALSE);
}
assert(buf->map_count > 0);
if (!--buf->map_count && !buf->mgr->vws->cache_maps) {
vmw_ioctl_region_unmap(buf->region);
buf->map = NULL;
}
}

View file

@@ -67,6 +67,7 @@ vmw_winsys_create( int fd )
{
struct vmw_winsys_screen *vws;
struct stat stat_buf;
const char *getenv_val;
if (dev_hash == NULL) {
dev_hash = _mesa_hash_table_create(NULL, vmw_dev_hash, vmw_dev_compare);
@@ -97,6 +98,8 @@ vmw_winsys_create( int fd )
vws->base.have_gb_dma = !vws->force_coherent;
vws->base.need_to_rebind_resources = FALSE;
vws->base.have_transfer_from_buffer_cmd = vws->base.have_vgpu10;
getenv_val = getenv("SVGA_FORCE_KERNEL_UNMAPS");
vws->cache_maps = !getenv_val || strcmp(getenv_val, "0") == 0;
vws->fence_ops = vmw_fence_ops_create(vws);
if (!vws->fence_ops)
goto out_no_fence_ops;

View file

@@ -108,6 +108,7 @@ struct vmw_winsys_screen
mtx_t cs_mutex;
boolean force_coherent;
boolean cache_maps;
};

View file

@@ -64,7 +64,6 @@ struct vmw_region
uint32_t handle;
uint64_t map_handle;
void *data;
uint32_t map_count;
int drm_fd;
uint32_t size;
};
@@ -637,7 +636,6 @@ vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
region->data = NULL;
region->handle = rep->handle;
region->map_handle = rep->map_handle;
region->map_count = 0;
region->size = size;
region->drm_fd = vws->ioctl.drm_fd;
@@ -659,10 +657,7 @@ vmw_ioctl_region_destroy(struct vmw_region *region)
vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
region->ptr.gmrId, region->ptr.offset);
if (region->data) {
os_munmap(region->data, region->size);
region->data = NULL;
}
assert(region->data == NULL);
memset(&arg, 0, sizeof(arg));
arg.handle = region->handle;
@@ -701,8 +696,6 @@ vmw_ioctl_region_map(struct vmw_region *region)
region->data = map;
}
++region->map_count;
return region->data;
}
@@ -711,7 +704,9 @@ vmw_ioctl_region_unmap(struct vmw_region *region)
{
vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
region->ptr.gmrId, region->ptr.offset);
--region->map_count;
os_munmap(region->data, region->size);
region->data = NULL;
}
/**