i965: Drop brw_bo_map[_gtt] wrappers which issue perf warnings.

The stupid reason for eliminating these functions is that I'm about
to rename drm_bacon_bo_map() to brw_bo_map(), which makes the real
function have the short name, rather than the wrapper.

I'm also planning on reworking our mapping code soon, so we use WC
mappings and proper unsynchronized mappings on non-LLC platforms.
It will be easier to do that without thinking about the stall
warnings and wrappers.

My eventual hope is to put the performance warnings in the BO map
function itself, so all callers gain the warning.

Acked-by: Jason Ekstrand <jason@jlekstrand.net>
This commit is contained in:
Kenneth Graunke 2017-04-03 20:22:59 -07:00
parent dfd81373b6
commit e0d15e9769
7 changed files with 10 additions and 57 deletions

View file

@ -1462,12 +1462,6 @@ uint32_t brw_depth_format(struct brw_context *brw, mesa_format format);
/* brw_performance_query.c */
void brw_init_performance_queries(struct brw_context *brw);
/* intel_buffer_objects.c */
int brw_bo_map(struct brw_context *brw, drm_bacon_bo *bo, int write_enable,
const char *bo_name);
int brw_bo_map_gtt(struct brw_context *brw, drm_bacon_bo *bo,
const char *bo_name);
/* intel_extensions.c */
extern void intelInitExtensions(struct gl_context *ctx);

View file

@ -212,7 +212,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
if (query->bo == NULL)
return;
brw_bo_map(brw, query->bo, false, "query object");
drm_bacon_bo_map(query->bo, false);
uint64_t *results = query->bo->virtual;
switch (query->Base.Target) {
case GL_TIME_ELAPSED:

View file

@ -39,46 +39,6 @@
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
/**
 * Map a buffer object; issue performance warnings if mapping causes stalls.
 *
 * This matches the drm_bacon_bo_map API, but takes an additional human-readable
 * name for the buffer object to use in the performance debug message.
 */
int
brw_bo_map(struct brw_context *brw,
           drm_bacon_bo *bo, int write_enable,
           const char *bo_name)
{
   /* Only measure the stall when perf debugging is on and the BO is
    * actually still busy on the GPU; otherwise map it directly.
    */
   if (unlikely(brw->perf_debug) && drm_bacon_bo_busy(bo)) {
      double map_start = get_time();
      int ret = drm_bacon_bo_map(bo, write_enable);

      perf_debug("CPU mapping a busy %s BO stalled and took %.03f ms.\n",
                 bo_name, (get_time() - map_start) * 1000);
      return ret;
   }

   return drm_bacon_bo_map(bo, write_enable);
}
/**
 * GTT-map a buffer object; issue a performance warning if the map stalls.
 *
 * Like brw_bo_map(), but uses a GTT (aperture) mapping instead of a CPU map.
 */
int
brw_bo_map_gtt(struct brw_context *brw, drm_bacon_bo *bo, const char *bo_name)
{
   /* Time the map only when perf debugging is enabled and the BO is busy,
    * since that is the case where the kernel will stall us.
    */
   if (unlikely(brw->perf_debug) && drm_bacon_bo_busy(bo)) {
      double map_start = get_time();
      int ret = drm_bacon_gem_bo_map_gtt(bo);

      perf_debug("GTT mapping a busy %s BO stalled and took %.03f ms.\n",
                 bo_name, (get_time() - map_start) * 1000);
      return ret;
   }

   return drm_bacon_gem_bo_map_gtt(bo);
}
static void
mark_buffer_gpu_usage(struct intel_buffer_object *intel_obj,
uint32_t offset, uint32_t size)
@ -429,8 +389,8 @@ brw_map_buffer_range(struct gl_context *ctx,
intel_obj->map_extra[index],
alignment);
if (brw->has_llc) {
brw_bo_map(brw, intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0, "range-map");
drm_bacon_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
} else {
drm_bacon_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
}
@ -450,8 +410,7 @@ brw_map_buffer_range(struct gl_context *ctx,
drm_bacon_gem_bo_map_gtt(intel_obj->buffer);
mark_buffer_inactive(intel_obj);
} else {
brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0,
"MapBufferRange");
drm_bacon_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
mark_buffer_inactive(intel_obj);
}

View file

@ -1386,7 +1386,7 @@ intel_miptree_init_mcs(struct brw_context *brw,
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
const int ret = brw_bo_map_gtt(brw, mt->mcs_buf->bo, "miptree");
const int ret = drm_bacon_gem_bo_map_gtt(mt->mcs_buf->bo);
if (unlikely(ret)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
drm_bacon_bo_unreference(mt->mcs_buf->bo);
@ -2473,9 +2473,9 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt)
* long as cache consistency is maintained).
*/
if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
brw_bo_map_gtt(brw, bo, "miptree");
drm_bacon_gem_bo_map_gtt(bo);
else
brw_bo_map(brw, bo, true, "miptree");
drm_bacon_bo_map(bo, true);
return bo->virtual;
}

View file

@ -147,7 +147,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx,
intel_batchbuffer_flush(brw);
}
error = brw_bo_map(brw, bo, false /* write enable */, "miptree");
error = drm_bacon_bo_map(bo, false /* write enable */);
if (error) {
DBG("%s: failed to map bo\n", __func__);
return false;

View file

@ -532,7 +532,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx,
intel_batchbuffer_flush(brw);
}
error = brw_bo_map(brw, bo, false /* write enable */, "miptree");
error = drm_bacon_bo_map(bo, false /* write enable */);
if (error) {
DBG("%s: failed to map bo\n", __func__);
return false;

View file

@ -148,7 +148,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
intel_batchbuffer_flush(brw);
}
error = brw_bo_map(brw, bo, true /* write enable */, "miptree");
error = drm_bacon_bo_map(bo, true /* write enable */);
if (error || bo->virtual == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;