i965: Move intel_context::perf_debug to brw_context.
Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Acked-by: Chris Forbes <chrisf@ijw.co.nz>
Acked-by: Paul Berry <stereotype441@gmail.com>
Acked-by: Anuj Phogat <anuj.phogat@gmail.com>
This commit is contained in:
parent 7c3180a4ad
commit b15f1fc3c6
23 changed files with 20 additions and 39 deletions
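For orientation before the hunks: the patch moves the perf_debug flag from the shared intel_context into brw_context, so the perf_debug() macro and every guard of the form if (unlikely(...->perf_debug)) now read brw->perf_debug instead of intel->perf_debug, and functions that only kept an intel local for that purpose drop it. A minimal, hand-written sketch of the resulting shape (stand-in types and stubs for illustration only, not the real Mesa definitions):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real structs carry many more fields and the
 * real perf_debug() also feeds GL_ARB_debug_output. */
struct intel_context { int dummy; };

struct brw_context {
   struct intel_context intel;   /* base context is still embedded */
   bool perf_debug;              /* moved here from intel_context */
};

#define perf_debug(...) fprintf(stderr, __VA_ARGS__)

static void example_guard(struct brw_context *brw)
{
   if (brw->perf_debug)          /* was: intel->perf_debug */
      perf_debug("possible performance problem\n");
}

int main(void)
{
   struct brw_context brw = { .perf_debug = true };
   example_guard(&brw);
   return 0;
}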

@@ -147,7 +147,6 @@ is_color_fast_clear_compatible(struct brw_context *brw,
                                gl_format format,
                                const union gl_color_union *color)
 {
-   struct intel_context *intel = &brw->intel;
    if (_mesa_is_format_integer_color(format))
       return false;
 

@@ -464,7 +464,7 @@ brwCreateContext(int api,
       ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_DEBUG_BIT;
 
       /* Turn on some extra GL_ARB_debug_output generation. */
-      intel->perf_debug = true;
+      brw->perf_debug = true;
    }
 
    brw_fs_alloc_reg_sets(brw);

@@ -858,6 +858,13 @@ struct brw_context
 
    GLenum reduced_primitive;
 
+   /**
+    * Set if we're either a debug context or the INTEL_DEBUG=perf environment
+    * variable is set, this is the flag indicating to do expensive work that
+    * might lead to a perf_debug() call.
+    */
+   bool perf_debug;
+
    bool emit_state_always;
    bool has_surface_tile_offset;
    bool has_compr4;
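The comment added above describes the intended use: only pay for extra diagnostics when someone can actually see them. A hedged, self-contained illustration of that pattern (stand-in types and stubs, not the real driver code; in the real tree the busy query goes through drm_intel_bo_busy(), which costs a kernel round trip, which is why it sits behind the flag):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for illustration; the real code uses libdrm and perf_debug(). */
typedef struct { bool busy; } fake_bo;
static bool fake_bo_busy(const fake_bo *bo) { return bo->busy; }

struct brw_context_sketch { bool perf_debug; };

static void map_with_stall_warning(struct brw_context_sketch *brw, fake_bo *bo)
{
   /* The busy query is the "expensive work": skip it unless the result
    * could actually be reported. */
   if (brw->perf_debug && fake_bo_busy(bo))
      fprintf(stderr, "Mapping a busy BO, causing a stall on the GPU.\n");

   /* ... map the buffer here ... */
}

int main(void)
{
   struct brw_context_sketch brw = { .perf_debug = true };
   fake_bo bo = { .busy = true };
   map_with_stall_warning(&brw, &bo);
   return 0;
}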

@@ -494,7 +494,7 @@ void brw_draw_prims( struct gl_context *ctx,
                      GLuint max_index,
                      struct gl_transform_feedback_object *tfb_vertcount )
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    const struct gl_client_array **arrays = ctx->Array._DrawArrays;
 
    if (!_mesa_check_conditional_render(ctx))

@@ -800,7 +800,6 @@ const struct brw_tracked_state brw_vertices = {
 static void brw_upload_indices(struct brw_context *brw)
 {
    struct gl_context *ctx = &brw->intel.ctx;
-   struct intel_context *intel = &brw->intel;
    const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
    GLuint ib_size;
    drm_intel_bo *bo = NULL;

@@ -3020,7 +3020,7 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
    bool start_busy = false;
    float start_time = 0;
 
-   if (unlikely(intel->perf_debug)) {
+   if (unlikely(brw->perf_debug)) {
       start_busy = (brw->batch.last_bo &&
                     drm_intel_bo_busy(brw->batch.last_bo));
       start_time = get_time();

@@ -3077,7 +3077,7 @@ brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
                                                      simd16_instructions,
                                                      final_assembly_size);
 
-   if (unlikely(intel->perf_debug) && shader) {
+   if (unlikely(brw->perf_debug) && shader) {
       if (shader->compiled_once)
          brw_wm_debug_recompile(brw, prog, &c->key);
       shader->compiled_once = true;

@@ -140,7 +140,7 @@ brw_queryobj_get_results(struct gl_context *ctx,
    if (drm_intel_bo_references(brw->batch.bo, query->bo))
       intel_batchbuffer_flush(brw);
 
-   if (unlikely(intel->perf_debug)) {
+   if (unlikely(brw->perf_debug)) {
       if (drm_intel_bo_busy(query->bo)) {
          perf_debug("Stalling on the GPU waiting for a query object.\n");
       }

@@ -182,7 +182,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
       lower_variable_index_to_cond_assign(shader->ir,
                                           input, output, temp, uniform);
 
-      if (unlikely((intel->perf_debug) && lowered_variable_indexing)) {
+      if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
          perf_debug("Unsupported form of variable indexing in FS; falling "
                     "back to very inefficient code generation\n");
       }

@@ -386,8 +386,6 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
 void
 brw_state_cache_check_size(struct brw_context *brw)
 {
-   struct intel_context *intel = &brw->intel;
-
    /* un-tuned guess.  Each object is generally a page, so 2000 of them is 8 MB of
    * state cache.
    */

@@ -1504,11 +1504,10 @@ brw_vs_emit(struct brw_context *brw,
             void *mem_ctx,
             unsigned *final_assembly_size)
 {
-   struct intel_context *intel = &brw->intel;
    bool start_busy = false;
    float start_time = 0;
 
-   if (unlikely(intel->perf_debug)) {
+   if (unlikely(brw->perf_debug)) {
       start_busy = (brw->batch.last_bo &&
                     drm_intel_bo_busy(brw->batch.last_bo));
       start_time = get_time();

@@ -1548,7 +1547,7 @@ brw_vs_emit(struct brw_context *brw,
    const unsigned *generated = g.generate_assembly(&v.instructions,
                                                    final_assembly_size);
 
-   if (unlikely(intel->perf_debug) && shader) {
+   if (unlikely(brw->perf_debug) && shader) {
       if (shader->compiled_once) {
          brw_vs_debug_recompile(brw, prog, &c->key);
       }

@@ -334,7 +334,6 @@ do_vs_prog(struct brw_context *brw,
 static bool
 key_debug(struct brw_context *brw, const char *name, int a, int b)
 {
-   struct intel_context *intel = &brw->intel;
    if (a != b) {
       perf_debug("  %s %d->%d\n", name, a, b);
       return true;

@@ -347,7 +346,6 @@ brw_vs_debug_recompile(struct brw_context *brw,
                        struct gl_shader_program *prog,
                        const struct brw_vs_prog_key *key)
 {
-   struct intel_context *intel = &brw->intel;
    struct brw_cache_item *c = NULL;
    const struct brw_vs_prog_key *old_key = NULL;
    bool found = false;

@@ -140,7 +140,6 @@ bool do_wm_prog(struct brw_context *brw,
                 struct brw_fragment_program *fp,
                 struct brw_wm_prog_key *key)
 {
-   struct intel_context *intel = &brw->intel;
    struct brw_wm_compile *c;
    const GLuint *program;
    struct gl_shader *fs = NULL;

@@ -205,7 +204,6 @@ bool do_wm_prog(struct brw_context *brw,
 static bool
 key_debug(struct brw_context *brw, const char *name, int a, int b)
 {
-   struct intel_context *intel = &brw->intel;
    if (a != b) {
       perf_debug("  %s %d->%d\n", name, a, b);
       return true;

@@ -244,7 +242,6 @@ brw_wm_debug_recompile(struct brw_context *brw,
                        struct gl_shader_program *prog,
                        const struct brw_wm_prog_key *key)
 {
-   struct intel_context *intel = &brw->intel;
    struct brw_cache_item *c = NULL;
    const struct brw_wm_prog_key *old_key = NULL;
    bool found = false;

@@ -157,7 +157,6 @@ gen6_queryobj_get_results(struct gl_context *ctx,
                           struct brw_query_object *query)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
 
    if (query->bo == NULL)
       return;

@@ -169,7 +168,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
    if (drm_intel_bo_references(brw->batch.bo, query->bo))
       intel_batchbuffer_flush(brw);
 
-   if (unlikely(intel->perf_debug)) {
+   if (unlikely(brw->perf_debug)) {
       if (drm_intel_bo_busy(query->bo)) {
          perf_debug("Stalling on the GPU waiting for a query object.\n");
       }

@@ -158,7 +158,6 @@ intel_miptree_blit(struct brw_context *brw,
                    uint32_t width, uint32_t height,
                    GLenum logicop)
 {
-   struct intel_context *intel = &brw->intel;
    /* No sRGB decode or encode is done by the hardware blitter, which is
    * consistent with what we want in the callers (glCopyTexSubImage(),
    * glBlitFramebuffer(), texture validation, etc.).

@@ -162,7 +162,6 @@ intel_bufferobj_subdata(struct gl_context * ctx,
                         const GLvoid * data, struct gl_buffer_object *obj)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
    bool busy;
 

@@ -246,7 +245,6 @@ intel_bufferobj_map_range(struct gl_context * ctx,
                           GLbitfield access, struct gl_buffer_object *obj)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
 
    assert(intel_obj);

@@ -569,7 +569,7 @@ intelInitContext(struct brw_context *brw,
       INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
    }
    if (INTEL_DEBUG & DEBUG_PERF)
-      intel->perf_debug = true;
+      brw->perf_debug = true;
 
    if (INTEL_DEBUG & DEBUG_AUB)
       drm_intel_bufmgr_gem_set_aub_dump(brw->bufmgr, true);

@@ -129,13 +129,6 @@ struct intel_context
    bool has_llc;
    bool has_swizzling;
 
-   /**
-    * Set if we're either a debug context or the INTEL_DEBUG=perf environment
-    * variable is set, this is the flag indicating to do expensive work that
-    * might lead to a perf_debug() call.
-    */
-   bool perf_debug;
-
    struct {
       drm_intel_bo *bo;
       GLuint offset;

@@ -258,8 +251,8 @@ extern int INTEL_DEBUG;
    static GLuint msg_id = 0;                                    \
    if (unlikely(INTEL_DEBUG & DEBUG_PERF))                      \
       dbg_printf(__VA_ARGS__);                                  \
-   if (intel->perf_debug)                                       \
-      _mesa_gl_debug(&intel->ctx, &msg_id,                      \
+   if (brw->perf_debug)                                         \
+      _mesa_gl_debug(&brw->intel.ctx, &msg_id,                  \
                      MESA_DEBUG_TYPE_PERFORMANCE,               \
                      MESA_DEBUG_SEVERITY_MEDIUM,                \
                      __VA_ARGS__);                              \
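One consequence of the macro hunk above, shown with a small stand-alone sketch (hand-written and simplified; the real macro also honors INTEL_DEBUG=perf and routes messages through _mesa_gl_debug()): because the expansion now names a variable called brw, perf_debug() can only be used in functions that have a struct brw_context *brw in scope, which is why the remaining hunks drop their now-unused intel locals and rely on an existing brw.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the real macro in intel_context.h: it expands to
 * code that dereferences a local named `brw`, so callers must have one. */
struct brw_context_sketch { bool perf_debug; };

#define perf_debug(...)                          \
   do {                                          \
      if (brw->perf_debug)                       \
         fprintf(stderr, __VA_ARGS__);           \
   } while (0)

static void
key_debug_example(struct brw_context_sketch *brw, const char *name, int a, int b)
{
   if (a != b)
      perf_debug("  %s %d->%d\n", name, a, b);   /* needs `brw` in scope */
}

int main(void)
{
   struct brw_context_sketch brw = { .perf_debug = true };
   key_debug_example(&brw, "example_key_field", 0, 1);
   return 0;
}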

@@ -668,7 +668,6 @@ intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                     GLbitfield mask, GLenum filter)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
 
    if (mask & GL_COLOR_BUFFER_BIT) {
       GLint i;

@@ -1045,7 +1045,6 @@ intel_miptree_copy_slice(struct brw_context *brw,
                          int depth)
 
 {
-   struct intel_context *intel = &brw->intel;
    gl_format format = src_mt->format;
    uint32_t width = src_mt->level[level].width;
    uint32_t height = src_mt->level[level].height;

@@ -177,7 +177,6 @@ do_blit_bitmap( struct gl_context *ctx,
                 const GLubyte *bitmap )
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct intel_renderbuffer *irb;
    GLfloat tmpColor[4];

@@ -165,7 +165,6 @@ intelReadPixels(struct gl_context * ctx,
                 const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
    bool dirty;
 
    DBG("%s\n", __FUNCTION__);

@@ -98,7 +98,6 @@ intelCopyTexSubImage(struct gl_context *ctx, GLuint dims,
                      GLsizei width, GLsizei height)
 {
    struct brw_context *brw = brw_context(ctx);
-   struct intel_context *intel = intel_context(ctx);
 
    /* Try BLORP first.  It can handle almost everything. */
    if (brw_blorp_copytexsubimage(brw, rb, texImage, slice, x, y,

@@ -220,7 +220,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
       intel_batchbuffer_flush(brw);
    }
 
-   if (unlikely(intel->perf_debug)) {
+   if (unlikely(brw->perf_debug)) {
       if (drm_intel_bo_busy(bo)) {
          perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
       }