i965: Pass brw_context to functions rather than intel_context.

This makes brw_context available in every function that used
intel_context. This makes it possible to start migrating fields from
intel_context to brw_context.

Surprisingly, this actually removes some code, as functions that use
OUT_BATCH don't need to declare "intel"; they just use "brw."

Signed-off-by: Kenneth Graunke <kenneth@whitecape.org>
Acked-by: Chris Forbes <chrisf@ijw.co.nz>
Acked-by: Paul Berry <stereotype441@gmail.com>
Acked-by: Anuj Phogat <anuj.phogat@gmail.com>
parent 86f2711722
commit ca437579b3
77 changed files with 782 additions and 777 deletions
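The change is mechanical, and the pattern can be sketched in a few lines of C. The sketch below is illustrative rather than code from the patch: the minimal struct bodies and the example_before/example_after functions are invented for the example, while the embedding of intel_context (and its gl_context) inside brw_context and the brw_context() cast helper follow what the diff itself relies on.

#include <stdbool.h>

/* Minimal stand-ins; the real structs live in the driver headers. */
struct gl_context    { int placeholder; };
struct intel_context { struct gl_context ctx; int gen; bool always_flush_batch; };
struct brw_context   { struct intel_context intel; };

/* Cast helper the old code relied on: gl_context is the first member, so a
 * gl_context pointer can be converted back to its enclosing brw_context. */
static inline struct brw_context *
brw_context(struct gl_context *ctx)
{
   return (struct brw_context *) ctx;
}

/* Before: a function took intel_context and recovered brw when it needed it. */
void example_before(struct intel_context *intel)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   (void) brw;
   /* ... OUT_BATCH-style emit macros were keyed off the local "intel" ... */
}

/* After: the function takes brw_context; intel is derived locally only where
 * fields that still live in intel_context (e.g. intel->gen) are needed. */
void example_after(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   (void) intel;
   /* ... emit macros can key off "brw", so many functions drop "intel" entirely ... */
}

Call sites flip the same way: for example, intel_batchbuffer_flush(intel) becomes intel_batchbuffer_flush(brw) throughout the hunks below.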
@@ -159,7 +159,7 @@ brw_blorp_params::brw_blorp_params()
 extern "C" {

 void
-intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
+intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
                unsigned int level, unsigned int layer, gen6_hiz_op op)
 {
    const char *opname = NULL;
@@ -183,22 +183,22 @@ intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
        __FUNCTION__, opname, mt, level, layer);

    brw_hiz_op_params params(mt, level, layer, op);
-   brw_blorp_exec(intel, &params);
+   brw_blorp_exec(brw, &params);
 }

 } /* extern "C" */

 void
-brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params)
+brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params)
 {
-   struct brw_context *brw = brw_context(&intel->ctx);
+   struct intel_context *intel = &brw->intel;

    switch (intel->gen) {
    case 6:
-      gen6_blorp_exec(intel, params);
+      gen6_blorp_exec(brw, params);
       break;
    case 7:
-      gen7_blorp_exec(intel, params);
+      gen7_blorp_exec(brw, params);
       break;
    default:
       /* BLORP is not supported before Gen6. */
@@ -207,7 +207,7 @@ brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params)
    }

    if (unlikely(intel->always_flush_batch))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);

    /* We've smashed all state compared to what the normal 3D pipeline
     * rendering tracks for GL.
@@ -220,7 +220,7 @@ brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params)
    /* Flush the sampler cache so any texturing from the destination is
     * coherent.
     */
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);
 }

 brw_hiz_op_params::brw_hiz_op_params(struct intel_mipmap_tree *mt,

@@ -35,7 +35,7 @@ extern "C" {
 #endif

 void
-brw_blorp_blit_miptrees(struct intel_context *intel,
+brw_blorp_blit_miptrees(struct brw_context *brw,
                         struct intel_mipmap_tree *src_mt,
                         unsigned src_level, unsigned src_layer,
                         struct intel_mipmap_tree *dst_mt,
@@ -47,11 +47,11 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
                         bool mirror_x, bool mirror_y);

 bool
-brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
+brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
                       bool partial_clear);

 void
-brw_blorp_resolve_color(struct intel_context *intel,
+brw_blorp_resolve_color(struct brw_context *brw,
                         struct intel_mipmap_tree *mt);

 #ifdef __cplusplus
@@ -238,7 +238,7 @@ public:


 void
-brw_blorp_exec(struct intel_context *intel, const brw_blorp_params *params);
+brw_blorp_exec(struct brw_context *brw, const brw_blorp_params *params);


 /**

@ -124,7 +124,7 @@ find_miptree(GLbitfield buffer_bit, struct intel_renderbuffer *irb)
|
|||
}
|
||||
|
||||
void
|
||||
brw_blorp_blit_miptrees(struct intel_context *intel,
|
||||
brw_blorp_blit_miptrees(struct brw_context *brw,
|
||||
struct intel_mipmap_tree *src_mt,
|
||||
unsigned src_level, unsigned src_layer,
|
||||
struct intel_mipmap_tree *dst_mt,
|
||||
|
|
@ -141,9 +141,9 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
|
|||
* to destination color buffers, and the standard render path is
|
||||
* fast-color-aware.
|
||||
*/
|
||||
intel_miptree_resolve_color(intel, src_mt);
|
||||
intel_miptree_slice_resolve_depth(intel, src_mt, src_level, src_layer);
|
||||
intel_miptree_slice_resolve_depth(intel, dst_mt, dst_level, dst_layer);
|
||||
intel_miptree_resolve_color(brw, src_mt);
|
||||
intel_miptree_slice_resolve_depth(brw, src_mt, src_level, src_layer);
|
||||
intel_miptree_slice_resolve_depth(brw, dst_mt, dst_level, dst_layer);
|
||||
|
||||
DBG("%s from %s mt %p %d %d (%f,%f) (%f,%f)"
|
||||
"to %s mt %p %d %d (%f,%f) (%f,%f) (flip %d,%d)\n",
|
||||
|
|
@ -154,7 +154,7 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
|
|||
dst_level, dst_layer, dst_x0, dst_y0, dst_x1, dst_y1,
|
||||
mirror_x, mirror_y);
|
||||
|
||||
brw_blorp_blit_params params(brw_context(&intel->ctx),
|
||||
brw_blorp_blit_params params(brw,
|
||||
src_mt, src_level, src_layer,
|
||||
dst_mt, dst_level, dst_layer,
|
||||
src_x0, src_y0,
|
||||
|
|
@ -162,13 +162,13 @@ brw_blorp_blit_miptrees(struct intel_context *intel,
|
|||
dst_x0, dst_y0,
|
||||
dst_x1, dst_y1,
|
||||
mirror_x, mirror_y);
|
||||
brw_blorp_exec(intel, &params);
|
||||
brw_blorp_exec(brw, &params);
|
||||
|
||||
intel_miptree_slice_set_needs_hiz_resolve(dst_mt, dst_level, dst_layer);
|
||||
}
|
||||
|
||||
static void
|
||||
do_blorp_blit(struct intel_context *intel, GLbitfield buffer_bit,
|
||||
do_blorp_blit(struct brw_context *brw, GLbitfield buffer_bit,
|
||||
struct intel_renderbuffer *src_irb,
|
||||
struct intel_renderbuffer *dst_irb,
|
||||
GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
|
||||
|
|
@ -180,7 +180,7 @@ do_blorp_blit(struct intel_context *intel, GLbitfield buffer_bit,
|
|||
struct intel_mipmap_tree *dst_mt = find_miptree(buffer_bit, dst_irb);
|
||||
|
||||
/* Do the blit */
|
||||
brw_blorp_blit_miptrees(intel,
|
||||
brw_blorp_blit_miptrees(brw,
|
||||
src_mt, src_irb->mt_level, src_irb->mt_layer,
|
||||
dst_mt, dst_irb->mt_level, dst_irb->mt_layer,
|
||||
srcX0, srcY0, srcX1, srcY1,
|
||||
|
|
@ -223,17 +223,17 @@ formats_match(GLbitfield buffer_bit, struct intel_renderbuffer *src_irb,
|
|||
}
|
||||
|
||||
static bool
|
||||
try_blorp_blit(struct intel_context *intel,
|
||||
try_blorp_blit(struct brw_context *brw,
|
||||
GLfloat srcX0, GLfloat srcY0, GLfloat srcX1, GLfloat srcY1,
|
||||
GLfloat dstX0, GLfloat dstY0, GLfloat dstX1, GLfloat dstY1,
|
||||
GLenum filter, GLbitfield buffer_bit)
|
||||
{
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
struct gl_context *ctx = &brw->intel.ctx;
|
||||
|
||||
/* Sync up the state of window system buffers. We need to do this before
|
||||
* we go looking for the buffers.
|
||||
*/
|
||||
intel_prepare_render(intel);
|
||||
intel_prepare_render(brw);
|
||||
|
||||
const struct gl_framebuffer *read_fb = ctx->ReadBuffer;
|
||||
const struct gl_framebuffer *draw_fb = ctx->DrawBuffer;
|
||||
|
|
@ -302,7 +302,7 @@ try_blorp_blit(struct intel_context *intel,
|
|||
for (unsigned i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; ++i) {
|
||||
dst_irb = intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[i]);
|
||||
if (dst_irb)
|
||||
do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
|
||||
do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
|
||||
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
|
||||
mirror_x, mirror_y);
|
||||
}
|
||||
|
|
@ -314,7 +314,7 @@ try_blorp_blit(struct intel_context *intel,
|
|||
intel_renderbuffer(draw_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
|
||||
if (!formats_match(buffer_bit, src_irb, dst_irb))
|
||||
return false;
|
||||
do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
|
||||
do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
|
||||
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
|
||||
mirror_x, mirror_y);
|
||||
break;
|
||||
|
|
@ -325,7 +325,7 @@ try_blorp_blit(struct intel_context *intel,
|
|||
intel_renderbuffer(draw_fb->Attachment[BUFFER_STENCIL].Renderbuffer);
|
||||
if (!formats_match(buffer_bit, src_irb, dst_irb))
|
||||
return false;
|
||||
do_blorp_blit(intel, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
|
||||
do_blorp_blit(brw, buffer_bit, src_irb, dst_irb, srcX0, srcY0,
|
||||
srcX1, srcY1, dstX0, dstY0, dstX1, dstY1,
|
||||
mirror_x, mirror_y);
|
||||
break;
|
||||
|
|
@ -337,7 +337,7 @@ try_blorp_blit(struct intel_context *intel,
|
|||
}
|
||||
|
||||
bool
|
||||
brw_blorp_copytexsubimage(struct intel_context *intel,
|
||||
brw_blorp_copytexsubimage(struct brw_context *brw,
|
||||
struct gl_renderbuffer *src_rb,
|
||||
struct gl_texture_image *dst_image,
|
||||
int slice,
|
||||
|
|
@ -345,6 +345,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
|
|||
int dstX0, int dstY0,
|
||||
int width, int height)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);
|
||||
struct intel_texture_image *intel_image = intel_texture_image(dst_image);
|
||||
|
|
@ -352,7 +353,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
|
|||
/* Sync up the state of window system buffers. We need to do this before
|
||||
* we go looking at the src renderbuffer's miptree.
|
||||
*/
|
||||
intel_prepare_render(intel);
|
||||
intel_prepare_render(brw);
|
||||
|
||||
struct intel_mipmap_tree *src_mt = src_irb->mt;
|
||||
struct intel_mipmap_tree *dst_mt = intel_image->mt;
|
||||
|
|
@ -391,7 +392,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
|
|||
mirror_y = true;
|
||||
}
|
||||
|
||||
brw_blorp_blit_miptrees(intel,
|
||||
brw_blorp_blit_miptrees(brw,
|
||||
src_mt, src_irb->mt_level, src_irb->mt_layer,
|
||||
dst_mt, dst_image->Level, dst_image->Face + slice,
|
||||
srcX0, srcY0, srcX1, srcY1,
|
||||
|
|
@ -414,7 +415,7 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
|
|||
dst_mt = dst_mt->stencil_mt;
|
||||
|
||||
if (src_mt != dst_mt) {
|
||||
brw_blorp_blit_miptrees(intel,
|
||||
brw_blorp_blit_miptrees(brw,
|
||||
src_mt, src_irb->mt_level, src_irb->mt_layer,
|
||||
dst_mt, dst_image->Level,
|
||||
dst_image->Face + slice,
|
||||
|
|
@ -429,11 +430,13 @@ brw_blorp_copytexsubimage(struct intel_context *intel,
|
|||
|
||||
|
||||
GLbitfield
|
||||
brw_blorp_framebuffer(struct intel_context *intel,
|
||||
brw_blorp_framebuffer(struct brw_context *brw,
|
||||
GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
|
||||
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
|
||||
GLbitfield mask, GLenum filter)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
/* BLORP is not supported before Gen6. */
|
||||
if (intel->gen < 6)
|
||||
return mask;
|
||||
|
|
@ -446,7 +449,7 @@ brw_blorp_framebuffer(struct intel_context *intel,
|
|||
|
||||
for (unsigned int i = 0; i < ARRAY_SIZE(buffer_bits); ++i) {
|
||||
if ((mask & buffer_bits[i]) &&
|
||||
try_blorp_blit(intel,
|
||||
try_blorp_blit(brw,
|
||||
srcX0, srcY0, srcX1, srcY1,
|
||||
dstX0, dstY0, dstX1, dstY1,
|
||||
filter, buffer_bits[i])) {
|
||||
|
|
|
|||
|
|
@ -143,10 +143,11 @@ brw_blorp_const_color_program::~brw_blorp_const_color_program()
|
|||
* moment we only support floating point, unorm, and snorm buffers.
|
||||
*/
|
||||
static bool
|
||||
is_color_fast_clear_compatible(struct intel_context *intel,
|
||||
is_color_fast_clear_compatible(struct brw_context *brw,
|
||||
gl_format format,
|
||||
const union gl_color_union *color)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
if (_mesa_is_format_integer_color(format))
|
||||
return false;
|
||||
|
||||
|
|
@ -238,7 +239,7 @@ brw_blorp_clear_params::brw_blorp_clear_params(struct brw_context *brw,
|
|||
/* If we can do this as a fast color clear, do so. */
|
||||
if (irb->mt->mcs_state != INTEL_MCS_STATE_NONE && !partial_clear &&
|
||||
wm_prog_key.use_simd16_replicated_data &&
|
||||
is_color_fast_clear_compatible(intel, format, &ctx->Color.ClearColor)) {
|
||||
is_color_fast_clear_compatible(brw, format, &ctx->Color.ClearColor)) {
|
||||
memset(push_consts, 0xff, 4*sizeof(float));
|
||||
fast_clear_op = GEN7_FAST_CLEAR_OP_FAST_CLEAR;
|
||||
|
||||
|
|
@ -258,7 +259,7 @@ brw_blorp_clear_params::brw_blorp_clear_params(struct brw_context *brw,
|
|||
* with X alignment multiplied by 16 and Y alignment multiplied by 32.
|
||||
*/
|
||||
unsigned x_align, y_align;
|
||||
intel_get_non_msrt_mcs_alignment(intel, irb->mt, &x_align, &y_align);
|
||||
intel_get_non_msrt_mcs_alignment(brw, irb->mt, &x_align, &y_align);
|
||||
x_align *= 16;
|
||||
y_align *= 32;
|
||||
x0 = ROUND_DOWN_TO(x0, x_align);
|
||||
|
|
@ -303,7 +304,7 @@ brw_blorp_rt_resolve_params::brw_blorp_rt_resolve_params(
|
|||
* X and Y alignment each divided by 2.
|
||||
*/
|
||||
unsigned x_align, y_align;
|
||||
intel_get_non_msrt_mcs_alignment(&brw->intel, mt, &x_align, &y_align);
|
||||
intel_get_non_msrt_mcs_alignment(brw, mt, &x_align, &y_align);
|
||||
unsigned x_scaledown = x_align / 2;
|
||||
unsigned y_scaledown = y_align / 2;
|
||||
x0 = y0 = 0;
|
||||
|
|
@ -425,11 +426,10 @@ brw_blorp_const_color_program::compile(struct brw_context *brw,
|
|||
|
||||
extern "C" {
|
||||
bool
|
||||
brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
|
||||
brw_blorp_clear_color(struct brw_context *brw, struct gl_framebuffer *fb,
|
||||
bool partial_clear)
|
||||
{
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct gl_context *ctx = &brw->intel.ctx;
|
||||
|
||||
/* The constant color clear code doesn't work for multisampled surfaces, so
|
||||
* we need to support falling back to other clear mechanisms.
|
||||
|
|
@ -484,7 +484,7 @@ brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
|
|||
* it now.
|
||||
*/
|
||||
if (!irb->mt->mcs_mt) {
|
||||
if (!intel_miptree_alloc_non_msrt_mcs(intel, irb->mt)) {
|
||||
if (!intel_miptree_alloc_non_msrt_mcs(brw, irb->mt)) {
|
||||
/* MCS allocation failed--probably this will only happen in
|
||||
* out-of-memory conditions. But in any case, try to recover
|
||||
* by falling back to a non-blorp clear technique.
|
||||
|
|
@ -498,7 +498,7 @@ brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
|
|||
DBG("%s to mt %p level %d layer %d\n", __FUNCTION__,
|
||||
irb->mt, irb->mt_level, irb->mt_layer);
|
||||
|
||||
brw_blorp_exec(intel, &params);
|
||||
brw_blorp_exec(brw, &params);
|
||||
|
||||
if (is_fast_clear) {
|
||||
/* Now that the fast clear has occurred, put the buffer in
|
||||
|
|
@ -513,14 +513,12 @@ brw_blorp_clear_color(struct intel_context *intel, struct gl_framebuffer *fb,
|
|||
}
|
||||
|
||||
void
|
||||
brw_blorp_resolve_color(struct intel_context *intel, struct intel_mipmap_tree *mt)
|
||||
brw_blorp_resolve_color(struct brw_context *brw, struct intel_mipmap_tree *mt)
|
||||
{
|
||||
struct brw_context *brw = brw_context(&intel->ctx);
|
||||
|
||||
DBG("%s to mt %p\n", __FUNCTION__, mt);
|
||||
|
||||
brw_blorp_rt_resolve_params params(brw, mt);
|
||||
brw_blorp_exec(intel, &params);
|
||||
brw_blorp_exec(brw, &params);
|
||||
mt->mcs_state = INTEL_MCS_STATE_RESOLVED;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -104,6 +104,7 @@ noop_scissor(struct gl_context *ctx, struct gl_framebuffer *fb)
|
|||
static bool
|
||||
brw_fast_clear_depth(struct gl_context *ctx)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct gl_framebuffer *fb = ctx->DrawBuffer;
|
||||
struct intel_renderbuffer *depth_irb =
|
||||
|
|
@ -167,7 +168,7 @@ brw_fast_clear_depth(struct gl_context *ctx)
|
|||
* flags out of the HiZ buffer into the real depth buffer.
|
||||
*/
|
||||
if (mt->depth_clear_value != depth_clear_value) {
|
||||
intel_miptree_all_slices_resolve_depth(intel, mt);
|
||||
intel_miptree_all_slices_resolve_depth(brw, mt);
|
||||
mt->depth_clear_value = depth_clear_value;
|
||||
}
|
||||
|
||||
|
|
@ -178,9 +179,9 @@ brw_fast_clear_depth(struct gl_context *ctx)
|
|||
* must be issued before the rectangle primitive used for the depth
|
||||
* buffer clear operation.
|
||||
*/
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
|
||||
intel_hiz_exec(intel, mt, depth_irb->mt_level, depth_irb->mt_layer,
|
||||
intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
|
||||
GEN6_HIZ_OP_DEPTH_CLEAR);
|
||||
|
||||
if (intel->gen == 6) {
|
||||
|
|
@ -190,7 +191,7 @@ brw_fast_clear_depth(struct gl_context *ctx)
|
|||
* by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
|
||||
* followed by Depth FLUSH'
|
||||
*/
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
}
|
||||
|
||||
/* Now, the HiZ buffer contains data that needs to be resolved to the depth
|
||||
|
|
@ -219,7 +220,7 @@ brw_clear(struct gl_context *ctx, GLbitfield mask)
|
|||
intel->front_buffer_dirty = true;
|
||||
}
|
||||
|
||||
intel_prepare_render(intel);
|
||||
intel_prepare_render(brw);
|
||||
brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);
|
||||
|
||||
if (mask & BUFFER_BIT_DEPTH) {
|
||||
|
|
@ -232,7 +233,7 @@ brw_clear(struct gl_context *ctx, GLbitfield mask)
|
|||
/* BLORP is currently only supported on Gen6+. */
|
||||
if (intel->gen >= 6) {
|
||||
if (mask & BUFFER_BITS_COLOR) {
|
||||
if (brw_blorp_clear_color(intel, fb, partial_clear)) {
|
||||
if (brw_blorp_clear_color(brw, fb, partial_clear)) {
|
||||
debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
|
||||
mask &= ~BUFFER_BITS_COLOR;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -300,7 +300,7 @@ brwCreateContext(int api,
|
|||
struct intel_context *intel = &brw->intel;
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
|
||||
if (!intelInitContext( intel, api, major_version, minor_version,
|
||||
if (!intelInitContext( brw, api, major_version, minor_version,
|
||||
mesaVis, driContextPriv,
|
||||
sharedContextPrivate, &functions,
|
||||
error)) {
|
||||
|
|
|
|||
|
|
@ -1157,8 +1157,8 @@ void gen6_init_queryobj_functions(struct dd_function_table *functions);
|
|||
/*======================================================================
|
||||
* brw_state_dump.c
|
||||
*/
|
||||
void brw_debug_batch(struct intel_context *intel);
|
||||
void brw_annotate_aub(struct intel_context *intel);
|
||||
void brw_debug_batch(struct brw_context *brw);
|
||||
void brw_annotate_aub(struct brw_context *brw);
|
||||
|
||||
/*======================================================================
|
||||
* brw_tex.c
|
||||
|
|
@ -1172,7 +1172,7 @@ void brw_validate_textures( struct brw_context *brw );
|
|||
void brwInitFragProgFuncs( struct dd_function_table *functions );
|
||||
|
||||
int brw_get_scratch_size(int size);
|
||||
void brw_get_scratch_bo(struct intel_context *intel,
|
||||
void brw_get_scratch_bo(struct brw_context *brw,
|
||||
drm_intel_bo **scratch_bo, int size);
|
||||
void brw_init_shader_time(struct brw_context *brw);
|
||||
int brw_get_shader_time_index(struct brw_context *brw,
|
||||
|
|
@ -1212,8 +1212,8 @@ void brw_upload_ubo_surfaces(struct brw_context *brw,
|
|||
uint32_t *surf_offsets);
|
||||
|
||||
/* brw_surface_formats.c */
|
||||
bool brw_is_hiz_depth_format(struct intel_context *ctx, gl_format format);
|
||||
bool brw_render_target_supported(struct intel_context *intel,
|
||||
bool brw_is_hiz_depth_format(struct brw_context *ctx, gl_format format);
|
||||
bool brw_render_target_supported(struct brw_context *brw,
|
||||
struct gl_renderbuffer *rb);
|
||||
|
||||
/* gen6_sol.c */
|
||||
|
|
@ -1234,13 +1234,13 @@ gen7_end_transform_feedback(struct gl_context *ctx,
|
|||
|
||||
/* brw_blorp_blit.cpp */
|
||||
GLbitfield
|
||||
brw_blorp_framebuffer(struct intel_context *intel,
|
||||
brw_blorp_framebuffer(struct brw_context *brw,
|
||||
GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
|
||||
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
|
||||
GLbitfield mask, GLenum filter);
|
||||
|
||||
bool
|
||||
brw_blorp_copytexsubimage(struct intel_context *intel,
|
||||
brw_blorp_copytexsubimage(struct brw_context *brw,
|
||||
struct gl_renderbuffer *src_rb,
|
||||
struct gl_texture_image *dst_image,
|
||||
int slice,
|
||||
|
|
@ -1338,7 +1338,7 @@ brw_program_reloc(struct brw_context *brw, uint32_t state_offset,
|
|||
}
|
||||
|
||||
bool brw_do_cubemap_normalize(struct exec_list *instructions);
|
||||
bool brw_lower_texture_gradients(struct intel_context *intel,
|
||||
bool brw_lower_texture_gradients(struct brw_context *brw,
|
||||
struct exec_list *instructions);
|
||||
|
||||
struct opcode_desc {
|
||||
|
|
|
|||
|
|
@ -146,8 +146,6 @@ const struct brw_tracked_state brw_curbe_offsets = {
|
|||
*/
|
||||
void brw_upload_cs_urb_state(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
/* It appears that this is the state packet for the CS unit, ie. the
|
||||
* urb entries detailed here are housed in the CS range from the
|
||||
|
|
|
|||
|
|
@ -196,7 +196,7 @@ static void brw_emit_prim(struct brw_context *brw,
|
|||
* the besides the draw code.
|
||||
*/
|
||||
if (intel->always_flush_cache) {
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
}
|
||||
|
||||
BEGIN_BATCH(6);
|
||||
|
|
@ -213,7 +213,7 @@ static void brw_emit_prim(struct brw_context *brw,
|
|||
intel->batch.need_workaround_flush = true;
|
||||
|
||||
if (intel->always_flush_cache) {
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -253,7 +253,7 @@ static void gen7_emit_prim(struct brw_context *brw,
|
|||
* the besides the draw code.
|
||||
*/
|
||||
if (intel->always_flush_cache) {
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
}
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
|
|
@ -267,7 +267,7 @@ static void gen7_emit_prim(struct brw_context *brw,
|
|||
ADVANCE_BATCH();
|
||||
|
||||
if (intel->always_flush_cache) {
|
||||
intel_batchbuffer_emit_mi_flush(intel);
|
||||
intel_batchbuffer_emit_mi_flush(brw);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -302,14 +302,13 @@ static void
|
|||
brw_predraw_resolve_buffers(struct brw_context *brw)
|
||||
{
|
||||
struct gl_context *ctx = &brw->intel.ctx;
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct intel_renderbuffer *depth_irb;
|
||||
struct intel_texture_object *tex_obj;
|
||||
|
||||
/* Resolve the depth buffer's HiZ buffer. */
|
||||
depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
|
||||
if (depth_irb)
|
||||
intel_renderbuffer_resolve_hiz(intel, depth_irb);
|
||||
intel_renderbuffer_resolve_hiz(brw, depth_irb);
|
||||
|
||||
/* Resolve depth buffer of each enabled depth texture, and color buffer of
|
||||
* each fast-clear-enabled color texture.
|
||||
|
|
@ -320,8 +319,8 @@ brw_predraw_resolve_buffers(struct brw_context *brw)
|
|||
tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
|
||||
if (!tex_obj || !tex_obj->mt)
|
||||
continue;
|
||||
intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
|
||||
intel_miptree_resolve_color(intel, tex_obj->mt);
|
||||
intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
|
||||
intel_miptree_resolve_color(brw, tex_obj->mt);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -384,7 +383,7 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
|
|||
*/
|
||||
brw_validate_textures( brw );
|
||||
|
||||
intel_prepare_render(intel);
|
||||
intel_prepare_render(brw);
|
||||
|
||||
/* This workaround has to happen outside of brw_upload_state() because it
|
||||
* may flush the batchbuffer for a blit, affecting the state flags.
|
||||
|
|
@ -423,8 +422,8 @@ static bool brw_try_draw_prims( struct gl_context *ctx,
|
|||
* we've got validated state that needs to be in the same batch as the
|
||||
* primitives.
|
||||
*/
|
||||
intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
|
||||
intel_batchbuffer_save_state(intel);
|
||||
intel_batchbuffer_require_space(brw, estimated_max_prim_size, false);
|
||||
intel_batchbuffer_save_state(brw);
|
||||
|
||||
if (brw->num_instances != prim->num_instances) {
|
||||
brw->num_instances = prim->num_instances;
|
||||
|
|
@ -459,12 +458,12 @@ retry:
|
|||
|
||||
if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
|
||||
if (!fail_next) {
|
||||
intel_batchbuffer_reset_to_saved(intel);
|
||||
intel_batchbuffer_flush(intel);
|
||||
intel_batchbuffer_reset_to_saved(brw);
|
||||
intel_batchbuffer_flush(brw);
|
||||
fail_next = true;
|
||||
goto retry;
|
||||
} else {
|
||||
if (intel_batchbuffer_flush(intel) == -ENOSPC) {
|
||||
if (intel_batchbuffer_flush(brw) == -ENOSPC) {
|
||||
static bool warned = false;
|
||||
|
||||
if (!warned) {
|
||||
|
|
@ -480,7 +479,7 @@ retry:
|
|||
}
|
||||
|
||||
if (intel->always_flush_batch)
|
||||
intel_batchbuffer_flush(intel);
|
||||
intel_batchbuffer_flush(brw);
|
||||
|
||||
brw_state_cache_check_size(brw);
|
||||
brw_postdraw_set_buffers_need_resolve(brw);
|
||||
|
|
|
|||
|
|
@ -223,9 +223,10 @@ static GLuint byte_types_scale[5] = {
|
|||
* Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
|
||||
*/
|
||||
static unsigned
|
||||
get_surface_type(struct intel_context *intel,
|
||||
get_surface_type(struct brw_context *brw,
|
||||
const struct gl_client_array *glarray)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
int size = glarray->Size;
|
||||
|
||||
if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
|
||||
|
|
@ -366,7 +367,7 @@ copy_array_to_vbo_array(struct brw_context *brw,
|
|||
* to replicate it out.
|
||||
*/
|
||||
if (src_stride == 0) {
|
||||
intel_upload_data(&brw->intel, element->glarray->Ptr,
|
||||
intel_upload_data(brw, element->glarray->Ptr,
|
||||
element->glarray->_ElementSize,
|
||||
element->glarray->_ElementSize,
|
||||
&buffer->bo, &buffer->offset);
|
||||
|
|
@ -380,10 +381,10 @@ copy_array_to_vbo_array(struct brw_context *brw,
|
|||
GLuint size = count * dst_stride;
|
||||
|
||||
if (dst_stride == src_stride) {
|
||||
intel_upload_data(&brw->intel, src, size, dst_stride,
|
||||
intel_upload_data(brw, src, size, dst_stride,
|
||||
&buffer->bo, &buffer->offset);
|
||||
} else {
|
||||
char * const map = intel_upload_map(&brw->intel, size, dst_stride);
|
||||
char * const map = intel_upload_map(brw, size, dst_stride);
|
||||
char *dst = map;
|
||||
|
||||
while (count--) {
|
||||
|
|
@ -391,7 +392,7 @@ copy_array_to_vbo_array(struct brw_context *brw,
|
|||
src += src_stride;
|
||||
dst += dst_stride;
|
||||
}
|
||||
intel_upload_unmap(&brw->intel, map, size, dst_stride,
|
||||
intel_upload_unmap(brw, map, size, dst_stride,
|
||||
&buffer->bo, &buffer->offset);
|
||||
}
|
||||
buffer->stride = dst_stride;
|
||||
|
|
@ -472,7 +473,7 @@ static void brw_prepare_vertices(struct brw_context *brw)
|
|||
struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
|
||||
|
||||
/* Named buffer object: Just reference its contents directly. */
|
||||
buffer->bo = intel_bufferobj_source(intel,
|
||||
buffer->bo = intel_bufferobj_source(brw,
|
||||
intel_buffer, 1,
|
||||
&buffer->offset);
|
||||
drm_intel_bo_reference(buffer->bo);
|
||||
|
|
@ -687,7 +688,7 @@ static void brw_emit_vertices(struct brw_context *brw)
|
|||
OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
|
||||
for (i = 0; i < brw->vb.nr_enabled; i++) {
|
||||
struct brw_vertex_element *input = brw->vb.enabled[i];
|
||||
uint32_t format = get_surface_type(intel, input->glarray);
|
||||
uint32_t format = get_surface_type(brw, input->glarray);
|
||||
uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
|
||||
uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
|
||||
uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
|
||||
|
|
@ -748,7 +749,7 @@ static void brw_emit_vertices(struct brw_context *brw)
|
|||
}
|
||||
|
||||
if (intel->gen >= 6 && gen6_edgeflag_input) {
|
||||
uint32_t format = get_surface_type(intel, gen6_edgeflag_input->glarray);
|
||||
uint32_t format = get_surface_type(brw, gen6_edgeflag_input->glarray);
|
||||
|
||||
OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
|
||||
GEN6_VE0_VALID |
|
||||
|
|
@ -820,7 +821,7 @@ static void brw_upload_indices(struct brw_context *brw)
|
|||
|
||||
/* Get new bufferobj, offset:
|
||||
*/
|
||||
intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
|
||||
intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
|
||||
&bo, &offset);
|
||||
brw->ib.start_vertex_offset = offset / ib_type_size;
|
||||
} else {
|
||||
|
|
@ -839,8 +840,7 @@ static void brw_upload_indices(struct brw_context *brw)
|
|||
GL_MAP_READ_BIT,
|
||||
bufferobj);
|
||||
|
||||
intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
|
||||
&bo, &offset);
|
||||
intel_upload_data(brw, map, ib_size, ib_type_size, &bo, &offset);
|
||||
brw->ib.start_vertex_offset = offset / ib_type_size;
|
||||
|
||||
ctx->Driver.UnmapBuffer(ctx, bufferobj);
|
||||
|
|
@ -851,7 +851,7 @@ static void brw_upload_indices(struct brw_context *brw)
|
|||
*/
|
||||
brw->ib.start_vertex_offset = offset / ib_type_size;
|
||||
|
||||
bo = intel_bufferobj_source(intel,
|
||||
bo = intel_bufferobj_source(brw,
|
||||
intel_buffer_object(bufferobj),
|
||||
ib_type_size,
|
||||
&offset);
|
||||
|
|
|
|||
|
|
@ -213,7 +213,7 @@ brw_init_compile(struct brw_context *brw, struct brw_compile *p, void *mem_ctx)
|
|||
p->loop_stack = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
|
||||
p->if_depth_in_loop = rzalloc_array(mem_ctx, int, p->loop_stack_array_size);
|
||||
|
||||
brw_init_compaction_tables(&brw->intel);
|
||||
brw_init_compaction_tables(brw);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -230,7 +230,6 @@ void
|
|||
brw_dump_compile(struct brw_compile *p, FILE *out, int start, int end)
|
||||
{
|
||||
struct brw_context *brw = p->brw;
|
||||
struct intel_context *intel = &brw->intel;
|
||||
void *store = p->store;
|
||||
bool dump_hex = false;
|
||||
|
||||
|
|
@ -247,7 +246,7 @@ brw_dump_compile(struct brw_compile *p, FILE *out, int start, int end)
|
|||
((uint32_t *)insn)[0]);
|
||||
}
|
||||
|
||||
brw_uncompact_instruction(intel, &uncompacted, compacted);
|
||||
brw_uncompact_instruction(brw, &uncompacted, compacted);
|
||||
insn = &uncompacted;
|
||||
offset += 8;
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -393,16 +393,16 @@ void brw_set_uip_jip(struct brw_compile *p);
|
|||
uint32_t brw_swap_cmod(uint32_t cmod);
|
||||
|
||||
/* brw_eu_compact.c */
|
||||
void brw_init_compaction_tables(struct intel_context *intel);
|
||||
void brw_init_compaction_tables(struct brw_context *brw);
|
||||
void brw_compact_instructions(struct brw_compile *p);
|
||||
void brw_uncompact_instruction(struct intel_context *intel,
|
||||
void brw_uncompact_instruction(struct brw_context *brw,
|
||||
struct brw_instruction *dst,
|
||||
struct brw_compact_instruction *src);
|
||||
bool brw_try_compact_instruction(struct brw_compile *p,
|
||||
struct brw_compact_instruction *dst,
|
||||
struct brw_instruction *src);
|
||||
|
||||
void brw_debug_compact_uncompact(struct intel_context *intel,
|
||||
void brw_debug_compact_uncompact(struct brw_context *brw,
|
||||
struct brw_instruction *orig,
|
||||
struct brw_instruction *uncompacted);
|
||||
|
||||
|
|
|
|||
|
|
@ -326,10 +326,11 @@ static const uint32_t *subreg_table;
|
|||
static const uint32_t *src_index_table;
|
||||
|
||||
static bool
|
||||
set_control_index(struct intel_context *intel,
|
||||
set_control_index(struct brw_context *brw,
|
||||
struct brw_compact_instruction *dst,
|
||||
struct brw_instruction *src)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
uint32_t *src_u32 = (uint32_t *)src;
|
||||
uint32_t uncompacted = 0;
|
||||
|
||||
|
|
@ -473,7 +474,7 @@ brw_try_compact_instruction(struct brw_compile *p,
|
|||
|
||||
temp.dw0.opcode = src->header.opcode;
|
||||
temp.dw0.debug_control = src->header.debug_control;
|
||||
if (!set_control_index(intel, &temp, src))
|
||||
if (!set_control_index(brw, &temp, src))
|
||||
return false;
|
||||
if (!set_datatype_index(&temp, src))
|
||||
return false;
|
||||
|
|
@ -498,10 +499,11 @@ brw_try_compact_instruction(struct brw_compile *p,
|
|||
}
|
||||
|
||||
static void
|
||||
set_uncompacted_control(struct intel_context *intel,
|
||||
set_uncompacted_control(struct brw_context *brw,
|
||||
struct brw_instruction *dst,
|
||||
struct brw_compact_instruction *src)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
uint32_t *dst_u32 = (uint32_t *)dst;
|
||||
uint32_t uncompacted = control_index_table[src->dw0.control_index];
|
||||
|
||||
|
|
@ -555,16 +557,17 @@ set_uncompacted_src1(struct brw_instruction *dst,
|
|||
}
|
||||
|
||||
void
|
||||
brw_uncompact_instruction(struct intel_context *intel,
|
||||
brw_uncompact_instruction(struct brw_context *brw,
|
||||
struct brw_instruction *dst,
|
||||
struct brw_compact_instruction *src)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
memset(dst, 0, sizeof(*dst));
|
||||
|
||||
dst->header.opcode = src->dw0.opcode;
|
||||
dst->header.debug_control = src->dw0.debug_control;
|
||||
|
||||
set_uncompacted_control(intel, dst, src);
|
||||
set_uncompacted_control(brw, dst, src);
|
||||
set_uncompacted_datatype(dst, src);
|
||||
set_uncompacted_subreg(dst, src);
|
||||
dst->header.acc_wr_control = src->dw0.acc_wr_control;
|
||||
|
|
@ -578,10 +581,11 @@ brw_uncompact_instruction(struct intel_context *intel,
|
|||
dst->bits3.da1.src1_reg_nr = src->dw1.src1_reg_nr;
|
||||
}
|
||||
|
||||
void brw_debug_compact_uncompact(struct intel_context *intel,
|
||||
void brw_debug_compact_uncompact(struct brw_context *brw,
|
||||
struct brw_instruction *orig,
|
||||
struct brw_instruction *uncompacted)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
fprintf(stderr, "Instruction compact/uncompact changed (gen%d):\n",
|
||||
intel->gen);
|
||||
|
||||
|
|
@ -632,8 +636,9 @@ update_uip_jip(struct brw_instruction *insn, int this_old_ip,
|
|||
}
|
||||
|
||||
void
|
||||
brw_init_compaction_tables(struct intel_context *intel)
|
||||
brw_init_compaction_tables(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
assert(gen6_control_index_table[ARRAY_SIZE(gen6_control_index_table) - 1] != 0);
|
||||
assert(gen6_datatype_table[ARRAY_SIZE(gen6_datatype_table) - 1] != 0);
|
||||
assert(gen6_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0);
|
||||
|
|
@ -697,9 +702,9 @@ brw_compact_instructions(struct brw_compile *p)
|
|||
|
||||
if (INTEL_DEBUG) {
|
||||
struct brw_instruction uncompacted;
|
||||
brw_uncompact_instruction(intel, &uncompacted, dst);
|
||||
brw_uncompact_instruction(brw, &uncompacted, dst);
|
||||
if (memcmp(&saved, &uncompacted, sizeof(uncompacted))) {
|
||||
brw_debug_compact_uncompact(intel, &saved, &uncompacted);
|
||||
brw_debug_compact_uncompact(brw, &saved, &uncompacted);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -165,9 +165,10 @@ lower_texture_grad_visitor::visit_leave(ir_texture *ir)
|
|||
extern "C" {
|
||||
|
||||
bool
|
||||
brw_lower_texture_gradients(struct intel_context *intel,
|
||||
brw_lower_texture_gradients(struct brw_context *brw,
|
||||
struct exec_list *instructions)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
bool has_sample_d_c = intel->gen >= 8 || intel->is_haswell;
|
||||
lower_texture_grad_visitor v(has_sample_d_c);
|
||||
|
||||
|
|
|
|||
|
|
@ -76,8 +76,6 @@ const struct brw_tracked_state brw_drawing_rect = {
|
|||
*/
|
||||
static void upload_binding_table_pointers(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(6);
|
||||
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 | (6 - 2));
|
||||
OUT_BATCH(brw->vs.bind_bo_offset);
|
||||
|
|
@ -110,8 +108,6 @@ const struct brw_tracked_state brw_binding_table_pointers = {
|
|||
*/
|
||||
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
|
||||
GEN6_BINDING_TABLE_MODIFY_VS |
|
||||
|
|
@ -398,7 +394,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
|
|||
perf_debug("HW workaround: blitting depth level %d to a temporary "
|
||||
"to fix alignment (depth tile offset %d,%d)\n",
|
||||
depth_irb->mt_level, tile_x, tile_y);
|
||||
intel_renderbuffer_move_to_temp(intel, depth_irb, invalidate_depth);
|
||||
intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
|
||||
/* In the case of stencil_irb being the same packed depth/stencil
|
||||
* texture but not the same rb, make it point at our rebased mt, too.
|
||||
*/
|
||||
|
|
@ -459,7 +455,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
|
|||
"to fix alignment (stencil tile offset %d,%d)\n",
|
||||
stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);
|
||||
|
||||
intel_renderbuffer_move_to_temp(intel, stencil_irb, invalidate_stencil);
|
||||
intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
|
||||
stencil_mt = get_stencil_miptree(stencil_irb);
|
||||
|
||||
intel_miptree_get_image_offset(stencil_mt,
|
||||
|
|
@ -483,8 +479,7 @@ brw_workaround_depthstencil_alignment(struct brw_context *brw,
|
|||
tile_x, tile_y,
|
||||
stencil_tile_x, stencil_tile_y);
|
||||
|
||||
intel_renderbuffer_move_to_temp(intel, depth_irb,
|
||||
invalidate_depth);
|
||||
intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
|
||||
|
||||
tile_x = depth_irb->draw_x & tile_mask_x;
|
||||
tile_y = depth_irb->draw_y & tile_mask_y;
|
||||
|
|
@ -675,8 +670,8 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
|
|||
* non-pipelined state that will need the PIPE_CONTROL workaround.
|
||||
*/
|
||||
if (intel->gen == 6) {
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_depth_stall_flushes(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
intel_emit_depth_stall_flushes(brw);
|
||||
}
|
||||
|
||||
unsigned int len;
|
||||
|
|
@ -782,7 +777,7 @@ brw_emit_depth_stencil_hiz(struct brw_context *brw,
|
|||
*/
|
||||
if (intel->gen >= 6 || hiz) {
|
||||
if (intel->gen == 6)
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
|
||||
|
|
@ -819,7 +814,7 @@ static void upload_polygon_stipple(struct brw_context *brw)
|
|||
return;
|
||||
|
||||
if (intel->gen == 6)
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
|
||||
BEGIN_BATCH(33);
|
||||
OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));
|
||||
|
|
@ -867,7 +862,7 @@ static void upload_polygon_stipple_offset(struct brw_context *brw)
|
|||
return;
|
||||
|
||||
if (intel->gen == 6)
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));
|
||||
|
|
@ -909,7 +904,7 @@ static void upload_aa_line_parameters(struct brw_context *brw)
|
|||
return;
|
||||
|
||||
if (intel->gen == 6)
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
|
||||
OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
|
||||
/* use legacy aa line coverage computation */
|
||||
|
|
@ -942,7 +937,7 @@ static void upload_line_stipple(struct brw_context *brw)
|
|||
return;
|
||||
|
||||
if (intel->gen == 6)
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
|
||||
BEGIN_BATCH(3);
|
||||
OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
|
||||
|
|
@ -985,7 +980,7 @@ brw_upload_invariant_state(struct brw_context *brw)
|
|||
|
||||
/* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
|
||||
if (intel->gen == 6)
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
|
||||
/* Select the 3D pipeline (as opposed to media) */
|
||||
BEGIN_BATCH(1);
|
||||
|
|
@ -1045,7 +1040,7 @@ static void upload_state_base_address( struct brw_context *brw )
|
|||
|
||||
if (intel->gen >= 6) {
|
||||
if (intel->gen == 6)
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
|
||||
BEGIN_BATCH(10);
|
||||
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
|
||||
|
|
|
|||
|
|
@ -205,9 +205,10 @@ brw_get_scratch_size(int size)
|
|||
}
|
||||
|
||||
void
|
||||
brw_get_scratch_bo(struct intel_context *intel,
|
||||
brw_get_scratch_bo(struct brw_context *brw,
|
||||
drm_intel_bo **scratch_bo, int size)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
drm_intel_bo *old_bo = *scratch_bo;
|
||||
|
||||
if (old_bo && old_bo->size < size) {
|
||||
|
|
|
|||
|
|
@ -45,7 +45,7 @@ struct brw_sampler_prog_key_data {
|
|||
void brw_populate_sampler_prog_key_data(struct gl_context *ctx,
|
||||
const struct gl_program *prog,
|
||||
struct brw_sampler_prog_key_data *key);
|
||||
bool brw_debug_recompile_sampler_key(struct intel_context *intel,
|
||||
bool brw_debug_recompile_sampler_key(struct brw_context *brw,
|
||||
const struct brw_sampler_prog_key_data *old_key,
|
||||
const struct brw_sampler_prog_key_data *key);
|
||||
void brw_add_texrect_params(struct gl_program *prog);
|
||||
|
|
|
|||
|
|
@ -47,8 +47,9 @@
|
|||
* Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
|
||||
*/
|
||||
static void
|
||||
write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
|
||||
write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
if (intel->gen >= 6) {
|
||||
/* Emit workaround flushes: */
|
||||
if (intel->gen == 6) {
|
||||
|
|
@ -92,8 +93,9 @@ write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
|
|||
* Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
|
||||
*/
|
||||
static void
|
||||
write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
|
||||
write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
assert(intel->gen < 6);
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
|
|
@ -120,6 +122,7 @@ static void
|
|||
brw_queryobj_get_results(struct gl_context *ctx,
|
||||
struct brw_query_object *query)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
|
||||
int i;
|
||||
|
|
@ -135,7 +138,7 @@ brw_queryobj_get_results(struct gl_context *ctx,
|
|||
* when mapped.
|
||||
*/
|
||||
if (drm_intel_bo_references(intel->batch.bo, query->bo))
|
||||
intel_batchbuffer_flush(intel);
|
||||
intel_batchbuffer_flush(brw);
|
||||
|
||||
if (unlikely(intel->perf_debug)) {
|
||||
if (drm_intel_bo_busy(query->bo)) {
|
||||
|
|
@ -270,7 +273,7 @@ brw_begin_query(struct gl_context *ctx, struct gl_query_object *q)
|
|||
*/
|
||||
drm_intel_bo_unreference(query->bo);
|
||||
query->bo = drm_intel_bo_alloc(intel->bufmgr, "timer query", 4096, 4096);
|
||||
write_timestamp(intel, query->bo, 0);
|
||||
write_timestamp(brw, query->bo, 0);
|
||||
break;
|
||||
|
||||
case GL_ANY_SAMPLES_PASSED:
|
||||
|
|
@ -323,7 +326,7 @@ brw_end_query(struct gl_context *ctx, struct gl_query_object *q)
|
|||
switch (query->Base.Target) {
|
||||
case GL_TIME_ELAPSED_EXT:
|
||||
/* Write the final timestamp. */
|
||||
write_timestamp(intel, query->bo, 1);
|
||||
write_timestamp(brw, query->bo, 1);
|
||||
break;
|
||||
|
||||
case GL_ANY_SAMPLES_PASSED:
|
||||
|
|
@ -386,6 +389,7 @@ static void brw_wait_query(struct gl_context *ctx, struct gl_query_object *q)
|
|||
*/
|
||||
static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct brw_query_object *query = (struct brw_query_object *)q;
|
||||
|
||||
|
|
@ -399,7 +403,7 @@ static void brw_check_query(struct gl_context *ctx, struct gl_query_object *q)
|
|||
* the async query will return true in finite time.
|
||||
*/
|
||||
if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
|
||||
intel_batchbuffer_flush(intel);
|
||||
intel_batchbuffer_flush(brw);
|
||||
|
||||
if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
|
||||
brw_queryobj_get_results(ctx, query);
|
||||
|
|
@ -473,7 +477,7 @@ brw_emit_query_begin(struct brw_context *brw)
|
|||
|
||||
ensure_bo_has_space(ctx, query);
|
||||
|
||||
write_depth_count(intel, query->bo, query->last_index * 2);
|
||||
write_depth_count(brw, query->bo, query->last_index * 2);
|
||||
|
||||
brw->query.begin_emitted = true;
|
||||
}
|
||||
|
|
@ -496,7 +500,7 @@ brw_emit_query_end(struct brw_context *brw)
|
|||
if (!brw->query.begin_emitted)
|
||||
return;
|
||||
|
||||
write_depth_count(intel, query->bo, query->last_index * 2 + 1);
|
||||
write_depth_count(brw, query->bo, query->last_index * 2 + 1);
|
||||
|
||||
brw->query.begin_emitted = false;
|
||||
query->last_index++;
|
||||
|
|
@ -512,6 +516,7 @@ brw_emit_query_end(struct brw_context *brw)
|
|||
static void
|
||||
brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
|
||||
{
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = intel_context(ctx);
|
||||
struct brw_query_object *query = (struct brw_query_object *) q;
|
||||
|
||||
|
|
@ -519,7 +524,7 @@ brw_query_counter(struct gl_context *ctx, struct gl_query_object *q)
|
|||
|
||||
drm_intel_bo_unreference(query->bo);
|
||||
query->bo = drm_intel_bo_alloc(intel->bufmgr, "timestamp query", 4096, 4096);
|
||||
write_timestamp(intel, query->bo, 0);
|
||||
write_timestamp(brw, query->bo, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -59,8 +59,10 @@ static bool debug = false;
|
|||
class schedule_node : public exec_node
|
||||
{
|
||||
public:
|
||||
schedule_node(backend_instruction *inst, const struct intel_context *intel)
|
||||
schedule_node(backend_instruction *inst, const struct brw_context *brw)
|
||||
{
|
||||
const struct intel_context *intel = &brw->intel;
|
||||
|
||||
this->inst = inst;
|
||||
this->child_array_size = 0;
|
||||
this->children = NULL;
|
||||
|
|
@ -428,7 +430,7 @@ vec4_instruction_scheduler::vec4_instruction_scheduler(vec4_visitor *v,
|
|||
void
|
||||
instruction_scheduler::add_inst(backend_instruction *inst)
|
||||
{
|
||||
schedule_node *n = new(mem_ctx) schedule_node(inst, bv->intel);
|
||||
schedule_node *n = new(mem_ctx) schedule_node(inst, bv->brw);
|
||||
|
||||
assert(!inst->is_head_sentinel());
|
||||
assert(!inst->is_tail_sentinel());
|
||||
|
|
|
|||
|
|
@ -166,7 +166,7 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
|
|||
lower_if_to_cond_assign(shader->ir, 16);
|
||||
|
||||
do_lower_texture_projection(shader->ir);
|
||||
brw_lower_texture_gradients(intel, shader->ir);
|
||||
brw_lower_texture_gradients(brw, shader->ir);
|
||||
do_vec_index_to_cond_assign(shader->ir);
|
||||
lower_vector_insert(shader->ir, true);
|
||||
brw_do_cubemap_normalize(shader->ir);
|
||||
|
|
|
|||
|
|
@ -160,7 +160,7 @@ void brw_destroy_caches( struct brw_context *brw );
|
|||
/***********************************************************************
|
||||
* brw_state_batch.c
|
||||
*/
|
||||
#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data(&brw->intel, (s), \
|
||||
#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data(brw, (s), \
|
||||
sizeof(*(s)), false)
|
||||
|
||||
void *brw_state_batch(struct brw_context *brw,
|
||||
|
|
@ -178,7 +178,7 @@ uint32_t brw_format_for_mesa_format(gl_format mesa_format);
|
|||
|
||||
GLuint translate_tex_target(GLenum target);
|
||||
|
||||
GLuint translate_tex_format(struct intel_context *intel,
|
||||
GLuint translate_tex_format(struct brw_context *brw,
|
||||
gl_format mesa_format,
|
||||
GLenum depth_mode,
|
||||
GLenum srgb_decode);
|
||||
|
|
|
|||
|
|
@ -79,9 +79,9 @@ make_annotation(drm_intel_aub_annotation *annotation, uint32_t type,
|
|||
* is annotated according to the type of each data structure.
|
||||
*/
|
||||
void
|
||||
brw_annotate_aub(struct intel_context *intel)
|
||||
brw_annotate_aub(struct brw_context *brw)
|
||||
{
|
||||
struct brw_context *brw = brw_context(&intel->ctx);
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
unsigned annotation_count = 2 * brw->state_batch_count + 1;
|
||||
drm_intel_aub_annotation annotations[annotation_count];
|
||||
|
|
@ -135,7 +135,7 @@ brw_state_batch(struct brw_context *brw,
|
|||
*/
|
||||
if (batch->state_batch_offset < size ||
|
||||
offset < 4*batch->used + batch->reserved_space) {
|
||||
intel_batchbuffer_flush(&brw->intel);
|
||||
intel_batchbuffer_flush(brw);
|
||||
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -351,7 +351,6 @@ brw_init_caches(struct brw_context *brw)
|
|||
static void
|
||||
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct brw_cache_item *c, *next;
|
||||
GLuint i;
|
||||
|
||||
|
|
@ -383,7 +382,7 @@ brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
|
|||
brw->state.dirty.mesa |= ~0;
|
||||
brw->state.dirty.brw |= ~0;
|
||||
brw->state.dirty.cache |= ~0;
|
||||
intel_batchbuffer_flush(intel);
|
||||
intel_batchbuffer_flush(brw);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
|||
|
|
@ -641,9 +641,9 @@ dump_state_batch(struct brw_context *brw)
|
|||
* The buffer offsets printed rely on the buffer containing the last offset
|
||||
* it was validated at.
|
||||
*/
|
||||
void brw_debug_batch(struct intel_context *intel)
|
||||
void brw_debug_batch(struct brw_context *brw)
|
||||
{
|
||||
struct brw_context *brw = brw_context(&intel->ctx);
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
drm_intel_bo_map(intel->batch.bo, false);
|
||||
dump_state_batch(brw);
|
||||
|
|
|
|||
|
|
@ -478,7 +478,7 @@ void brw_upload_state(struct brw_context *brw)
|
|||
if ((state->mesa | state->cache | state->brw) == 0)
|
||||
return;
|
||||
|
||||
intel_check_front_buffer_rendering(intel);
|
||||
intel_check_front_buffer_rendering(brw);
|
||||
|
||||
if (unlikely(INTEL_DEBUG)) {
|
||||
/* Debug version which enforces various sanity checks on the
|
||||
|
|
|
|||
|
|
@ -649,10 +649,10 @@ brw_init_surface_formats(struct brw_context *brw)
|
|||
}
|
||||
|
||||
bool
|
||||
brw_render_target_supported(struct intel_context *intel,
|
||||
brw_render_target_supported(struct brw_context *brw,
|
||||
struct gl_renderbuffer *rb)
|
||||
{
|
||||
struct brw_context *brw = brw_context(&intel->ctx);
|
||||
struct intel_context *intel = &brw->intel;
|
||||
gl_format format = rb->Format;
|
||||
|
||||
/* Many integer formats are promoted to RGBA (like XRGB8888 is), which means
|
||||
|
|
@ -683,12 +683,13 @@ brw_render_target_supported(struct intel_context *intel,
|
|||
}
|
||||
|
||||
GLuint
|
||||
translate_tex_format(struct intel_context *intel,
|
||||
translate_tex_format(struct brw_context *brw,
|
||||
gl_format mesa_format,
|
||||
GLenum depth_mode,
|
||||
GLenum srgb_decode)
|
||||
{
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct gl_context *ctx = &brw->intel.ctx;
|
||||
if (srgb_decode == GL_SKIP_DECODE_EXT)
|
||||
mesa_format = _mesa_get_srgb_format_linear(mesa_format);
|
||||
|
||||
|
|
@ -732,8 +733,9 @@ translate_tex_format(struct intel_context *intel,
|
|||
|
||||
/** Can HiZ be enabled on a depthbuffer of the given format? */
|
||||
bool
|
||||
brw_is_hiz_depth_format(struct intel_context *intel, gl_format format)
|
||||
brw_is_hiz_depth_format(struct brw_context *brw, gl_format format)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
if (!intel->has_hiz)
|
||||
return false;
|
||||
|
||||
|
|
|
|||
|
|
@ -45,14 +45,13 @@
|
|||
void brw_validate_textures( struct brw_context *brw )
|
||||
{
|
||||
struct gl_context *ctx = &brw->intel.ctx;
|
||||
struct intel_context *intel = &brw->intel;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
|
||||
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
|
||||
|
||||
if (texUnit->_ReallyEnabled) {
|
||||
intel_finalize_mipmap_tree(intel, i);
|
||||
intel_finalize_mipmap_tree(brw, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,9 +39,10 @@
|
|||
#define FILE_DEBUG_FLAG DEBUG_MIPTREE
|
||||
|
||||
static unsigned int
|
||||
intel_horizontal_texture_alignment_unit(struct intel_context *intel,
|
||||
intel_horizontal_texture_alignment_unit(struct brw_context *brw,
|
||||
gl_format format)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
/**
|
||||
* From the "Alignment Unit Size" section of various specs, namely:
|
||||
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
|
||||
|
|
@ -93,9 +94,10 @@ intel_horizontal_texture_alignment_unit(struct intel_context *intel,
|
|||
}
|
||||
|
||||
static unsigned int
|
||||
intel_vertical_texture_alignment_unit(struct intel_context *intel,
|
||||
intel_vertical_texture_alignment_unit(struct brw_context *brw,
|
||||
gl_format format)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
/**
|
||||
* From the "Alignment Unit Size" section of various specs, namely:
|
||||
* - Gen3 Spec: "Memory Data Formats" Volume, Section 1.20.1.4
|
||||
|
|
@ -205,9 +207,10 @@ brw_miptree_layout_2d(struct intel_mipmap_tree *mt)
|
|||
}
|
||||
|
||||
static void
|
||||
brw_miptree_layout_texture_array(struct intel_context *intel,
|
||||
brw_miptree_layout_texture_array(struct brw_context *brw,
|
||||
struct intel_mipmap_tree *mt)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
unsigned qpitch = 0;
|
||||
int h0, h1;
|
||||
|
||||
|
|
@ -231,7 +234,7 @@ brw_miptree_layout_texture_array(struct intel_context *intel,
|
|||
}
|
||||
|
||||
static void
|
||||
brw_miptree_layout_texture_3d(struct intel_context *intel,
|
||||
brw_miptree_layout_texture_3d(struct brw_context *brw,
|
||||
struct intel_mipmap_tree *mt)
|
||||
{
|
||||
unsigned width = mt->physical_width0;
|
||||
|
|
@ -309,39 +312,40 @@ brw_miptree_layout_texture_3d(struct intel_context *intel,
|
|||
}
|
||||
|
||||
void
|
||||
brw_miptree_layout(struct intel_context *intel, struct intel_mipmap_tree *mt)
|
||||
brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt)
|
||||
{
|
||||
mt->align_w = intel_horizontal_texture_alignment_unit(intel, mt->format);
|
||||
mt->align_h = intel_vertical_texture_alignment_unit(intel, mt->format);
|
||||
struct intel_context *intel = &brw->intel;
|
||||
mt->align_w = intel_horizontal_texture_alignment_unit(brw, mt->format);
|
||||
mt->align_h = intel_vertical_texture_alignment_unit(brw, mt->format);
|
||||
|
||||
switch (mt->target) {
|
||||
case GL_TEXTURE_CUBE_MAP:
|
||||
if (intel->gen == 4) {
|
||||
/* Gen4 stores cube maps as 3D textures. */
|
||||
assert(mt->physical_depth0 == 6);
|
||||
brw_miptree_layout_texture_3d(intel, mt);
|
||||
brw_miptree_layout_texture_3d(brw, mt);
|
||||
} else {
|
||||
/* All other hardware stores cube maps as 2D arrays. */
|
||||
brw_miptree_layout_texture_array(intel, mt);
|
||||
brw_miptree_layout_texture_array(brw, mt);
|
||||
}
|
||||
break;
|
||||
|
||||
case GL_TEXTURE_3D:
|
||||
brw_miptree_layout_texture_3d(intel, mt);
|
||||
brw_miptree_layout_texture_3d(brw, mt);
|
||||
break;
|
||||
|
||||
case GL_TEXTURE_1D_ARRAY:
|
||||
case GL_TEXTURE_2D_ARRAY:
|
||||
case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
|
||||
case GL_TEXTURE_CUBE_MAP_ARRAY:
|
||||
brw_miptree_layout_texture_array(intel, mt);
|
||||
brw_miptree_layout_texture_array(brw, mt);
|
||||
break;
|
||||
|
||||
default:
|
||||
switch (mt->msaa_layout) {
|
||||
case INTEL_MSAA_LAYOUT_UMS:
|
||||
case INTEL_MSAA_LAYOUT_CMS:
|
||||
brw_miptree_layout_texture_array(intel, mt);
|
||||
brw_miptree_layout_texture_array(brw, mt);
|
||||
break;
|
||||
case INTEL_MSAA_LAYOUT_NONE:
|
||||
case INTEL_MSAA_LAYOUT_IMS:
|
||||
|
|
|
|||
|
|
@ -317,7 +317,7 @@ do_vs_prog(struct brw_context *brw,
      prog_data.base.total_scratch
         = brw_get_scratch_size(c.base.last_scratch*REG_SIZE);

      brw_get_scratch_bo(intel, &brw->vs.scratch_bo,
      brw_get_scratch_bo(brw, &brw->vs.scratch_bo,
                         prog_data.base.total_scratch * brw->max_vs_threads);
   }

@ -332,8 +332,9 @@ do_vs_prog(struct brw_context *brw,
}

static bool
key_debug(struct intel_context *intel, const char *name, int a, int b)
key_debug(struct brw_context *brw, const char *name, int a, int b)
{
   struct intel_context *intel = &brw->intel;
   if (a != b) {
      perf_debug("   %s %d->%d\n", name, a, b);
      return true;

@ -373,31 +374,31 @@ brw_vs_debug_recompile(struct brw_context *brw,
   }

   for (unsigned int i = 0; i < VERT_ATTRIB_MAX; i++) {
      found |= key_debug(intel, "Vertex attrib w/a flags",
      found |= key_debug(brw, "Vertex attrib w/a flags",
                         old_key->gl_attrib_wa_flags[i],
                         key->gl_attrib_wa_flags[i]);
   }

   found |= key_debug(intel, "user clip flags",
   found |= key_debug(brw, "user clip flags",
                      old_key->base.userclip_active, key->base.userclip_active);

   found |= key_debug(intel, "user clipping planes as push constants",
   found |= key_debug(brw, "user clipping planes as push constants",
                      old_key->base.nr_userclip_plane_consts,
                      key->base.nr_userclip_plane_consts);

   found |= key_debug(intel, "clip distance enable",
   found |= key_debug(brw, "clip distance enable",
                      old_key->base.uses_clip_distance, key->base.uses_clip_distance);
   found |= key_debug(intel, "clip plane enable bitfield",
   found |= key_debug(brw, "clip plane enable bitfield",
                      old_key->base.userclip_planes_enabled_gen_4_5,
                      key->base.userclip_planes_enabled_gen_4_5);
   found |= key_debug(intel, "copy edgeflag",
   found |= key_debug(brw, "copy edgeflag",
                      old_key->copy_edgeflag, key->copy_edgeflag);
   found |= key_debug(intel, "PointCoord replace",
   found |= key_debug(brw, "PointCoord replace",
                      old_key->point_coord_replace, key->point_coord_replace);
   found |= key_debug(intel, "vertex color clamping",
   found |= key_debug(brw, "vertex color clamping",
                      old_key->base.clamp_vertex_color, key->base.clamp_vertex_color);

   found |= brw_debug_recompile_sampler_key(intel, &old_key->base.tex,
   found |= brw_debug_recompile_sampler_key(brw, &old_key->base.tex,
                                            &key->base.tex);

   if (!found) {
@ -66,10 +66,10 @@ dri_bo_release(drm_intel_bo **bo)
/**
 * called from intelDestroyContext()
 */
static void brw_destroy_context( struct intel_context *intel )
static void
brw_destroy_context(struct brw_context *brw)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   struct intel_context *intel = &brw->intel;
   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

@ -99,9 +99,9 @@ static void brw_destroy_context( struct intel_context *intel )
 * at the end of a batchbuffer. If you add more GPU state, increase
 * the BATCH_RESERVED macro.
 */
static void brw_finish_batch(struct intel_context *intel)
static void
brw_finish_batch(struct brw_context *brw)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   brw_emit_query_end(brw);

   if (brw->curbe.curbe_bo) {

@ -115,9 +115,10 @@ static void brw_finish_batch(struct intel_context *intel)
/**
 * called from intelFlushBatchLocked
 */
static void brw_new_batch( struct intel_context *intel )
static void
brw_new_batch(struct brw_context *brw)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   struct intel_context *intel = &brw->intel;

   /* If the kernel supports hardware contexts, then most hardware state is
    * preserved between batches; we only need to re-emit state that is required
@ -184,7 +184,7 @@ bool do_wm_prog(struct brw_context *brw,

      c->prog_data.total_scratch = brw_get_scratch_size(c->last_scratch);

      brw_get_scratch_bo(intel, &brw->wm.scratch_bo,
      brw_get_scratch_bo(brw, &brw->wm.scratch_bo,
                         c->prog_data.total_scratch * brw->max_wm_threads);
   }

@ -203,8 +203,9 @@ bool do_wm_prog(struct brw_context *brw,
}

static bool
key_debug(struct intel_context *intel, const char *name, int a, int b)
key_debug(struct brw_context *brw, const char *name, int a, int b)
{
   struct intel_context *intel = &brw->intel;
   if (a != b) {
      perf_debug("   %s %d->%d\n", name, a, b);
      return true;

@ -214,25 +215,25 @@ key_debug(struct intel_context *intel, const char *name, int a, int b)
}

bool
brw_debug_recompile_sampler_key(struct intel_context *intel,
brw_debug_recompile_sampler_key(struct brw_context *brw,
                                const struct brw_sampler_prog_key_data *old_key,
                                const struct brw_sampler_prog_key_data *key)
{
   bool found = false;

   for (unsigned int i = 0; i < MAX_SAMPLERS; i++) {
      found |= key_debug(intel, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
      found |= key_debug(brw, "EXT_texture_swizzle or DEPTH_TEXTURE_MODE",
                         old_key->swizzles[i], key->swizzles[i]);
   }
   found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 1st coordinate",
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 1st coordinate",
                      old_key->gl_clamp_mask[0], key->gl_clamp_mask[0]);
   found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 2nd coordinate",
                      old_key->gl_clamp_mask[1], key->gl_clamp_mask[1]);
   found |= key_debug(intel, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
   found |= key_debug(brw, "GL_CLAMP enabled on any texture unit's 3rd coordinate",
                      old_key->gl_clamp_mask[2], key->gl_clamp_mask[2]);
   found |= key_debug(intel, "GL_MESA_ycbcr texturing\n",
   found |= key_debug(brw, "GL_MESA_ycbcr texturing\n",
                      old_key->yuvtex_mask, key->yuvtex_mask);
   found |= key_debug(intel, "GL_MESA_ycbcr UV swapping\n",
   found |= key_debug(brw, "GL_MESA_ycbcr UV swapping\n",
                      old_key->yuvtex_swap_mask, key->yuvtex_swap_mask);

   return found;

@ -268,29 +269,29 @@ brw_wm_debug_recompile(struct brw_context *brw,
      return;
   }

   found |= key_debug(intel, "alphatest, computed depth, depth test, or "
   found |= key_debug(brw, "alphatest, computed depth, depth test, or "
                      "depth write",
                      old_key->iz_lookup, key->iz_lookup);
   found |= key_debug(intel, "depth statistics",
   found |= key_debug(brw, "depth statistics",
                      old_key->stats_wm, key->stats_wm);
   found |= key_debug(intel, "flat shading",
   found |= key_debug(brw, "flat shading",
                      old_key->flat_shade, key->flat_shade);
   found |= key_debug(intel, "number of color buffers",
   found |= key_debug(brw, "number of color buffers",
                      old_key->nr_color_regions, key->nr_color_regions);
   found |= key_debug(intel, "MRT alpha test or alpha-to-coverage",
   found |= key_debug(brw, "MRT alpha test or alpha-to-coverage",
                      old_key->replicate_alpha, key->replicate_alpha);
   found |= key_debug(intel, "rendering to FBO",
   found |= key_debug(brw, "rendering to FBO",
                      old_key->render_to_fbo, key->render_to_fbo);
   found |= key_debug(intel, "fragment color clamping",
   found |= key_debug(brw, "fragment color clamping",
                      old_key->clamp_fragment_color, key->clamp_fragment_color);
   found |= key_debug(intel, "line smoothing",
   found |= key_debug(brw, "line smoothing",
                      old_key->line_aa, key->line_aa);
   found |= key_debug(intel, "renderbuffer height",
   found |= key_debug(brw, "renderbuffer height",
                      old_key->drawable_height, key->drawable_height);
   found |= key_debug(intel, "input slots valid",
   found |= key_debug(brw, "input slots valid",
                      old_key->input_slots_valid, key->input_slots_valid);

   found |= brw_debug_recompile_sampler_key(intel, &old_key->tex, &key->tex);
   found |= brw_debug_recompile_sampler_key(brw, &old_key->tex, &key->tex);

   if (!found) {
      perf_debug("   Something else\n");
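The recompile-debug helpers above boil down to a field-by-field comparison of the old and new program keys, with perf_debug() reporting each mismatch. A self-contained sketch of the same idea (plain printf stands in for Mesa's perf_debug, and the key struct is invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct example_wm_key {
   int nr_color_regions;
   bool flat_shade;
};

/* Mirrors the shape of key_debug(): report and flag any changed field. */
static bool
key_field_changed(const char *name, int a, int b)
{
   if (a == b)
      return false;
   printf("   %s %d->%d\n", name, a, b);
   return true;
}

static void
example_debug_recompile(const struct example_wm_key *old_key,
                        const struct example_wm_key *key)
{
   bool found = false;
   found |= key_field_changed("number of color buffers",
                              old_key->nr_color_regions, key->nr_color_regions);
   found |= key_field_changed("flat shading",
                              old_key->flat_shade, key->flat_shade);
   if (!found)
      printf("   Something else\n");
}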
@ -251,7 +251,6 @@ brw_update_texture_surface(struct gl_context *ctx,
                           uint32_t *binding_table,
                           unsigned surf_index)
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);

@ -272,7 +271,7 @@ brw_update_texture_surface(struct gl_context *ctx,
   surf[0] = (translate_tex_target(tObj->Target) << BRW_SURFACE_TYPE_SHIFT |
              BRW_SURFACE_MIPMAPLAYOUT_BELOW << BRW_SURFACE_MIPLAYOUT_SHIFT |
              BRW_SURFACE_CUBEFACE_ENABLES |
              (translate_tex_format(intel,
              (translate_tex_format(brw,
                                    mt->format,
                                    tObj->DepthMode,
                                    sampler->sRGBDecode) <<

@ -374,8 +373,7 @@ brw_update_sol_surface(struct brw_context *brw,
{
   struct intel_context *intel = &brw->intel;
   struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
   drm_intel_bo *bo =
      intel_bufferobj_buffer(intel, intel_bo, INTEL_WRITE_PART);
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_WRITE_PART);
   uint32_t *surf = brw_state_batch(brw, AUB_TRACE_SURFACE_STATE, 6 * 4, 32,
                                    out_offset);
   uint32_t pitch_minus_1 = 4*stride_dwords - 1;

@ -561,7 +559,7 @@ brw_update_null_renderbuffer_surface(struct brw_context *brw, unsigned int unit)
      unsigned width_in_tiles = ALIGN(fb->Width, 16) / 16;
      unsigned height_in_tiles = ALIGN(fb->Height, 16) / 16;
      unsigned size_needed = (width_in_tiles + height_in_tiles - 1) * 4096;
      brw_get_scratch_bo(intel, &brw->wm.multisampled_null_render_target_bo,
      brw_get_scratch_bo(brw, &brw->wm.multisampled_null_render_target_bo,
                         size_needed);
      bo = brw->wm.multisampled_null_render_target_bo;
      surface_type = BRW_SURFACE_2D;

@ -634,7 +632,7 @@ brw_update_renderbuffer_surface(struct brw_context *brw,
          * select the image. So, instead, we just make a new single-level
          * miptree and render into that.
          */
         intel_renderbuffer_move_to_temp(intel, irb, false);
         intel_renderbuffer_move_to_temp(brw, irb, false);
         mt = irb->mt;
      }
   }

@ -831,7 +829,7 @@ brw_upload_ubo_surfaces(struct brw_context *brw,

      binding = &ctx->UniformBufferBindings[shader->UniformBlocks[i].Binding];
      intel_bo = intel_buffer_object(binding->BufferObject);
      drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_bo, INTEL_READ);
      drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_bo, INTEL_READ);

      /* Because behavior for referencing outside of the binding's size in the
       * glBindBufferRange case is undefined, we can just bind the whole buffer
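As a rough worked example of the null-render-target sizing above (the framebuffer dimensions are mine, not from the patch): a 1920x1080 framebuffer gives ALIGN(1920, 16) / 16 = 120 tiles across and ALIGN(1080, 16) / 16 = 68 tiles down, so size_needed = (120 + 68 - 1) * 4096 = 765,952 bytes, roughly 748 KiB of scratch.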
@ -231,8 +231,6 @@ static void
gen6_blorp_emit_urb_config(struct brw_context *brw,
                           const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_URB << 16 | (3 - 2));
   OUT_BATCH(brw->urb.max_vs_entries << GEN6_URB_VS_ENTRIES_SHIFT);

@ -351,8 +349,6 @@ gen6_blorp_emit_cc_state_pointers(struct brw_context *brw,
                                  uint32_t depthstencil_offset,
                                  uint32_t cc_state_offset)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2));
   OUT_BATCH(cc_blend_state_offset | 1); /* BLEND_STATE offset */

@ -539,8 +535,6 @@ gen6_blorp_emit_sampler_state_pointers(struct brw_context *brw,
                                       const brw_blorp_params *params,
                                       uint32_t sampler_offset)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS << 16 |
             VS_SAMPLER_STATE_CHANGE |

@ -573,7 +567,7 @@ gen6_blorp_emit_vs_disable(struct brw_context *brw,
       * toggle.  Pipeline flush can be executed by sending a PIPE_CONTROL
       * command with CS stall bit set and a post sync operation.
       */
      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_post_sync_nonzero_flush(brw);
   }

   /* Disable the push constant buffers. */

@ -604,8 +598,6 @@ void
gen6_blorp_emit_gs_disable(struct brw_context *brw,
                           const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   /* Disable all the constant buffers. */
   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (5 - 2));

@ -645,8 +637,6 @@ void
gen6_blorp_emit_clip_disable(struct brw_context *brw,
                             const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_CLIP << 16 | (4 - 2));
   OUT_BATCH(0);

@ -678,8 +668,6 @@ static void
gen6_blorp_emit_sf_config(struct brw_context *brw,
                          const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(20);
   OUT_BATCH(_3DSTATE_SF << 16 | (20 - 2));
   OUT_BATCH((1 - 1) << GEN6_SF_NUM_OUTPUTS_SHIFT | /* only position */

@ -702,7 +690,6 @@ gen6_blorp_emit_wm_config(struct brw_context *brw,
                          uint32_t prog_offset,
                          brw_blorp_prog_data *prog_data)
{
   struct intel_context *intel = &brw->intel;
   uint32_t dw2, dw4, dw5, dw6;

   /* Even when thread dispatch is disabled, max threads (dw5.25:31) must be

@ -774,8 +761,6 @@ gen6_blorp_emit_constant_ps(struct brw_context *brw,
                            const brw_blorp_params *params,
                            uint32_t wm_push_const_offset)
{
   struct intel_context *intel = &brw->intel;

   /* Make sure the push constants fill an exact integer number of
    * registers.
    */

@ -800,8 +785,6 @@ static void
gen6_blorp_emit_constant_ps_disable(struct brw_context *brw,
                                    const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   /* Disable the push constant buffers. */
   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (5 - 2));

@ -820,8 +803,6 @@ gen6_blorp_emit_binding_table_pointers(struct brw_context *brw,
                                       const brw_blorp_params *params,
                                       uint32_t wm_bind_bo_offset)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
             GEN6_BINDING_TABLE_MODIFY_PS |

@ -879,8 +860,8 @@ gen6_blorp_emit_depth_stencil_config(struct brw_context *brw,
      tile_x &= ~7;
      tile_y &= ~7;

      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);
      intel_emit_post_sync_nonzero_flush(brw);
      intel_emit_depth_stall_flushes(brw);

      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));

@ -936,8 +917,6 @@ static void
gen6_blorp_emit_depth_disable(struct brw_context *brw,
                              const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
   OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |

@ -961,8 +940,6 @@ static void
gen6_blorp_emit_clear_params(struct brw_context *brw,
                             const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
             GEN5_DEPTH_CLEAR_VALID |

@ -977,8 +954,6 @@ void
gen6_blorp_emit_drawing_rectangle(struct brw_context *brw,
                                  const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0);

@ -993,7 +968,6 @@ void
gen6_blorp_emit_viewport_state(struct brw_context *brw,
                               const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;
   struct brw_cc_viewport *ccv;
   uint32_t cc_vp_offset;

@ -1019,8 +993,6 @@ static void
gen6_blorp_emit_primitive(struct brw_context *brw,
                          const brw_blorp_params *params)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             _3DPRIM_RECTLIST << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |

@ -1044,11 +1016,9 @@ gen6_blorp_emit_primitive(struct brw_context *brw,
 * This function alters no GL state.
 */
void
gen6_blorp_exec(struct intel_context *intel,
gen6_blorp_exec(struct brw_context *brw,
                const brw_blorp_params *params)
{
   struct gl_context *ctx = &intel->ctx;
   struct brw_context *brw = brw_context(ctx);
   brw_blorp_prog_data *prog_data = NULL;
   uint32_t cc_blend_state_offset = 0;
   uint32_t cc_state_offset = 0;

@ -36,7 +36,7 @@ struct intel_mipmap_tree;
}

void
gen6_blorp_exec(struct intel_context *intel,
gen6_blorp_exec(struct brw_context *brw,
                const brw_blorp_params *params);

#endif
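Most of the gen6 BLORP hunks above do nothing but delete the now-unused local intel pointer: once the batch macros resolve against a variable named brw, an emit helper that only calls BEGIN_BATCH()/OUT_BATCH() has no other use for it. A toy, self-contained sketch of that shape (the structs and macros below are stand-ins invented for illustration, not the driver's real ones):

#include <stdint.h>
#include <assert.h>

struct toy_batch {
   uint32_t map[64];
   int used;
};

struct toy_brw_context {
   struct toy_batch batch;
};

/* Like the reworked BEGIN_BATCH/OUT_BATCH, these expand against "brw". */
#define TOY_BEGIN_BATCH(n)  assert(brw->batch.used + (n) <= 64)
#define TOY_OUT_BATCH(d)    (brw->batch.map[brw->batch.used++] = (d))

/* Post-patch style emit helper: only "brw" is ever named in the body. */
static void
toy_emit_packet(struct toy_brw_context *brw, uint32_t header, uint32_t payload)
{
   TOY_BEGIN_BATCH(2);
   TOY_OUT_BATCH(header);
   TOY_OUT_BATCH(payload);
}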
@ -33,8 +33,6 @@
static void
upload_gs_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* Disable all the constant buffers. */
   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (5 - 2));

@ -149,8 +149,6 @@ gen6_emit_3dstate_sample_mask(struct brw_context *brw,
                              unsigned num_samples, float coverage,
                              bool coverage_invert, unsigned sample_mask)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
   if (num_samples > 1) {

@ -189,7 +187,7 @@ static void upload_multisample_state(struct brw_context *brw)
   }

   /* 3DSTATE_MULTISAMPLE is nonpipelined. */
   intel_emit_post_sync_nonzero_flush(intel);
   intel_emit_post_sync_nonzero_flush(brw);

   gen6_emit_3dstate_multisample(brw, num_samples);
   gen6_emit_3dstate_sample_mask(brw, num_samples, coverage,
@ -43,8 +43,9 @@
 * Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
 */
static void
write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
write_timestamp(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
   struct intel_context *intel = &brw->intel;
   /* Emit workaround flushes: */
   if (intel->gen == 6) {
      /* The timestamp write below is a non-zero post-sync op, which on

@ -75,11 +76,12 @@ write_timestamp(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
 * Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
 */
static void
write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
write_depth_count(struct brw_context *brw, drm_intel_bo *query_bo, int idx)
{
   struct intel_context *intel = &brw->intel;
   /* Emit Sandybridge workaround flush: */
   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));

@ -102,12 +104,13 @@ write_depth_count(struct intel_context *intel, drm_intel_bo *query_bo, int idx)
 * function also performs a pipeline flush for proper synchronization.
 */
static void
write_reg(struct intel_context *intel,
write_reg(struct brw_context *brw,
          drm_intel_bo *query_bo, uint32_t reg, int idx)
{
   struct intel_context *intel = &brw->intel;
   assert(intel->gen >= 6);

   intel_batchbuffer_emit_mi_flush(intel);
   intel_batchbuffer_emit_mi_flush(brw);

   /* MI_STORE_REGISTER_MEM only stores a single 32-bit value, so to
    * read a full 64-bit register, we need to do two of them.

@ -128,20 +131,21 @@ write_reg(struct intel_context *intel,
}

static void
write_primitives_generated(struct intel_context *intel,
write_primitives_generated(struct brw_context *brw,
                           drm_intel_bo *query_bo, int idx)
{
   write_reg(intel, query_bo, CL_INVOCATION_COUNT, idx);
   write_reg(brw, query_bo, CL_INVOCATION_COUNT, idx);
}

static void
write_xfb_primitives_written(struct intel_context *intel,
write_xfb_primitives_written(struct brw_context *brw,
                             drm_intel_bo *query_bo, int idx)
{
   struct intel_context *intel = &brw->intel;
   if (intel->gen >= 7) {
      write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
      write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN0_IVB, idx);
   } else {
      write_reg(intel, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
      write_reg(brw, query_bo, SO_NUM_PRIMS_WRITTEN, idx);
   }
}

@ -152,6 +156,7 @@ static void
gen6_queryobj_get_results(struct gl_context *ctx,
                          struct brw_query_object *query)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);

   if (query->bo == NULL)

@ -162,7 +167,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
    * when mapped.
    */
   if (drm_intel_bo_references(intel->batch.bo, query->bo))
      intel_batchbuffer_flush(intel);
      intel_batchbuffer_flush(brw);

   if (unlikely(intel->perf_debug)) {
      if (drm_intel_bo_busy(query->bo)) {

@ -243,6 +248,7 @@ gen6_queryobj_get_results(struct gl_context *ctx,
static void
gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

@ -271,21 +277,21 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
       * obtain the time elapsed.  Notably, this includes time elapsed while
       * the system was doing other work, such as running other applications.
       */
      write_timestamp(intel, query->bo, 0);
      write_timestamp(brw, query->bo, 0);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      write_depth_count(intel, query->bo, 0);
      write_depth_count(brw, query->bo, 0);
      break;

   case GL_PRIMITIVES_GENERATED:
      write_primitives_generated(intel, query->bo, 0);
      write_primitives_generated(brw, query->bo, 0);
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      write_xfb_primitives_written(intel, query->bo, 0);
      write_xfb_primitives_written(brw, query->bo, 0);
      break;

   default:

@ -305,26 +311,26 @@ gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
static void
gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED:
      write_timestamp(intel, query->bo, 1);
      write_timestamp(brw, query->bo, 1);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      write_depth_count(intel, query->bo, 1);
      write_depth_count(brw, query->bo, 1);
      break;

   case GL_PRIMITIVES_GENERATED:
      write_primitives_generated(intel, query->bo, 1);
      write_primitives_generated(brw, query->bo, 1);
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      write_xfb_primitives_written(intel, query->bo, 1);
      write_xfb_primitives_written(brw, query->bo, 1);
      break;

   default:

@ -355,6 +361,7 @@ static void gen6_wait_query(struct gl_context *ctx, struct gl_query_object *q)
 */
static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

@ -366,7 +373,7 @@ static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
    * the async query will return true in finite time.
    */
   if (query->bo && drm_intel_bo_references(intel->batch.bo, query->bo))
      intel_batchbuffer_flush(intel);
      intel_batchbuffer_flush(brw);

   if (query->bo == NULL || !drm_intel_bo_busy(query->bo)) {
      gen6_queryobj_get_results(ctx, query);
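The query-object hunks above bracket each query with two counter snapshots: gen6_begin_query() writes the relevant counter into slot 0 of the query buffer and gen6_end_query() writes it into slot 1, so the reported result is simply the difference. A self-contained sketch of that bookkeeping (the layout and helper names are invented for illustration; the real code snapshots into a drm BO via PIPE_CONTROL):

#include <stdint.h>

/* Two 64-bit snapshots per query: index 0 at Begin, index 1 at End. */
struct example_query {
   uint64_t snapshot[2];
};

static void
example_begin_query(struct example_query *q, uint64_t counter_now)
{
   q->snapshot[0] = counter_now;   /* cf. write_depth_count(brw, bo, 0) */
}

static void
example_end_query(struct example_query *q, uint64_t counter_now)
{
   q->snapshot[1] = counter_now;   /* cf. write_depth_count(brw, bo, 1) */
}

static uint64_t
example_query_result(const struct example_query *q)
{
   return q->snapshot[1] - q->snapshot[0];   /* samples passed between Begin and End */
}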
@ -33,8 +33,6 @@
static void
upload_sampler_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS << 16 |
             VS_SAMPLER_STATE_CHANGE |

@ -187,6 +187,5 @@ brw_end_transform_feedback(struct gl_context *ctx,
    * simplicity, just do a full flush.
    */
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   intel_batchbuffer_emit_mi_flush(intel);
   intel_batchbuffer_emit_mi_flush(brw);
}

@ -49,7 +49,6 @@
static void
gen6_upload_urb( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;
   int nr_vs_entries, nr_gs_entries;
   int total_urb_size = brw->urb.size * 1024; /* in bytes */

@ -111,7 +110,7 @@ gen6_upload_urb( struct brw_context *brw )
    * a workaround.
    */
   if (brw->urb.gen6_gs_previously_active && !brw->gs.prog_active)
      intel_batchbuffer_emit_mi_flush(intel);
      intel_batchbuffer_emit_mi_flush(brw);
   brw->urb.gen6_gs_previously_active = brw->gs.prog_active;
}

@ -120,8 +120,6 @@ const struct brw_tracked_state gen6_sf_vp = {

static void upload_viewport_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_VIEWPORT_STATE_POINTERS << 16 | (4 - 2) |
             GEN6_CC_VIEWPORT_MODIFY |

@ -109,7 +109,7 @@ upload_vs_state(struct brw_context *brw)
    * flush can be executed by sending a PIPE_CONTROL command with CS
    * stall bit set and a post sync operation.
    */
   intel_emit_post_sync_nonzero_flush(intel);
   intel_emit_post_sync_nonzero_flush(brw);

   if (brw->vs.push_const_size == 0) {
      /* Disable the push constant buffers. */

@ -182,7 +182,7 @@ upload_vs_state(struct brw_context *brw)
    * bug reports that led to this workaround, and may be more than
    * what is strictly required to avoid the issue.
    */
   intel_emit_post_sync_nonzero_flush(intel);
   intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_PIPE_CONTROL | (4 - 2));
@ -67,8 +67,6 @@ gen7_blorp_emit_blend_state_pointer(struct brw_context *brw,
|
|||
const brw_blorp_params *params,
|
||||
uint32_t cc_blend_state_offset)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_BLEND_STATE_POINTERS << 16 | (2 - 2));
|
||||
OUT_BATCH(cc_blend_state_offset | 1);
|
||||
|
|
@ -82,8 +80,6 @@ gen7_blorp_emit_cc_state_pointer(struct brw_context *brw,
|
|||
const brw_blorp_params *params,
|
||||
uint32_t cc_state_offset)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
|
||||
OUT_BATCH(cc_state_offset | 1);
|
||||
|
|
@ -94,7 +90,6 @@ static void
|
|||
gen7_blorp_emit_cc_viewport(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct brw_cc_viewport *ccv;
|
||||
uint32_t cc_vp_offset;
|
||||
|
||||
|
|
@ -120,8 +115,6 @@ gen7_blorp_emit_depth_stencil_state_pointers(struct brw_context *brw,
|
|||
const brw_blorp_params *params,
|
||||
uint32_t depthstencil_offset)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_DEPTH_STENCIL_STATE_POINTERS << 16 | (2 - 2));
|
||||
OUT_BATCH(depthstencil_offset | 1);
|
||||
|
|
@ -286,8 +279,6 @@ static void
|
|||
gen7_blorp_emit_vs_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(_3DSTATE_CONSTANT_VS << 16 | (7 - 2));
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -317,8 +308,6 @@ static void
|
|||
gen7_blorp_emit_hs_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(_3DSTATE_CONSTANT_HS << 16 | (7 - 2));
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -349,8 +338,6 @@ static void
|
|||
gen7_blorp_emit_te_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
OUT_BATCH(_3DSTATE_TE << 16 | (4 - 2));
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -368,8 +355,6 @@ static void
|
|||
gen7_blorp_emit_ds_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(_3DSTATE_CONSTANT_DS << 16 | (7 - 2));
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -398,8 +383,6 @@ static void
|
|||
gen7_blorp_emit_gs_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(_3DSTATE_CONSTANT_GS << 16 | (7 - 2));
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -429,8 +412,6 @@ static void
|
|||
gen7_blorp_emit_streamout_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(3);
|
||||
OUT_BATCH(_3DSTATE_STREAMOUT << 16 | (3 - 2));
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -443,8 +424,6 @@ static void
|
|||
gen7_blorp_emit_sf_config(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
/* 3DSTATE_SF
|
||||
*
|
||||
* Disable ViewportTransformEnable (dw1.1)
|
||||
|
|
@ -498,8 +477,6 @@ gen7_blorp_emit_wm_config(struct brw_context *brw,
|
|||
const brw_blorp_params *params,
|
||||
brw_blorp_prog_data *prog_data)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
uint32_t dw1 = 0, dw2 = 0;
|
||||
|
||||
switch (params->hiz_op) {
|
||||
|
|
@ -615,8 +592,6 @@ gen7_blorp_emit_binding_table_pointers_ps(struct brw_context *brw,
|
|||
const brw_blorp_params *params,
|
||||
uint32_t wm_bind_bo_offset)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS_PS << 16 | (2 - 2));
|
||||
OUT_BATCH(wm_bind_bo_offset);
|
||||
|
|
@ -629,8 +604,6 @@ gen7_blorp_emit_sampler_state_pointers_ps(struct brw_context *brw,
|
|||
const brw_blorp_params *params,
|
||||
uint32_t sampler_offset)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_SAMPLER_STATE_POINTERS_PS << 16 | (2 - 2));
|
||||
OUT_BATCH(sampler_offset);
|
||||
|
|
@ -643,8 +616,6 @@ gen7_blorp_emit_constant_ps(struct brw_context *brw,
|
|||
const brw_blorp_params *params,
|
||||
uint32_t wm_push_const_offset)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
/* Make sure the push constants fill an exact integer number of
|
||||
* registers.
|
||||
*/
|
||||
|
|
@ -670,8 +641,6 @@ static void
|
|||
gen7_blorp_emit_constant_ps_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(_3DSTATE_CONSTANT_PS << 16 | (7 - 2));
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -687,8 +656,7 @@ static void
|
|||
gen7_blorp_emit_depth_stencil_config(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
struct gl_context *ctx = &brw->intel.ctx;
|
||||
uint32_t draw_x = params->depth.x_offset;
|
||||
uint32_t draw_y = params->depth.y_offset;
|
||||
uint32_t tile_mask_x, tile_mask_y;
|
||||
|
|
@ -729,7 +697,7 @@ gen7_blorp_emit_depth_stencil_config(struct brw_context *brw,
|
|||
tile_x &= ~7;
|
||||
tile_y &= ~7;
|
||||
|
||||
intel_emit_depth_stall_flushes(intel);
|
||||
intel_emit_depth_stall_flushes(brw);
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
|
||||
|
|
@ -782,8 +750,6 @@ static void
|
|||
gen7_blorp_emit_depth_disable(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
|
||||
OUT_BATCH(BRW_DEPTHFORMAT_D32_FLOAT << 18 | (BRW_SURFACE_NULL << 29));
|
||||
|
|
@ -808,8 +774,6 @@ static void
|
|||
gen7_blorp_emit_clear_params(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(3);
|
||||
OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
|
||||
OUT_BATCH(params->depth.mt ? params->depth.mt->depth_clear_value : 0);
|
||||
|
|
@ -823,8 +787,6 @@ static void
|
|||
gen7_blorp_emit_primitive(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7);
|
||||
OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
|
||||
OUT_BATCH(GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL |
|
||||
|
|
@ -842,11 +804,9 @@ gen7_blorp_emit_primitive(struct brw_context *brw,
|
|||
* \copydoc gen6_blorp_exec()
|
||||
*/
|
||||
void
|
||||
gen7_blorp_exec(struct intel_context *intel,
|
||||
gen7_blorp_exec(struct brw_context *brw,
|
||||
const brw_blorp_params *params)
|
||||
{
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
brw_blorp_prog_data *prog_data = NULL;
|
||||
uint32_t cc_blend_state_offset = 0;
|
||||
uint32_t cc_state_offset = 0;
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ struct intel_mipmap_tree;
|
|||
}
|
||||
|
||||
void
|
||||
gen7_blorp_exec(struct intel_context *intel,
|
||||
gen7_blorp_exec(struct brw_context *brw,
|
||||
const brw_blorp_params *params);
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -29,8 +29,6 @@
|
|||
static void
|
||||
disable_stages(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
assert(!brw->gs.prog_active);
|
||||
|
||||
/* Disable the Geometry Shader (GS) Unit */
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ gen7_emit_depth_stencil_hiz(struct brw_context *brw,
|
|||
struct intel_context *intel = &brw->intel;
|
||||
struct gl_context *ctx = &intel->ctx;
|
||||
|
||||
intel_emit_depth_stall_flushes(intel);
|
||||
intel_emit_depth_stall_flushes(brw);
|
||||
|
||||
/* _NEW_DEPTH, _NEW_STENCIL, _NEW_BUFFERS */
|
||||
BEGIN_BATCH(7);
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ upload_3dstate_so_buffers(struct brw_context *brw)
|
|||
continue;
|
||||
}
|
||||
|
||||
bo = intel_bufferobj_buffer(intel, bufferobj, INTEL_WRITE_PART);
|
||||
bo = intel_bufferobj_buffer(brw, bufferobj, INTEL_WRITE_PART);
|
||||
stride = linked_xfb_info->BufferStride[i] * 4;
|
||||
|
||||
start = xfb_obj->Offset[i];
|
||||
|
|
@ -260,7 +260,7 @@ gen7_begin_transform_feedback(struct gl_context *ctx, GLenum mode,
|
|||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
intel_batchbuffer_flush(intel);
|
||||
intel_batchbuffer_flush(brw);
|
||||
intel->batch.needs_sol_reset = true;
|
||||
}
|
||||
|
||||
|
|
@ -275,7 +275,6 @@ gen7_end_transform_feedback(struct gl_context *ctx,
|
|||
* This also covers any cache flushing required.
|
||||
*/
|
||||
struct brw_context *brw = brw_context(ctx);
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
intel_batchbuffer_flush(intel);
|
||||
intel_batchbuffer_flush(brw);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -100,7 +100,7 @@ gen7_upload_urb(struct brw_context *brw)
|
|||
/* GS requirement */
|
||||
assert(!brw->gs.prog_active);
|
||||
|
||||
gen7_emit_vs_workaround_flush(intel);
|
||||
gen7_emit_vs_workaround_flush(brw);
|
||||
gen7_emit_urb_state(brw, brw->urb.nr_vs_entries, vs_size, brw->urb.vs_start);
|
||||
}
|
||||
|
||||
|
|
@ -108,8 +108,6 @@ void
|
|||
gen7_emit_urb_state(struct brw_context *brw, GLuint nr_vs_entries,
|
||||
GLuint vs_size, GLuint vs_start)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_URB_VS << 16 | (2 - 2));
|
||||
OUT_BATCH(nr_vs_entries |
|
||||
|
|
|
|||
|
|
@ -99,8 +99,6 @@ const struct brw_tracked_state gen7_sf_clip_viewport = {
|
|||
|
||||
static void upload_cc_viewport_state_pointer(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH(_3DSTATE_VIEWPORT_STATE_POINTERS_CC << 16 | (2 - 2));
|
||||
OUT_BATCH(brw->cc.vp_offset);
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ upload_vs_state(struct brw_context *brw)
|
|||
const int max_threads_shift = brw->intel.is_haswell ?
|
||||
HSW_VS_MAX_THREADS_SHIFT : GEN6_VS_MAX_THREADS_SHIFT;
|
||||
|
||||
gen7_emit_vs_workaround_flush(intel);
|
||||
gen7_emit_vs_workaround_flush(brw);
|
||||
|
||||
/* BRW_NEW_VS_BINDING_TABLE */
|
||||
BEGIN_BATCH(2);
|
||||
|
|
|
|||
|
|
@ -306,7 +306,7 @@ gen7_update_texture_surface(struct gl_context *ctx,
|
|||
8 * 4, 32, &binding_table[surf_index]);
|
||||
memset(surf, 0, 8 * 4);
|
||||
|
||||
uint32_t tex_format = translate_tex_format(intel,
|
||||
uint32_t tex_format = translate_tex_format(brw,
|
||||
mt->format,
|
||||
tObj->DepthMode,
|
||||
sampler->sRGBDecode);
|
||||
|
|
@ -546,7 +546,7 @@ gen7_update_renderbuffer_surface(struct brw_context *brw,
|
|||
/* Render targets can't use IMS layout */
|
||||
assert(irb->mt->msaa_layout != INTEL_MSAA_LAYOUT_IMS);
|
||||
|
||||
assert(brw_render_target_supported(intel, rb));
|
||||
assert(brw_render_target_supported(brw, rb));
|
||||
format = brw->render_target_format[rb_format];
|
||||
if (unlikely(!brw->format_supported_as_render_target[rb_format])) {
|
||||
_mesa_problem(ctx, "%s: renderbuffer format %s unsupported\n",
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@
|
|||
#include "brw_context.h"
|
||||
|
||||
static void
|
||||
intel_batchbuffer_reset(struct intel_context *intel);
|
||||
intel_batchbuffer_reset(struct brw_context *brw);
|
||||
|
||||
struct cached_batch_item {
|
||||
struct cached_batch_item *next;
|
||||
|
|
@ -41,8 +41,10 @@ struct cached_batch_item {
|
|||
uint16_t size;
|
||||
};
|
||||
|
||||
static void clear_cache( struct intel_context *intel )
|
||||
static void
|
||||
clear_cache(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct cached_batch_item *item = intel->batch.cached_items;
|
||||
|
||||
while (item) {
|
||||
|
|
@ -55,9 +57,10 @@ static void clear_cache( struct intel_context *intel )
|
|||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_init(struct intel_context *intel)
|
||||
intel_batchbuffer_init(struct brw_context *brw)
|
||||
{
|
||||
intel_batchbuffer_reset(intel);
|
||||
struct intel_context *intel = &brw->intel;
|
||||
intel_batchbuffer_reset(brw);
|
||||
|
||||
if (intel->gen >= 6) {
|
||||
/* We can't just use brw_state_batch to get a chunk of space for
|
||||
|
|
@ -76,15 +79,16 @@ intel_batchbuffer_init(struct intel_context *intel)
|
|||
}
|
||||
|
||||
static void
|
||||
intel_batchbuffer_reset(struct intel_context *intel)
|
||||
intel_batchbuffer_reset(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
if (intel->batch.last_bo != NULL) {
|
||||
drm_intel_bo_unreference(intel->batch.last_bo);
|
||||
intel->batch.last_bo = NULL;
|
||||
}
|
||||
intel->batch.last_bo = intel->batch.bo;
|
||||
|
||||
clear_cache(intel);
|
||||
clear_cache(brw);
|
||||
|
||||
intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
|
||||
BATCH_SZ, 4096);
|
||||
|
|
@ -100,16 +104,18 @@ intel_batchbuffer_reset(struct intel_context *intel)
|
|||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_save_state(struct intel_context *intel)
|
||||
intel_batchbuffer_save_state(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
intel->batch.saved.used = intel->batch.used;
|
||||
intel->batch.saved.reloc_count =
|
||||
drm_intel_gem_bo_get_reloc_count(intel->batch.bo);
|
||||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_reset_to_saved(struct intel_context *intel)
|
||||
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
drm_intel_gem_bo_clear_relocs(intel->batch.bo, intel->batch.saved.reloc_count);
|
||||
|
||||
intel->batch.used = intel->batch.saved.used;
|
||||
|
|
@ -117,22 +123,24 @@ intel_batchbuffer_reset_to_saved(struct intel_context *intel)
|
|||
/* Cached batch state is dead, since we just cleared some unknown part of the
|
||||
* batchbuffer. Assume that the caller resets any other state necessary.
|
||||
*/
|
||||
clear_cache(intel);
|
||||
clear_cache(brw);
|
||||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_free(struct intel_context *intel)
|
||||
intel_batchbuffer_free(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
free(intel->batch.cpu_map);
|
||||
drm_intel_bo_unreference(intel->batch.last_bo);
|
||||
drm_intel_bo_unreference(intel->batch.bo);
|
||||
drm_intel_bo_unreference(intel->batch.workaround_bo);
|
||||
clear_cache(intel);
|
||||
clear_cache(brw);
|
||||
}
|
||||
|
||||
static void
|
||||
do_batch_dump(struct intel_context *intel)
|
||||
do_batch_dump(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct drm_intel_decode *decode;
|
||||
struct intel_batchbuffer *batch = &intel->batch;
|
||||
int ret;
|
||||
|
|
@ -165,15 +173,16 @@ do_batch_dump(struct intel_context *intel)
|
|||
if (ret == 0) {
|
||||
drm_intel_bo_unmap(batch->bo);
|
||||
|
||||
brw_debug_batch(intel);
|
||||
brw_debug_batch(brw);
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: Push this whole function into bufmgr.
|
||||
*/
|
||||
static int
|
||||
do_flush_locked(struct intel_context *intel)
|
||||
do_flush_locked(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct intel_batchbuffer *batch = &intel->batch;
|
||||
int ret = 0;
|
||||
|
||||
|
|
@ -203,7 +212,7 @@ do_flush_locked(struct intel_context *intel)
|
|||
|
||||
if (ret == 0) {
|
||||
if (unlikely(INTEL_DEBUG & DEBUG_AUB))
|
||||
brw_annotate_aub(intel);
|
||||
brw_annotate_aub(brw);
|
||||
if (intel->hw_ctx == NULL || batch->is_blit) {
|
||||
ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
|
||||
flags);
|
||||
|
|
@ -215,21 +224,22 @@ do_flush_locked(struct intel_context *intel)
|
|||
}
|
||||
|
||||
if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
|
||||
do_batch_dump(intel);
|
||||
do_batch_dump(brw);
|
||||
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
|
||||
exit(1);
|
||||
}
|
||||
intel->vtbl.new_batch(intel);
|
||||
intel->vtbl.new_batch(brw);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
_intel_batchbuffer_flush(struct intel_context *intel,
|
||||
_intel_batchbuffer_flush(struct brw_context *brw,
|
||||
const char *file, int line)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
int ret;
|
||||
|
||||
if (intel->batch.used == 0)
|
||||
|
|
@ -247,21 +257,21 @@ _intel_batchbuffer_flush(struct intel_context *intel,
|
|||
intel->batch.reserved_space = 0;
|
||||
|
||||
if (intel->vtbl.finish_batch)
|
||||
intel->vtbl.finish_batch(intel);
|
||||
intel->vtbl.finish_batch(brw);
|
||||
|
||||
/* Mark the end of the buffer. */
|
||||
intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
|
||||
intel_batchbuffer_emit_dword(brw, MI_BATCH_BUFFER_END);
|
||||
if (intel->batch.used & 1) {
|
||||
/* Round batchbuffer usage to 2 DWORDs. */
|
||||
intel_batchbuffer_emit_dword(intel, MI_NOOP);
|
||||
intel_batchbuffer_emit_dword(brw, MI_NOOP);
|
||||
}
|
||||
|
||||
intel_upload_finish(intel);
|
||||
intel_upload_finish(brw);
|
||||
|
||||
/* Check that we didn't just wrap our batchbuffer at a bad time. */
|
||||
assert(!intel->no_batch_wrap);
|
||||
|
||||
ret = do_flush_locked(intel);
|
||||
ret = do_flush_locked(brw);
|
||||
|
||||
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
|
||||
fprintf(stderr, "waiting for idle\n");
|
||||
|
|
@ -270,7 +280,7 @@ _intel_batchbuffer_flush(struct intel_context *intel,
|
|||
|
||||
/* Reset the buffer:
|
||||
*/
|
||||
intel_batchbuffer_reset(intel);
|
||||
intel_batchbuffer_reset(brw);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -279,11 +289,12 @@ _intel_batchbuffer_flush(struct intel_context *intel,
|
|||
/* This is the only way buffers get added to the validate list.
|
||||
*/
|
||||
bool
|
||||
intel_batchbuffer_emit_reloc(struct intel_context *intel,
|
||||
intel_batchbuffer_emit_reloc(struct brw_context *brw,
|
||||
drm_intel_bo *buffer,
|
||||
uint32_t read_domains, uint32_t write_domain,
|
||||
uint32_t delta)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
int ret;
|
||||
|
||||
ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
|
||||
|
|
@ -297,18 +308,19 @@ intel_batchbuffer_emit_reloc(struct intel_context *intel,
|
|||
* the buffer doesn't move and we can short-circuit the relocation processing
|
||||
* in the kernel
|
||||
*/
|
||||
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
|
||||
intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
|
||||
intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
|
||||
drm_intel_bo *buffer,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domain,
|
||||
uint32_t delta)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
int ret;
|
||||
|
||||
ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
|
||||
|
|
@ -322,24 +334,26 @@ intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
|
|||
* be, in case the buffer doesn't move and we can short-circuit the
|
||||
* relocation processing in the kernel
|
||||
*/
|
||||
intel_batchbuffer_emit_dword(intel, buffer->offset + delta);
|
||||
intel_batchbuffer_emit_dword(brw, buffer->offset + delta);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_data(struct intel_context *intel,
|
||||
intel_batchbuffer_data(struct brw_context *brw,
|
||||
const void *data, GLuint bytes, bool is_blit)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
assert((bytes & 3) == 0);
|
||||
intel_batchbuffer_require_space(intel, bytes, is_blit);
|
||||
intel_batchbuffer_require_space(brw, bytes, is_blit);
|
||||
__memcpy(intel->batch.map + intel->batch.used, data, bytes);
|
||||
intel->batch.used += bytes >> 2;
|
||||
}
|
||||
|
||||
void
|
||||
intel_batchbuffer_cached_advance(struct intel_context *intel)
|
||||
intel_batchbuffer_cached_advance(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
struct cached_batch_item **prev = &intel->batch.cached_items, *item;
|
||||
uint32_t sz = (intel->batch.used - intel->batch.emit) * sizeof(uint32_t);
|
||||
uint32_t *start = intel->batch.map + intel->batch.emit;
|
||||
|
|
@ -391,8 +405,9 @@ emit:
|
|||
* already flushed (e.g., via a preceding MI_FLUSH).
|
||||
*/
|
||||
void
|
||||
intel_emit_depth_stall_flushes(struct intel_context *intel)
|
||||
intel_emit_depth_stall_flushes(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
assert(intel->gen >= 6 && intel->gen <= 7);
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
|
|
@ -426,8 +441,9 @@ intel_emit_depth_stall_flushes(struct intel_context *intel)
|
|||
* to be sent before any combination of VS associated 3DSTATE."
|
||||
*/
|
||||
void
|
||||
gen7_emit_vs_workaround_flush(struct intel_context *intel)
|
||||
gen7_emit_vs_workaround_flush(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
assert(intel->gen == 7);
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
|
|
@ -477,8 +493,9 @@ gen7_emit_vs_workaround_flush(struct intel_context *intel)
|
|||
* really our business. That leaves only stall at scoreboard.
|
||||
*/
|
||||
void
|
||||
intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
|
||||
intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
if (!intel->batch.need_workaround_flush)
|
||||
return;
|
||||
|
||||
|
|
@ -508,8 +525,9 @@ intel_emit_post_sync_nonzero_flush(struct intel_context *intel)
|
|||
* This is also used for the always_flush_cache driconf debug option.
|
||||
*/
|
||||
void
|
||||
intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
|
||||
intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
if (intel->gen >= 6) {
|
||||
if (intel->batch.is_blit) {
|
||||
BEGIN_BATCH_BLT(4);
|
||||
|
|
@ -526,7 +544,7 @@ intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
|
|||
* Flush Enable =1, a PIPE_CONTROL with any non-zero
|
||||
* post-sync-op is required.
|
||||
*/
|
||||
intel_emit_post_sync_nonzero_flush(intel);
|
||||
intel_emit_post_sync_nonzero_flush(brw);
|
||||
}
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
|
|
|
|||
|
|
@ -24,12 +24,12 @@ extern "C" {
|
|||
|
||||
struct intel_batchbuffer;
|
||||
|
||||
void intel_batchbuffer_init(struct intel_context *intel);
|
||||
void intel_batchbuffer_free(struct intel_context *intel);
|
||||
void intel_batchbuffer_save_state(struct intel_context *intel);
|
||||
void intel_batchbuffer_reset_to_saved(struct intel_context *intel);
|
||||
void intel_batchbuffer_init(struct brw_context *brw);
|
||||
void intel_batchbuffer_free(struct brw_context *brw);
|
||||
void intel_batchbuffer_save_state(struct brw_context *brw);
|
||||
void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
|
||||
|
||||
int _intel_batchbuffer_flush(struct intel_context *intel,
|
||||
int _intel_batchbuffer_flush(struct brw_context *brw,
|
||||
const char *file, int line);
|
||||
|
||||
#define intel_batchbuffer_flush(intel) \
|
||||
|
|
@ -41,23 +41,23 @@ int _intel_batchbuffer_flush(struct intel_context *intel,
|
|||
* Consider it a convenience function wrapping multple
|
||||
* intel_buffer_dword() calls.
|
||||
*/
|
||||
void intel_batchbuffer_data(struct intel_context *intel,
|
||||
void intel_batchbuffer_data(struct brw_context *brw,
|
||||
const void *data, GLuint bytes, bool is_blit);
|
||||
|
||||
bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
|
||||
bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
|
||||
drm_intel_bo *buffer,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domain,
|
||||
uint32_t offset);
|
||||
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
|
||||
bool intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
|
||||
drm_intel_bo *buffer,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domain,
|
||||
uint32_t offset);
|
||||
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
|
||||
void intel_emit_post_sync_nonzero_flush(struct intel_context *intel);
|
||||
void intel_emit_depth_stall_flushes(struct intel_context *intel);
|
||||
void gen7_emit_vs_workaround_flush(struct intel_context *intel);
void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
void intel_emit_depth_stall_flushes(struct brw_context *brw);
void gen7_emit_vs_workaround_flush(struct brw_context *brw);

static INLINE uint32_t float_as_int(float f)
{

@@ -76,36 +76,37 @@ static INLINE uint32_t float_as_int(float f)
 * work...
 */
static INLINE unsigned
intel_batchbuffer_space(struct intel_context *intel)
intel_batchbuffer_space(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
return (intel->batch.state_batch_offset - intel->batch.reserved_space)
- intel->batch.used*4;
}

static INLINE void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
{
struct intel_context *intel = &brw->intel;
#ifdef DEBUG
assert(intel_batchbuffer_space(intel) >= 4);
assert(intel_batchbuffer_space(brw) >= 4);
#endif
intel->batch.map[intel->batch.used++] = dword;
}

static INLINE void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
intel_batchbuffer_emit_float(struct brw_context *brw, float f)
{
intel_batchbuffer_emit_dword(intel, float_as_int(f));
intel_batchbuffer_emit_dword(brw, float_as_int(f));
}

static INLINE void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz, int is_blit)
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
{
struct intel_context *intel = &brw->intel;
if (intel->gen >= 6 &&
    intel->batch.is_blit != is_blit && intel->batch.used) {
intel_batchbuffer_flush(intel);
intel_batchbuffer_flush(brw);
}

intel->batch.is_blit = is_blit;

@@ -113,14 +114,15 @@ intel_batchbuffer_require_space(struct intel_context *intel,
#ifdef DEBUG
assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
if (intel_batchbuffer_space(intel) < sz)
intel_batchbuffer_flush(intel);
if (intel_batchbuffer_space(brw) < sz)
intel_batchbuffer_flush(brw);
}

static INLINE void
intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
intel_batchbuffer_begin(struct brw_context *brw, int n, bool is_blit)
{
intel_batchbuffer_require_space(intel, n * 4, is_blit);
struct intel_context *intel = &brw->intel;
intel_batchbuffer_require_space(brw, n * 4, is_blit);

intel->batch.emit = intel->batch.used;
#ifdef DEBUG

@@ -129,9 +131,10 @@ intel_batchbuffer_begin(struct intel_context *intel, int n, bool is_blit)
}

static INLINE void
intel_batchbuffer_advance(struct intel_context *intel)
intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
struct intel_context *intel = &brw->intel;
struct intel_batchbuffer *batch = &intel->batch;
unsigned int _n = batch->used - batch->emit;
assert(batch->total != 0);

@@ -144,27 +147,27 @@ intel_batchbuffer_advance(struct intel_context *intel)
#endif
}

void intel_batchbuffer_cached_advance(struct intel_context *intel);
void intel_batchbuffer_cached_advance(struct brw_context *brw);

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(intel, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel,f)
#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, true)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
intel_batchbuffer_emit_reloc(intel, buf, \
intel_batchbuffer_emit_reloc(brw, buf, \
                             read_domains, write_domain, delta); \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do { \
intel_batchbuffer_emit_reloc_fenced(intel, buf, \
intel_batchbuffer_emit_reloc_fenced(brw, buf, \
                                    read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(brw);

#ifdef __cplusplus
}

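The practical effect of the macro change is what the commit message points out: an emitter that uses OUT_BATCH only needs a brw_context in scope. A minimal sketch, not part of this patch (the packet contents are placeholder no-ops):

static void
emit_two_noops(struct brw_context *brw)
{
   BEGIN_BATCH(2);
   OUT_BATCH(0);   /* MI_NOOP */
   OUT_BATCH(0);   /* MI_NOOP */
   ADVANCE_BATCH();
}
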
@@ -44,7 +44,7 @@
#define FILE_DEBUG_FLAG DEBUG_BLIT

static void
intel_miptree_set_alpha_to_one(struct intel_context *intel,
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height);

@@ -101,9 +101,10 @@ br13_for_cpp(int cpp)
 * server).
 */
static void
set_blitter_tiling(struct intel_context *intel,
set_blitter_tiling(struct brw_context *brw,
                   bool dst_y_tiled, bool src_y_tiled)
{
struct intel_context *intel = &brw->intel;
assert(intel->gen >= 6);

/* Idle the blitter before we update how tiling is interpreted. */

@@ -122,12 +123,12 @@ set_blitter_tiling(struct intel_context *intel,
#define BEGIN_BATCH_BLT_TILED(n, dst_y_tiled, src_y_tiled) do { \
BEGIN_BATCH_BLT(n + ((dst_y_tiled || src_y_tiled) ? 14 : 0)); \
if (dst_y_tiled || src_y_tiled) \
set_blitter_tiling(intel, dst_y_tiled, src_y_tiled); \
set_blitter_tiling(brw, dst_y_tiled, src_y_tiled); \
} while (0)

#define ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled) do { \
if (dst_y_tiled || src_y_tiled) \
set_blitter_tiling(intel, false, false); \
set_blitter_tiling(brw, false, false); \
ADVANCE_BATCH(); \
} while (0)

@@ -147,7 +148,7 @@ set_blitter_tiling(struct intel_context *intel,
 * renderbuffers/textures.
 */
bool
intel_miptree_blit(struct intel_context *intel,
intel_miptree_blit(struct brw_context *brw,
                   struct intel_mipmap_tree *src_mt,
                   int src_level, int src_slice,
                   uint32_t src_x, uint32_t src_y, bool src_flip,

@@ -157,6 +158,7 @@ intel_miptree_blit(struct intel_context *intel,
                   uint32_t width, uint32_t height,
                   GLenum logicop)
{
struct intel_context *intel = &brw->intel;
/* No sRGB decode or encode is done by the hardware blitter, which is
 * consistent with what we want in the callers (glCopyTexSubImage(),
 * glBlitFramebuffer(), texture validation, etc.).

@@ -208,10 +210,10 @@ intel_miptree_blit(struct intel_context *intel,
/* The blitter has no idea about HiZ or fast color clears, so we need to
 * resolve the miptrees before we do anything.
 */
intel_miptree_slice_resolve_depth(intel, src_mt, src_level, src_slice);
intel_miptree_slice_resolve_depth(intel, dst_mt, dst_level, dst_slice);
intel_miptree_resolve_color(intel, src_mt);
intel_miptree_resolve_color(intel, dst_mt);
intel_miptree_slice_resolve_depth(brw, src_mt, src_level, src_slice);
intel_miptree_slice_resolve_depth(brw, dst_mt, dst_level, dst_slice);
intel_miptree_resolve_color(brw, src_mt);
intel_miptree_resolve_color(brw, dst_mt);

if (src_flip)
src_y = src_mt->level[src_level].height - src_y - height;

@@ -235,7 +237,7 @@ intel_miptree_blit(struct intel_context *intel,
dst_x += dst_image_x;
dst_y += dst_image_y;

if (!intelEmitCopyBlit(intel,
if (!intelEmitCopyBlit(brw,
                       src_mt->cpp,
                       src_pitch,
                       src_mt->region->bo, src_mt->offset,

@@ -252,7 +254,7 @@ intel_miptree_blit(struct intel_context *intel,

if (src_mt->format == MESA_FORMAT_XRGB8888 &&
    dst_mt->format == MESA_FORMAT_ARGB8888) {
intel_miptree_set_alpha_to_one(intel, dst_mt,
intel_miptree_set_alpha_to_one(brw, dst_mt,
                               dst_x, dst_y,
                               width, height);
}

@@ -263,7 +265,7 @@ intel_miptree_blit(struct intel_context *intel,
/* Copy BitBlt
 */
bool
intelEmitCopyBlit(struct intel_context *intel,
intelEmitCopyBlit(struct brw_context *brw,
                  GLuint cpp,
                  GLshort src_pitch,
                  drm_intel_bo *src_buffer,

@@ -278,6 +280,7 @@ intelEmitCopyBlit(struct intel_context *intel,
                  GLshort w, GLshort h,
                  GLenum logic_op)
{
struct intel_context *intel = &brw->intel;
GLuint CMD, BR13, pass = 0;
int dst_y2 = dst_y + h;
int dst_x2 = dst_x + w;

@@ -304,7 +307,7 @@ intelEmitCopyBlit(struct intel_context *intel,
aper_array[2] = src_buffer;

if (dri_bufmgr_check_aperture_space(aper_array, 3) != 0) {
intel_batchbuffer_flush(intel);
intel_batchbuffer_flush(brw);
pass++;
} else
break;

@@ -313,7 +316,7 @@ intelEmitCopyBlit(struct intel_context *intel,
if (pass >= 2)
return false;

intel_batchbuffer_require_space(intel, 8 * 4, true);
intel_batchbuffer_require_space(brw, 8 * 4, true);
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
    __FUNCTION__,
    src_buffer, src_pitch, src_offset, src_x, src_y,

@@ -390,13 +393,13 @@ intelEmitCopyBlit(struct intel_context *intel,

ADVANCE_BATCH_TILED(dst_y_tiled, src_y_tiled);

intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_emit_mi_flush(brw);

return true;
}

bool
intelEmitImmediateColorExpandBlit(struct intel_context *intel,
intelEmitImmediateColorExpandBlit(struct brw_context *brw,
                                  GLuint cpp,
                                  GLubyte *src_bits, GLuint src_size,
                                  GLuint fg_color,

@@ -429,10 +432,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
    __FUNCTION__,
    dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);

intel_batchbuffer_require_space(intel,
                                (8 * 4) +
                                (3 * 4) +
                                dwords * 4, true);
intel_batchbuffer_require_space(brw, (8 * 4) + (3 * 4) + dwords * 4, true);

opcode = XY_SETUP_BLT_CMD;
if (cpp == 4)

@@ -466,9 +466,9 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
OUT_BATCH(((y + h) << 16) | (x + w));
ADVANCE_BATCH();

intel_batchbuffer_data(intel, src_bits, dwords * 4, true);
intel_batchbuffer_data(brw, src_bits, dwords * 4, true);

intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_emit_mi_flush(brw);

return true;
}

@@ -478,13 +478,14 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
 * end to cover the last if we need.
 */
void
intel_emit_linear_blit(struct intel_context *intel,
intel_emit_linear_blit(struct brw_context *brw,
                       drm_intel_bo *dst_bo,
                       unsigned int dst_offset,
                       drm_intel_bo *src_bo,
                       unsigned int src_offset,
                       unsigned int size)
{
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
GLuint pitch, height;
bool ok;

@@ -495,7 +496,7 @@ intel_emit_linear_blit(struct intel_context *intel,
 */
pitch = ROUND_DOWN_TO(MIN2(size, (1 << 15) - 1), 4);
height = (pitch == 0) ? 1 : size / pitch;
ok = intelEmitCopyBlit(intel, 1,
ok = intelEmitCopyBlit(brw, 1,
                       pitch, src_bo, src_offset, I915_TILING_NONE,
                       pitch, dst_bo, dst_offset, I915_TILING_NONE,
                       0, 0, /* src x/y */

@@ -511,7 +512,7 @@ intel_emit_linear_blit(struct intel_context *intel,
assert (size < (1 << 15));
pitch = ALIGN(size, 4);
if (size != 0) {
ok = intelEmitCopyBlit(intel, 1,
ok = intelEmitCopyBlit(brw, 1,
                       pitch, src_bo, src_offset, I915_TILING_NONE,
                       pitch, dst_bo, dst_offset, I915_TILING_NONE,
                       0, 0, /* src x/y */

@@ -532,10 +533,11 @@ intel_emit_linear_blit(struct intel_context *intel,
 * miptree.
 */
static void
intel_miptree_set_alpha_to_one(struct intel_context *intel,
intel_miptree_set_alpha_to_one(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               int x, int y, int width, int height)
{
struct intel_context *intel = &brw->intel;
struct intel_region *region = mt->region;
uint32_t BR13, CMD;
int pitch, cpp;

@@ -564,7 +566,7 @@ intel_miptree_set_alpha_to_one(struct intel_context *intel,

if (drm_intel_bufmgr_check_aperture_space(aper_array,
                                          ARRAY_SIZE(aper_array)) != 0) {
intel_batchbuffer_flush(intel);
intel_batchbuffer_flush(brw);
}

bool dst_y_tiled = region->tiling == I915_TILING_Y;

@@ -580,5 +582,5 @@ intel_miptree_set_alpha_to_one(struct intel_context *intel,
OUT_BATCH(0xffffffff); /* white, but only alpha gets written */
ADVANCE_BATCH_TILED(dst_y_tiled, false);

intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_emit_mi_flush(brw);
}

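The conversion pattern in this file is mechanical: each function now takes a brw_context, and a local "struct intel_context *intel = &brw->intel;" keeps the existing field accesses compiling until those fields migrate. Roughly, as a sketch with a hypothetical helper name:

void
some_blit_wrapper(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* transitional alias */

   if (intel->gen >= 6)                         /* old fields still live on intel */
      intel_batchbuffer_emit_mi_flush(brw);     /* callees now take brw */
}
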
@@ -31,7 +31,7 @@
#include "brw_context.h"

bool
intelEmitCopyBlit(struct intel_context *intel,
intelEmitCopyBlit(struct brw_context *brw,
                  GLuint cpp,
                  GLshort src_pitch,
                  drm_intel_bo *src_buffer,

@@ -46,7 +46,7 @@ intelEmitCopyBlit(struct intel_context *intel,
                  GLshort w, GLshort h,
                  GLenum logicop );

bool intel_miptree_blit(struct intel_context *intel,
bool intel_miptree_blit(struct brw_context *brw,
                        struct intel_mipmap_tree *src_mt,
                        int src_level, int src_slice,
                        uint32_t src_x, uint32_t src_y, bool src_flip,

@@ -57,7 +57,7 @@ bool intel_miptree_blit(struct intel_context *intel,
                        GLenum logicop);

bool
intelEmitImmediateColorExpandBlit(struct intel_context *intel,
intelEmitImmediateColorExpandBlit(struct brw_context *brw,
                                  GLuint cpp,
                                  GLubyte *src_bits, GLuint src_size,
                                  GLuint fg_color,

@@ -68,7 +68,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
                                  GLshort x, GLshort y,
                                  GLshort w, GLshort h,
                                  GLenum logic_op);
void intel_emit_linear_blit(struct intel_context *intel,
void intel_emit_linear_blit(struct brw_context *brw,
                            drm_intel_bo *dst_bo,
                            unsigned int dst_offset,
                            drm_intel_bo *src_bo,

@@ -46,11 +46,10 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj);

/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
intel_bufferobj_alloc_buffer(struct brw_context *brw,
                             struct intel_buffer_object *intel_obj)
{
struct brw_context *brw = brw_context(&intel->ctx);

struct intel_context *intel = &brw->intel;
intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                       intel_obj->Base.Size, 64);

@@ -123,7 +122,7 @@ intel_bufferobj_data(struct gl_context * ctx,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

/* Part of the ABI, but this function doesn't use it.

@@ -139,7 +138,7 @@ intel_bufferobj_data(struct gl_context * ctx,
release_buffer(intel_obj);

if (size != 0) {
intel_bufferobj_alloc_buffer(intel, intel_obj);
intel_bufferobj_alloc_buffer(brw, intel_obj);
if (!intel_obj->buffer)
return false;

@@ -163,6 +162,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
bool busy;

@@ -180,7 +180,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,
if (size == intel_obj->Base.Size) {
/* Replace the current busy bo with fresh data. */
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(intel, intel_obj);
intel_bufferobj_alloc_buffer(brw, intel_obj);
drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
} else {
perf_debug("Using a blit copy to avoid stalling on %ldb "

@@ -191,7 +191,7 @@ intel_bufferobj_subdata(struct gl_context * ctx,

drm_intel_bo_subdata(temp_bo, 0, size, data);

intel_emit_linear_blit(intel,
intel_emit_linear_blit(brw,
                       intel_obj->buffer, offset,
                       temp_bo, 0,
                       size);

@@ -214,11 +214,12 @@ intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLvoid * data, struct gl_buffer_object *obj)
{
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);

assert(intel_obj);
if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
intel_batchbuffer_flush(intel);
intel_batchbuffer_flush(brw);
}
drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
}

@@ -246,6 +247,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
                          GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

@@ -275,7 +277,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(intel, intel_obj);
intel_bufferobj_alloc_buffer(brw, intel_obj);
} else {
perf_debug("Stalling on the GPU for mapping a busy buffer "
           "object\n");

@@ -284,7 +286,7 @@ intel_bufferobj_map_range(struct gl_context * ctx,
} else if (drm_intel_bo_busy(intel_obj->buffer) &&
           (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
drm_intel_bo_unreference(intel_obj->buffer);
intel_bufferobj_alloc_buffer(intel, intel_obj);
intel_bufferobj_alloc_buffer(brw, intel_obj);
}
}

@@ -335,6 +337,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                   GLintptr offset, GLsizeiptr length,
                                   struct gl_buffer_object *obj)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
drm_intel_bo *temp_bo;

@@ -352,7 +355,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,

drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

intel_emit_linear_blit(intel,
intel_emit_linear_blit(brw,
                       intel_obj->buffer, obj->Offset + offset,
                       temp_bo, 0,
                       length);

@@ -367,7 +370,7 @@ intel_bufferobj_flush_mapped_range(struct gl_context *ctx,
static GLboolean
intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

assert(intel_obj);

@@ -378,13 +381,13 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 * flush. Once again, we wish for a domain tracker in libdrm to cover
 * usage inside of a batchbuffer.
 */
intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_emit_mi_flush(brw);
free(intel_obj->range_map_buffer);
intel_obj->range_map_buffer = NULL;
} else if (intel_obj->range_map_bo != NULL) {
drm_intel_bo_unmap(intel_obj->range_map_bo);

intel_emit_linear_blit(intel,
intel_emit_linear_blit(brw,
                       intel_obj->buffer, obj->Offset,
                       intel_obj->range_map_bo, 0,
                       obj->Length);

@@ -394,7 +397,7 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
 * flush. Once again, we wish for a domain tracker in libdrm to cover
 * usage inside of a batchbuffer.
 */
intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_emit_mi_flush(brw);

drm_intel_bo_unreference(intel_obj->range_map_bo);
intel_obj->range_map_bo = NULL;

@@ -409,12 +412,12 @@ intel_bufferobj_unmap(struct gl_context * ctx, struct gl_buffer_object *obj)
}

drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
intel_bufferobj_buffer(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       GLuint flag)
{
if (intel_obj->buffer == NULL)
intel_bufferobj_alloc_buffer(intel, intel_obj);
intel_bufferobj_alloc_buffer(brw, intel_obj);

return intel_obj->buffer;
}

@@ -422,8 +425,9 @@ intel_bufferobj_buffer(struct intel_context *intel,
#define INTEL_UPLOAD_SIZE (64*1024)

void
intel_upload_finish(struct intel_context *intel)
intel_upload_finish(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
if (!intel->upload.bo)
return;

@@ -439,9 +443,10 @@ intel_upload_finish(struct intel_context *intel)
intel->upload.bo = NULL;
}

static void wrap_buffers(struct intel_context *intel, GLuint size)
static void wrap_buffers(struct brw_context *brw, GLuint size)
{
intel_upload_finish(intel);
struct intel_context *intel = &brw->intel;
intel_upload_finish(brw);

if (size < INTEL_UPLOAD_SIZE)
size = INTEL_UPLOAD_SIZE;

@@ -450,16 +455,17 @@ static void wrap_buffers(struct intel_context *intel, GLuint size)
intel->upload.offset = 0;
}

void intel_upload_data(struct intel_context *intel,
void intel_upload_data(struct brw_context *brw,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset)
{
struct intel_context *intel = &brw->intel;
GLuint base, delta;

base = (intel->upload.offset + align - 1) / align * align;
if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
wrap_buffers(intel, size);
wrap_buffers(brw, size);
base = 0;
}

@@ -496,14 +502,15 @@ void intel_upload_data(struct intel_context *intel,
intel->upload.offset = base + size;
}

void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
void *intel_upload_map(struct brw_context *brw, GLuint size, GLuint align)
{
struct intel_context *intel = &brw->intel;
GLuint base, delta;
char *ptr;

base = (intel->upload.offset + align - 1) / align * align;
if (intel->upload.bo == NULL || base + size > intel->upload.bo->size) {
wrap_buffers(intel, size);
wrap_buffers(brw, size);
base = 0;
}

@@ -532,11 +539,12 @@ void *intel_upload_map(struct intel_context *intel, GLuint size, GLuint align)
return ptr;
}

void intel_upload_unmap(struct intel_context *intel,
void intel_upload_unmap(struct brw_context *brw,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset)
{
struct intel_context *intel = &brw->intel;
GLuint base;

base = (intel->upload.offset + align - 1) / align * align;

@@ -553,7 +561,7 @@ void intel_upload_unmap(struct intel_context *intel,
}

drm_intel_bo *
intel_bufferobj_source(struct intel_context *intel,
intel_bufferobj_source(struct brw_context *brw,
                       struct intel_buffer_object *intel_obj,
                       GLuint align, GLuint *offset)
{

@@ -568,7 +576,7 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
                             GLintptr read_offset, GLintptr write_offset,
                             GLsizeiptr size)
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_src = intel_buffer_object(src);
struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
drm_intel_bo *src_bo, *dst_bo;

@@ -577,10 +585,10 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
if (size == 0)
return;

dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);
dst_bo = intel_bufferobj_buffer(brw, intel_dst, INTEL_WRITE_PART);
src_bo = intel_bufferobj_source(brw, intel_src, 64, &src_offset);

intel_emit_linear_blit(intel,
intel_emit_linear_blit(brw,
                       dst_bo, write_offset,
                       src_bo, read_offset + src_offset, size);

@@ -589,7 +597,7 @@ intel_bufferobj_copy_subdata(struct gl_context *ctx,
 * flush. Once again, we wish for a domain tracker in libdrm to cover
 * usage inside of a batchbuffer.
 */
intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_emit_mi_flush(brw);
}

static GLenum

@@ -617,8 +625,8 @@ intel_buffer_object_purgeable(struct gl_context * ctx,
return GL_RELEASED_APPLE;
} else {
/* XXX Create the buffer and madvise(MADV_DONTNEED)? */
struct intel_context *intel = intel_context(ctx);
drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);
struct brw_context *brw = brw_context(ctx);
drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_obj, INTEL_READ);

return intel_buffer_purgeable(bo);
}

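GL entry points that receive a gl_context follow the same scheme throughout this file: derive brw with brw_context(ctx) for the driver-internal calls, and keep intel_context(ctx) only where fields still living on intel_context are read. A sketch of that shape, using a hypothetical callback name but the same calls the patch uses:

static void
example_buffer_hook(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* batch state is still read through intel, but the flush takes brw */
   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_batchbuffer_flush(brw);
}
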
@@ -52,27 +52,27 @@ struct intel_buffer_object

/* Get the bm buffer associated with a GL bufferobject:
 */
drm_intel_bo *intel_bufferobj_buffer(struct intel_context *intel,
drm_intel_bo *intel_bufferobj_buffer(struct brw_context *brw,
                                     struct intel_buffer_object *obj,
                                     GLuint flag);
drm_intel_bo *intel_bufferobj_source(struct intel_context *intel,
drm_intel_bo *intel_bufferobj_source(struct brw_context *brw,
                                     struct intel_buffer_object *obj,
                                     GLuint align,
                                     GLuint *offset);

void intel_upload_data(struct intel_context *intel,
void intel_upload_data(struct brw_context *brw,
                       const void *ptr, GLuint size, GLuint align,
                       drm_intel_bo **return_bo,
                       GLuint *return_offset);

void *intel_upload_map(struct intel_context *intel,
void *intel_upload_map(struct brw_context *brw,
                       GLuint size, GLuint align);
void intel_upload_unmap(struct intel_context *intel,
void intel_upload_unmap(struct brw_context *brw,
                        const void *ptr, GLuint size, GLuint align,
                        drm_intel_bo **return_bo,
                        GLuint *return_offset);

void intel_upload_finish(struct intel_context *intel);
void intel_upload_finish(struct brw_context *brw);

/* Hook the bufferobject implementation into mesa:
 */

@@ -39,8 +39,9 @@
 * If so, set the intel->front_buffer_dirty field to true.
 */
void
intel_check_front_buffer_rendering(struct intel_context *intel)
intel_check_front_buffer_rendering(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
const struct gl_framebuffer *fb = intel->ctx.DrawBuffer;
if (_mesa_is_winsys_fbo(fb)) {
/* drawing to window system buffer */

@@ -36,7 +36,7 @@
struct intel_context;
struct intel_framebuffer;

extern void intel_check_front_buffer_rendering(struct intel_context *intel);
extern void intel_check_front_buffer_rendering(struct brw_context *brw);

extern void intelInitBufferFuncs(struct dd_function_table *functions);

@@ -91,9 +91,10 @@ intelGetString(struct gl_context * ctx, GLenum name)
}

void
intel_resolve_for_dri2_flush(struct intel_context *intel,
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable)
{
struct intel_context *intel = &brw->intel;
if (intel->gen < 6) {
/* MSAA and fast color clear are not supported, so don't waste time
 * checking whether a resolve is needed.

@@ -117,15 +118,16 @@ intel_resolve_for_dri2_flush(struct intel_context *intel,
if (rb == NULL || rb->mt == NULL)
continue;
if (rb->mt->num_samples <= 1)
intel_miptree_resolve_color(intel, rb->mt);
intel_miptree_resolve_color(brw, rb->mt);
else
intel_miptree_downsample(intel, rb->mt);
intel_miptree_downsample(brw, rb->mt);
}
}

static void
intel_flush_front(struct gl_context *ctx)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
__DRIcontext *driContext = intel->driContext;
__DRIdrawable *driDrawable = driContext->driDrawablePriv;

@@ -143,7 +145,7 @@ intel_flush_front(struct gl_context *ctx)
 * performance. And no one cares about front-buffer render
 * performance.
 */
intel_resolve_for_dri2_flush(intel, driDrawable);
intel_resolve_for_dri2_flush(brw, driDrawable);

screen->dri2.loader->flushFrontBuffer(driDrawable,
                                      driDrawable->loaderPrivate);

@@ -163,13 +165,13 @@ intel_bits_per_pixel(const struct intel_renderbuffer *rb)
}

static void
intel_query_dri2_buffers(struct intel_context *intel,
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *count);

static void
intel_process_dri2_buffer(struct intel_context *intel,
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,

@@ -180,7 +182,8 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
struct gl_framebuffer *fb = drawable->driverPrivate;
struct intel_renderbuffer *rb;
struct intel_context *intel = context->driverPrivate;
struct brw_context *brw = context->driverPrivate;
struct intel_context *intel = &brw->intel;
__DRIbuffer *buffers = NULL;
int i, count;
const char *region_name;

@@ -193,7 +196,7 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
if (unlikely(INTEL_DEBUG & DEBUG_DRI))
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

intel_query_dri2_buffers(intel, drawable, &buffers, &count);
intel_query_dri2_buffers(brw, drawable, &buffers, &count);

if (buffers == NULL)
return;

@@ -227,7 +230,7 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
return;
}

intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
intel_process_dri2_buffer(brw, drawable, &buffers[i], rb, region_name);
}

driUpdateFramebufferSize(&intel->ctx, drawable);

@@ -238,8 +241,9 @@ intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
 * state is required.
 */
void
intel_prepare_render(struct intel_context *intel)
intel_prepare_render(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
__DRIcontext *driContext = intel->driContext;
__DRIdrawable *drawable;

@@ -336,10 +340,11 @@ intelInvalidateState(struct gl_context * ctx, GLuint new_state)
void
_intel_flush(struct gl_context *ctx, const char *file, int line)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);

if (intel->batch.used)
_intel_batchbuffer_flush(intel, file, line);
_intel_batchbuffer_flush(brw, file, line);
}

static void

@@ -426,7 +431,7 @@ validate_context_version(struct intel_screen *screen,
}

bool
intelInitContext(struct intel_context *intel,
intelInitContext(struct brw_context *brw,
                 int api,
                 unsigned major_version,
                 unsigned minor_version,

@@ -436,6 +441,7 @@ intelInitContext(struct intel_context *intel,
                 struct dd_function_table *functions,
                 unsigned *dri_ctx_error)
{
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;
struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
__DRIscreen *sPriv = driContextPriv->driScreenPriv;

@@ -569,9 +575,9 @@ intelInitContext(struct intel_context *intel,
if (INTEL_DEBUG & DEBUG_AUB)
drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);

intel_batchbuffer_init(intel);
intel_batchbuffer_init(brw);

intel_fbo_init(intel);
intel_fbo_init(brw);

if (!driQueryOptionb(&intel->optionCache, "hiz")) {
intel->has_hiz = false;

@@ -601,21 +607,22 @@ intelInitContext(struct intel_context *intel,
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
struct intel_context *intel =
(struct intel_context *) driContextPriv->driverPrivate;
struct brw_context *brw =
(struct brw_context *) driContextPriv->driverPrivate;
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;

assert(intel); /* should never be null */
if (intel) {
/* Dump a final BMP in case the application doesn't call SwapBuffers */
if (INTEL_DEBUG & DEBUG_AUB) {
intel_batchbuffer_flush(intel);
intel_batchbuffer_flush(brw);
aub_dump_bmp(&intel->ctx);
}

_mesa_meta_free(&intel->ctx);

intel->vtbl.destroy(intel);
intel->vtbl.destroy(brw);

if (ctx->swrast_context) {
_swsetup_DestroyContext(&intel->ctx);

@@ -626,7 +633,7 @@ intelDestroyContext(__DRIcontext * driContextPriv)
if (ctx->swrast_context)
_swrast_DestroyContext(&intel->ctx);

intel_batchbuffer_free(intel);
intel_batchbuffer_free(brw);

drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
intel->first_post_swapbuffers_batch = NULL;

@@ -679,9 +686,10 @@ intelUnbindContext(__DRIcontext * driContextPriv)
 * yet), we go turn that back off before anyone finds out.
 */
static void
intel_gles3_srgb_workaround(struct intel_context *intel,
intel_gles3_srgb_workaround(struct brw_context *brw,
                            struct gl_framebuffer *fb)
{
struct intel_context *intel = &brw->intel;
struct gl_context *ctx = &intel->ctx;

if (_mesa_is_desktop_gl(ctx) || !fb->Visual.sRGBCapable)

@@ -704,22 +712,24 @@ intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
struct intel_context *intel;
struct brw_context *brw;
GET_CURRENT_CONTEXT(curCtx);

if (driContextPriv)
intel = (struct intel_context *) driContextPriv->driverPrivate;
brw = (struct brw_context *) driContextPriv->driverPrivate;
else
intel = NULL;
brw = NULL;

/* According to the glXMakeCurrent() man page: "Pending commands to
 * the previous context, if any, are flushed before it is released."
 * But only flush if we're actually changing contexts.
 */
if (intel_context(curCtx) && intel_context(curCtx) != intel) {
if (brw_context(curCtx) && brw_context(curCtx) != brw) {
_mesa_flush(curCtx);
}

struct intel_context *intel = &brw->intel;

if (driContextPriv) {
struct gl_context *ctx = &intel->ctx;
struct gl_framebuffer *fb, *readFb;

@@ -734,11 +744,11 @@ intelMakeCurrent(__DRIcontext * driContextPriv,
driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
}

intel_prepare_render(intel);
intel_prepare_render(brw);
_mesa_make_current(ctx, fb, readFb);

intel_gles3_srgb_workaround(intel, ctx->WinSysDrawBuffer);
intel_gles3_srgb_workaround(intel, ctx->WinSysReadBuffer);
intel_gles3_srgb_workaround(brw, ctx->WinSysDrawBuffer);
intel_gles3_srgb_workaround(brw, ctx->WinSysReadBuffer);
}
else {
_mesa_make_current(NULL, NULL, NULL);

@@ -765,11 +775,12 @@ intelMakeCurrent(__DRIcontext * driContextPriv,
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct intel_context *intel,
intel_query_dri2_buffers(struct brw_context *brw,
                         __DRIdrawable *drawable,
                         __DRIbuffer **buffers,
                         int *buffer_count)
{
struct intel_context *intel = &brw->intel;
__DRIscreen *screen = intel->intelScreen->driScrnPriv;
struct gl_framebuffer *fb = drawable->driverPrivate;
int i = 0;

@@ -838,12 +849,13 @@ intel_query_dri2_buffers(struct intel_context *intel,
 * \see intel_region_alloc_for_handle()
 */
static void
intel_process_dri2_buffer(struct intel_context *intel,
intel_process_dri2_buffer(struct brw_context *brw,
                          __DRIdrawable *drawable,
                          __DRIbuffer *buffer,
                          struct intel_renderbuffer *rb,
                          const char *buffer_name)
{
struct intel_context *intel = &brw->intel;
struct intel_region *region = NULL;

if (!rb)

@@ -886,7 +898,7 @@ intel_process_dri2_buffer(struct intel_context *intel,
if (!region)
return;

rb->mt = intel_miptree_create_for_dri2_buffer(intel,
rb->mt = intel_miptree_create_for_dri2_buffer(brw,
                                              buffer->attachment,
                                              intel_rb_format(rb),
                                              num_samples,

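A consequence visible in this file is that the DRI driverPrivate pointer is now treated as a brw_context, so DRI entry points cast it once and derive intel from it while fields still live there. A sketch with a hypothetical entry-point name, mirroring intel_update_renderbuffers above:

static void
example_dri_entry(__DRIcontext *driContextPriv)
{
   struct brw_context *brw = driContextPriv->driverPrivate;
   struct intel_context *intel = &brw->intel;

   (void) intel;              /* kept only while state remains on intel_context */
   intel_prepare_render(brw); /* driver-internal calls take brw directly */
}
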
@@ -115,9 +115,9 @@ struct intel_context

struct
{
void (*destroy) (struct intel_context * intel);
void (*finish_batch) (struct intel_context * intel);
void (*new_batch) (struct intel_context * intel);
void (*destroy) (struct brw_context * brw);
void (*finish_batch) (struct brw_context * brw);
void (*new_batch) (struct brw_context * brw);

void (*update_texture_surface)(struct gl_context *ctx,
                               unsigned unit,

@@ -372,7 +372,7 @@ extern int INTEL_DEBUG;
 * intel_context.c:
 */

extern bool intelInitContext(struct intel_context *intel,
extern bool intelInitContext(struct brw_context *brw,
                             int api,
                             unsigned major_version,
                             unsigned minor_version,

@@ -403,10 +403,10 @@ extern int intel_translate_logic_op(GLenum opcode);

void intel_update_renderbuffers(__DRIcontext *context,
                                __DRIdrawable *drawable);
void intel_prepare_render(struct intel_context *intel);
void intel_prepare_render(struct brw_context *brw);

void
intel_resolve_for_dri2_flush(struct intel_context *intel,
intel_resolve_for_dri2_flush(struct brw_context *brw,
                             __DRIdrawable *drawable);

extern void

@@ -88,7 +88,7 @@ intel_map_renderbuffer(struct gl_context *ctx,
                       GLubyte **out_map,
                       GLint *out_stride)
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
void *map;

@@ -103,7 +103,7 @@ intel_map_renderbuffer(struct gl_context *ctx,
return;
}

intel_prepare_render(intel);
intel_prepare_render(brw);

/* For a window-system renderbuffer, we need to flip the mapping we receive
 * upside-down. So we need to ask for a rectangle on flipped vertically, and

@@ -113,7 +113,7 @@ intel_map_renderbuffer(struct gl_context *ctx,
y = rb->Height - y - h;
}

intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
intel_miptree_map(brw, irb->mt, irb->mt_level, irb->mt_layer,
                  x, y, w, h, mode, &map, &stride);

if (rb->Name == 0) {

@@ -136,7 +136,7 @@ static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);

@@ -149,7 +149,7 @@ intel_unmap_renderbuffer(struct gl_context *ctx,
return;
}

intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
intel_miptree_unmap(brw, irb->mt, irb->mt_level, irb->mt_layer);
}

@@ -191,6 +191,7 @@ intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_screen *screen = intel->intelScreen;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);

@@ -235,7 +236,7 @@ intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer
if (width == 0 || height == 0)
return true;

irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                width, height,
                                                rb->NumSamples);
if (!irb->mt)

@@ -250,6 +251,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *irb;
__DRIscreen *screen;

@@ -264,7 +266,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,
/* __DRIimage is opaque to the core so it has to be checked here */
switch (image->format) {
case MESA_FORMAT_RGBA8888_REV:
_mesa_error(&intel->ctx, GL_INVALID_OPERATION,
_mesa_error(ctx, GL_INVALID_OPERATION,
            "glEGLImageTargetRenderbufferStorage(unsupported image format");
return;
break;

@@ -274,7 +276,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,

irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
irb->mt = intel_miptree_create_for_bo(intel,
irb->mt = intel_miptree_create_for_bo(brw,
                                      image->region->bo,
                                      image->format,
                                      image->offset,

@@ -289,8 +291,7 @@ intel_image_target_renderbuffer_storage(struct gl_context *ctx,
rb->Width = image->region->width;
rb->Height = image->region->height;
rb->Format = image->format;
rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
                                        image->internal_format);
rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
rb->NeedsFinishRenderTexture = true;
}

@@ -409,7 +410,7 @@ intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
}

static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer)

@@ -440,8 +441,8 @@ intel_renderbuffer_update_wrapper(struct intel_context *intel,

intel_renderbuffer_set_draw_offset(irb);

if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(intel, rb->Format)) {
intel_miptree_alloc_hiz(intel, mt);
if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) {
intel_miptree_alloc_hiz(brw, mt);
if (!mt->hiz_mt)
return false;
}

@@ -475,7 +476,7 @@ intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
struct gl_renderbuffer *rb = att->Renderbuffer;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct gl_texture_image *image = rb->TexImage;

@@ -502,7 +503,7 @@ intel_render_texture(struct gl_context * ctx,

intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer)) {
_swrast_render_texture(ctx, fb, att);
return;
}

@@ -520,7 +521,7 @@ intel_render_texture(struct gl_context * ctx,
static void
intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
{
struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);

DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));

@@ -529,7 +530,7 @@ intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
 * batch. Once again, we wish for a domain tracker in libdrm to cover
 * usage inside of a batchbuffer like GEM does in the kernel.
 */
intel_batchbuffer_emit_mi_flush(intel);
intel_batchbuffer_emit_mi_flush(brw);
}

#define fbo_incomplete(fb, ...) do { \

@@ -550,6 +551,7 @@ intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *depthRb =
   intel_get_renderbuffer(fb, BUFFER_DEPTH);

@@ -641,7 +643,7 @@ intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
continue;
}

if (!brw_render_target_supported(intel, rb)) {
if (!brw_render_target_supported(brw, rb)) {
fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
               "texture/renderbuffer format attached: %s\n",
               _mesa_get_format_name(intel_rb_format(irb)));

@@ -665,6 +667,7 @@ intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask, GLenum filter)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = intel_context(ctx);

if (mask & GL_COLOR_BUFFER_BIT) {

@@ -726,7 +729,7 @@ intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
return mask;
}

if (!intel_miptree_blit(intel,
if (!intel_miptree_blit(brw,
                        src_irb->mt,
                        src_irb->mt_level, src_irb->mt_layer,
                        srcX0, srcY0, src_rb->Name == 0,

@@ -752,7 +755,7 @@ intel_blit_framebuffer(struct gl_context *ctx,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
mask = brw_blorp_framebuffer(intel_context(ctx),
mask = brw_blorp_framebuffer(brw_context(ctx),
                             srcX0, srcY0, srcX1, srcY1,
                             dstX0, dstY0, dstX1, dstY1,
                             mask, filter);

@@ -814,11 +817,11 @@ intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
}

bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
intel_renderbuffer_resolve_hiz(struct brw_context *brw,
                               struct intel_renderbuffer *irb)
{
if (irb->mt)
return intel_miptree_slice_resolve_hiz(intel,
return intel_miptree_slice_resolve_hiz(brw,
                                       irb->mt,
                                       irb->mt_level,
                                       irb->mt_layer);

@@ -827,11 +830,11 @@ intel_renderbuffer_resolve_hiz(struct intel_context *intel,
}

bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
intel_renderbuffer_resolve_depth(struct brw_context *brw,
                                 struct intel_renderbuffer *irb)
{
if (irb->mt)
return intel_miptree_slice_resolve_depth(intel,
return intel_miptree_slice_resolve_depth(brw,
                                         irb->mt,
                                         irb->mt_level,
                                         irb->mt_layer);

@@ -840,7 +843,7 @@ intel_renderbuffer_resolve_depth(struct intel_context *intel,
}

void
intel_renderbuffer_move_to_temp(struct intel_context *intel,
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{

@@ -851,7 +854,7 @@ intel_renderbuffer_move_to_temp(struct intel_context *intel,

intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);

new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
                              intel_image->base.Base.TexFormat,
                              intel_image->base.Base.Level,
                              intel_image->base.Base.Level,

@@ -860,11 +863,11 @@ intel_renderbuffer_move_to_temp(struct intel_context *intel,
                              irb->mt->num_samples,
                              INTEL_MIPTREE_TILING_ANY);

if (brw_is_hiz_depth_format(intel, new_mt->format)) {
intel_miptree_alloc_hiz(intel, new_mt);
if (brw_is_hiz_depth_format(brw, new_mt->format)) {
intel_miptree_alloc_hiz(brw, new_mt);
}

intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);

intel_miptree_reference(&irb->mt, intel_image->mt);
intel_renderbuffer_set_draw_offset(irb);

@@ -876,8 +879,9 @@ intel_renderbuffer_move_to_temp(struct intel_context *intel,
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
intel_fbo_init(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;

@@ -139,7 +139,7 @@ intel_create_wrapped_renderbuffer(struct gl_context * ctx,
                                  gl_format format);

extern void
intel_fbo_init(struct intel_context *intel);
intel_fbo_init(struct brw_context *brw);

void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb);

@@ -175,7 +175,7 @@ intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb);
 * \return false if no resolve was needed
 */
bool
intel_renderbuffer_resolve_hiz(struct intel_context *intel,
intel_renderbuffer_resolve_hiz(struct brw_context *brw,
                               struct intel_renderbuffer *irb);

/**

@@ -187,10 +187,10 @@ intel_renderbuffer_resolve_hiz(struct intel_context *intel,
 * \return false if no resolve was needed
 */
bool
intel_renderbuffer_resolve_depth(struct intel_context *intel,
intel_renderbuffer_resolve_depth(struct brw_context *brw,
                                 struct intel_renderbuffer *irb);

void intel_renderbuffer_move_to_temp(struct intel_context *intel,
void intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                     struct intel_renderbuffer *irb,
                                     bool invalidate);

File diff suppressed because it is too large

@ -476,19 +476,19 @@ enum intel_miptree_tiling_mode {
|
|||
};
|
||||
|
||||
bool
|
||||
intel_is_non_msrt_mcs_buffer_supported(struct intel_context *intel,
|
||||
intel_is_non_msrt_mcs_buffer_supported(struct brw_context *brw,
|
||||
struct intel_mipmap_tree *mt);
|
||||
|
||||
void
|
||||
intel_get_non_msrt_mcs_alignment(struct intel_context *intel,
|
||||
intel_get_non_msrt_mcs_alignment(struct brw_context *brw,
|
||||
struct intel_mipmap_tree *mt,
|
||||
unsigned *width_px, unsigned *height);
|
||||
|
||||
bool
|
||||
intel_miptree_alloc_non_msrt_mcs(struct intel_context *intel,
|
||||
intel_miptree_alloc_non_msrt_mcs(struct brw_context *brw,
|
||||
struct intel_mipmap_tree *mt);
|
||||
|
||||
struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
|
||||
struct intel_mipmap_tree *intel_miptree_create(struct brw_context *brw,
|
||||
GLenum target,
|
||||
gl_format format,
|
||||
GLuint first_level,
|
||||
|
|
@ -501,7 +501,7 @@ struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
|
|||
enum intel_miptree_tiling_mode);
|
||||
|
||||
struct intel_mipmap_tree *
|
||||
intel_miptree_create_layout(struct intel_context *intel,
|
||||
intel_miptree_create_layout(struct brw_context *brw,
|
||||
GLenum target,
|
||||
gl_format format,
|
||||
GLuint first_level,
|
||||
|
|
@ -513,7 +513,7 @@ intel_miptree_create_layout(struct intel_context *intel,
|
|||
GLuint num_samples);
|
||||
|
||||
struct intel_mipmap_tree *
|
||||
intel_miptree_create_for_bo(struct intel_context *intel,
|
||||
intel_miptree_create_for_bo(struct brw_context *brw,
|
||||
drm_intel_bo *bo,
|
||||
gl_format format,
|
||||
uint32_t offset,
|
||||
|
|
@ -523,7 +523,7 @@ intel_miptree_create_for_bo(struct intel_context *intel,
|
|||
uint32_t tiling);
|
||||
|
||||
struct intel_mipmap_tree*
|
||||
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
|
||||
intel_miptree_create_for_dri2_buffer(struct brw_context *brw,
|
||||
unsigned dri_attachment,
|
||||
gl_format format,
|
||||
uint32_t num_samples,
|
||||
|
|
@ -537,7 +537,7 @@ intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
|
|||
* - Depth is 1.
|
||||
*/
|
||||
struct intel_mipmap_tree*
|
||||
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
|
||||
intel_miptree_create_for_renderbuffer(struct brw_context *brw,
|
||||
gl_format format,
|
||||
uint32_t width,
|
||||
uint32_t height,
|
||||
|
|
@ -589,12 +589,12 @@ void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
|
|||
GLuint img, GLuint x, GLuint y);
|
||||
|
||||
void
|
||||
intel_miptree_copy_teximage(struct intel_context *intel,
|
||||
intel_miptree_copy_teximage(struct brw_context *brw,
|
||||
struct intel_texture_image *intelImage,
|
||||
struct intel_mipmap_tree *dst_mt, bool invalidate);
|
||||
|
||||
bool
|
||||
intel_miptree_alloc_mcs(struct intel_context *intel,
|
||||
+intel_miptree_alloc_mcs(struct brw_context *brw,
                         struct intel_mipmap_tree *mt,
                         GLuint num_samples);

@@ -613,7 +613,7 @@ intel_miptree_alloc_mcs(struct intel_context *intel,
  */
 bool
-intel_miptree_alloc_hiz(struct intel_context *intel,
+intel_miptree_alloc_hiz(struct brw_context *brw,
                         struct intel_mipmap_tree *mt);

 bool
@@ -634,7 +634,7 @@ intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_slice_resolve_hiz(struct intel_context *intel,
+intel_miptree_slice_resolve_hiz(struct brw_context *brw,
                                 struct intel_mipmap_tree *mt,
                                 unsigned int level,
                                 unsigned int depth);
@@ -643,7 +643,7 @@ intel_miptree_slice_resolve_hiz(struct intel_context *intel,
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_slice_resolve_depth(struct intel_context *intel,
+intel_miptree_slice_resolve_depth(struct brw_context *brw,
                                   struct intel_mipmap_tree *mt,
                                   unsigned int level,
                                   unsigned int depth);
@@ -652,14 +652,14 @@ intel_miptree_slice_resolve_depth(struct intel_context *intel,
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
+intel_miptree_all_slices_resolve_hiz(struct brw_context *brw,
                                      struct intel_mipmap_tree *mt);

 /**
  * \return false if no resolve was needed
  */
 bool
-intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
+intel_miptree_all_slices_resolve_depth(struct brw_context *brw,
                                        struct intel_mipmap_tree *mt);

 /**\}*/
@@ -680,36 +680,31 @@ intel_miptree_used_for_rendering(struct intel_mipmap_tree *mt)
 }

 void
-intel_miptree_resolve_color(struct intel_context *intel,
+intel_miptree_resolve_color(struct brw_context *brw,
                             struct intel_mipmap_tree *mt);

 void
-intel_miptree_make_shareable(struct intel_context *intel,
+intel_miptree_make_shareable(struct brw_context *brw,
                              struct intel_mipmap_tree *mt);

 void
-intel_miptree_downsample(struct intel_context *intel,
+intel_miptree_downsample(struct brw_context *brw,
                          struct intel_mipmap_tree *mt);

 void
-intel_miptree_upsample(struct intel_context *intel,
+intel_miptree_upsample(struct brw_context *brw,
                        struct intel_mipmap_tree *mt);

 /* i915_mipmap_tree.c:
  */
 void i915_miptree_layout(struct intel_mipmap_tree *mt);
 void i945_miptree_layout(struct intel_mipmap_tree *mt);
-void brw_miptree_layout(struct intel_context *intel,
-                        struct intel_mipmap_tree *mt);
+void brw_miptree_layout(struct brw_context *brw, struct intel_mipmap_tree *mt);

-void *intel_miptree_map_raw(struct intel_context *intel,
+void *intel_miptree_map_raw(struct brw_context *brw,
                             struct intel_mipmap_tree *mt);

-void intel_miptree_unmap_raw(struct intel_context *intel,
+void intel_miptree_unmap_raw(struct brw_context *brw,
                              struct intel_mipmap_tree *mt);

 void
-intel_miptree_map(struct intel_context *intel,
+intel_miptree_map(struct brw_context *brw,
                   struct intel_mipmap_tree *mt,
                   unsigned int level,
                   unsigned int slice,
@@ -722,13 +717,13 @@ intel_miptree_map(struct intel_context *intel,
                   int *out_stride);

 void
-intel_miptree_unmap(struct intel_context *intel,
+intel_miptree_unmap(struct brw_context *brw,
                     struct intel_mipmap_tree *mt,
                     unsigned int level,
                     unsigned int slice);

 void
-intel_hiz_exec(struct intel_context *intel, struct intel_mipmap_tree *mt,
+intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
                unsigned int level, unsigned int layer, enum gen6_hiz_op op);

 #ifdef __cplusplus

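The prototype changes above are purely mechanical: every mip-tree helper now takes brw_context. What makes such a sweep safe is that brw_context embeds an intel_context, so a converted function can still reach any field that has not been migrated yet through brw->intel (the same pattern the later hunks use explicitly). The following standalone sketch uses hypothetical, stripped-down struct definitions, not the real driver types, just to illustrate that embedding pattern:

#include <stdio.h>

/* Hypothetical stand-ins for the real driver structs. */
struct intel_context {
   int gen;                      /* example of a field not yet migrated */
};

struct brw_context {
   struct intel_context intel;   /* the old context is embedded in the new one */
};

/* A helper converted to take brw_context can still reach the old fields. */
static int
helper_taking_brw(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* transitional access path */
   return intel->gen;
}

int
main(void)
{
   struct brw_context brw = { .intel = { .gen = 7 } };
   printf("gen = %d\n", helper_taking_brw(&brw));
   return 0;
}

Because the derived pointer is a single member access, converting a prototype never strands its body: callers and fields can be migrated one at a time.
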
@@ -176,6 +176,7 @@ do_blit_bitmap( struct gl_context *ctx,
                 const struct gl_pixelstore_attrib *unpack,
                 const GLubyte *bitmap )
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct intel_renderbuffer *irb;
@@ -200,7 +201,7 @@ do_blit_bitmap( struct gl_context *ctx,
       return false;
    }

-   intel_prepare_render(intel);
+   intel_prepare_render(brw);

    if (fb->_NumColorDrawBuffers != 1) {
       perf_debug("accelerated glBitmap() only supports rendering to a "
@@ -258,7 +259,7 @@ do_blit_bitmap( struct gl_context *ctx,
    /* The blitter has no idea about fast color clears, so we need to resolve
    * the miptree before we do anything.
    */
-   intel_miptree_resolve_color(intel, irb->mt);
+   intel_miptree_resolve_color(brw, irb->mt);

    /* Chop it all into chunks that can be digested by hardware: */
    for (py = 0; py < height; py += DY) {
@@ -289,7 +290,7 @@ do_blit_bitmap( struct gl_context *ctx,
         if (count == 0)
            continue;

-        if (!intelEmitImmediateColorExpandBlit(intel,
+        if (!intelEmitImmediateColorExpandBlit(brw,
                                                irb->mt->cpp,
                                                (GLubyte *)stipple,
                                                sz,
@@ -312,14 +313,14 @@ do_blit_bitmap( struct gl_context *ctx,
 out:

    if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);

    if (_mesa_is_bufferobj(unpack->BufferObj)) {
       /* done with PBO so unmap it now */
       ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj);
    }

-   intel_check_front_buffer_rendering(intel);
+   intel_check_front_buffer_rendering(brw);

    return true;
 }

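Each converted GL entry point in the hunks above and below opens with struct brw_context *brw = brw_context(ctx); next to the existing intel_context lookup. The definition of that downcast helper is not part of this diff; the standalone sketch below only illustrates the usual idea behind such helpers, under the assumption (hypothetical here) that the core gl_context sits at offset zero of both wrapper structs, and it uses an invented helper name rather than the real one:

#include <stdio.h>

/* Hypothetical, simplified layouts: gl_context is the first member of
 * intel_context, which is the first member of brw_context, so a pointer to
 * the core context can be cast back to the driver context. */
struct gl_context    { int dummy; };
struct intel_context { struct gl_context ctx; };
struct brw_context   { struct intel_context intel; };

static struct brw_context *
brw_context_from_gl(struct gl_context *ctx)
{
   /* Valid only because ctx lives at offset 0 of both wrappers. */
   return (struct brw_context *) ctx;
}

static void
driver_hook(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context_from_gl(ctx);
   printf("recovered driver context at %p\n", (void *) brw);
}

int
main(void)
{
   struct brw_context brw = {0};
   driver_hook(&brw.intel.ctx);
   return 0;
}

With both lookups cheap, a driver hook can keep declaring intel and brw side by side during the transition, which is exactly what the converted functions in this commit do.
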
@@ -52,6 +52,7 @@ do_blit_copypixels(struct gl_context * ctx,
                    GLsizei width, GLsizei height,
                    GLint dstx, GLint dsty, GLenum type)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct gl_framebuffer *read_fb = ctx->ReadBuffer;
@@ -142,7 +143,7 @@ do_blit_copypixels(struct gl_context * ctx,
       return false;
    }

-   intel_prepare_render(intel);
+   intel_prepare_render(brw);

    intel_flush(&intel->ctx);

@@ -168,7 +169,7 @@ do_blit_copypixels(struct gl_context * ctx,
    dstx += srcx - orig_srcx;
    dsty += srcy - orig_srcy;

-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            read_irb->mt, read_irb->mt_level, read_irb->mt_layer,
                            srcx, srcy, _mesa_is_winsys_fbo(read_fb),
                            draw_irb->mt, draw_irb->mt_level, draw_irb->mt_layer,
@@ -184,7 +185,7 @@ do_blit_copypixels(struct gl_context * ctx,
       ctx->Query.CurrentOcclusionObject->Result += width * height;

 out:
-   intel_check_front_buffer_rendering(intel);
+   intel_check_front_buffer_rendering(brw);

    DBG("%s: success\n", __FUNCTION__);
    return true;

@@ -75,6 +75,7 @@ do_blit_readpixels(struct gl_context * ctx,
                    GLenum format, GLenum type,
                    const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
    GLuint dst_offset;
@@ -124,25 +125,25 @@ do_blit_readpixels(struct gl_context * ctx,
    }

    dirty = intel->front_buffer_dirty;
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
    intel->front_buffer_dirty = dirty;

    all = (width * height * irb->mt->cpp == dst->Base.Size &&
          x == 0 && dst_offset == 0);

-   dst_buffer = intel_bufferobj_buffer(intel, dst,
+   dst_buffer = intel_bufferobj_buffer(brw, dst,
                                        all ? INTEL_WRITE_FULL :
                                        INTEL_WRITE_PART);

    struct intel_mipmap_tree *pbo_mt =
-      intel_miptree_create_for_bo(intel,
+      intel_miptree_create_for_bo(brw,
                                   dst_buffer,
                                   irb->mt->format,
                                   dst_offset,
                                   width, height,
                                   dst_stride, I915_TILING_NONE);

-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            irb->mt, irb->mt_level, irb->mt_layer,
                            x, y, _mesa_is_winsys_fbo(ctx->ReadBuffer),
                            pbo_mt, 0, 0,
@@ -164,6 +165,7 @@ intelReadPixels(struct gl_context * ctx,
                 GLenum format, GLenum type,
                 const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    bool dirty;

@@ -182,7 +184,7 @@ intelReadPixels(struct gl_context * ctx,
    /* glReadPixels() wont dirty the front buffer, so reset the dirty
     * flag after calling intel_prepare_render(). */
    dirty = intel->front_buffer_dirty;
-   intel_prepare_render(intel);
+   intel_prepare_render(brw);
    intel->front_buffer_dirty = dirty;

    /* Update Mesa state before calling _mesa_readpixels().

@@ -157,14 +157,15 @@ intelDRI2Flush(__DRIdrawable *drawable)
 {
    GET_CURRENT_CONTEXT(ctx);
    struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    if (intel == NULL)
       return;

-   intel_resolve_for_dri2_flush(intel, drawable);
+   intel_resolve_for_dri2_flush(brw, drawable);
    intel->need_throttle = true;

    if (intel->batch.used)
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);

    if (INTEL_DEBUG & DEBUG_AUB) {
       aub_dump_bmp(ctx);
@@ -283,14 +284,14 @@ intel_allocate_image(int dri_format, void *loaderPrivate)
  * Sets up a DRIImage structure to point to our shared image in a region
  */
 static void
-intel_setup_image_from_mipmap_tree(struct intel_context *intel, __DRIimage *image,
+intel_setup_image_from_mipmap_tree(struct brw_context *brw, __DRIimage *image,
                                    struct intel_mipmap_tree *mt, GLuint level,
                                    GLuint zoffset)
 {
    unsigned int draw_x, draw_y;
    uint32_t mask_x, mask_y;

-   intel_miptree_make_shareable(intel, mt);
+   intel_miptree_make_shareable(brw, mt);

    intel_miptree_check_level_layer(mt, level, zoffset);

@@ -376,19 +377,19 @@ intel_create_image_from_renderbuffer(__DRIcontext *context,
                                      int renderbuffer, void *loaderPrivate)
 {
    __DRIimage *image;
-   struct intel_context *intel = context->driverPrivate;
+   struct brw_context *brw = context->driverPrivate;
+   struct gl_context *ctx = &brw->intel.ctx;
    struct gl_renderbuffer *rb;
    struct intel_renderbuffer *irb;

-   rb = _mesa_lookup_renderbuffer(&intel->ctx, renderbuffer);
+   rb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
    if (!rb) {
-      _mesa_error(&intel->ctx,
-                  GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
+      _mesa_error(ctx, GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
       return NULL;
    }

    irb = intel_renderbuffer(rb);
-   intel_miptree_make_shareable(intel, irb->mt);
+   intel_miptree_make_shareable(brw, irb->mt);
    image = calloc(1, sizeof *image);
    if (image == NULL)
       return NULL;
@@ -414,7 +415,8 @@ intel_create_image_from_texture(__DRIcontext *context, int target,
                                 void *loaderPrivate)
 {
    __DRIimage *image;
-   struct intel_context *intel = context->driverPrivate;
+   struct brw_context *brw = context->driverPrivate;
+   struct intel_context *intel = &brw->intel;
    struct gl_texture_object *obj;
    struct intel_texture_object *iobj;
    GLuint face = 0;
@@ -453,7 +455,7 @@ intel_create_image_from_texture(__DRIcontext *context, int target,
    image->internal_format = obj->Image[face][level]->InternalFormat;
    image->format = obj->Image[face][level]->TexFormat;
    image->data = loaderPrivate;
-   intel_setup_image_from_mipmap_tree(intel, image, iobj->mt, level, zoffset);
+   intel_setup_image_from_mipmap_tree(brw, image, iobj->mt, level, zoffset);
    image->dri_format = intel_dri_format(image->format);
    image->has_depthstencil = iobj->mt->stencil_mt? true : false;
    if (image->dri_format == MESA_FORMAT_NONE) {

@@ -68,11 +68,12 @@ static void
 intel_fence_sync(struct gl_context *ctx, struct gl_sync_object *s,
                  GLenum condition, GLbitfield flags)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_sync_object *sync = (struct intel_sync_object *)s;

    assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
-   intel_batchbuffer_emit_mi_flush(intel);
+   intel_batchbuffer_emit_mi_flush(brw);

    sync->bo = intel->batch.bo;
    drm_intel_bo_reference(sync->bo);

@@ -60,6 +60,7 @@ static GLboolean
 intel_alloc_texture_image_buffer(struct gl_context *ctx,
                                  struct gl_texture_image *image)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_texture_image *intel_image = intel_texture_image(image);
    struct gl_texture_object *texobj = image->TexObject;
@@ -90,7 +91,7 @@ intel_alloc_texture_image_buffer(struct gl_context *ctx,
          __FUNCTION__, texobj, image->Level,
          image->Width, image->Height, image->Depth, intel_texobj->mt);
    } else {
-      intel_image->mt = intel_miptree_create_for_teximage(intel, intel_texobj,
+      intel_image->mt = intel_miptree_create_for_teximage(brw, intel_texobj,
                                                           intel_image,
                                                           false);

@@ -140,7 +141,7 @@ intel_map_texture_image(struct gl_context *ctx,
                         GLubyte **map,
                         GLint *stride)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_texture_image *intel_image = intel_texture_image(tex_image);
    struct intel_mipmap_tree *mt = intel_image->mt;

@@ -157,7 +158,7 @@ intel_map_texture_image(struct gl_context *ctx,
    if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
       slice = tex_image->Face;

-   intel_miptree_map(intel, mt, tex_image->Level, slice, x, y, w, h, mode,
+   intel_miptree_map(brw, mt, tex_image->Level, slice, x, y, w, h, mode,
                      (void **)map, stride);
 }

@@ -165,14 +166,14 @@ static void
 intel_unmap_texture_image(struct gl_context *ctx,
                           struct gl_texture_image *tex_image, GLuint slice)
 {
-   struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_texture_image *intel_image = intel_texture_image(tex_image);
    struct intel_mipmap_tree *mt = intel_image->mt;

    if (tex_image->TexObject->Target == GL_TEXTURE_CUBE_MAP)
       slice = tex_image->Face;

-   intel_miptree_unmap(intel, mt, tex_image->Level, slice);
+   intel_miptree_unmap(brw, mt, tex_image->Level, slice);
 }

 void

@@ -48,12 +48,12 @@ void intelSetTexBuffer2(__DRIcontext *pDRICtx,
                         GLint target, GLint format, __DRIdrawable *pDraw);

 struct intel_mipmap_tree *
-intel_miptree_create_for_teximage(struct intel_context *intel,
+intel_miptree_create_for_teximage(struct brw_context *brw,
                                   struct intel_texture_object *intelObj,
                                   struct intel_texture_image *intelImage,
                                   bool expect_accelerated_upload);

-GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
+GLuint intel_finalize_mipmap_tree(struct brw_context *brw, GLuint unit);

 bool
 intel_texsubimage_tiled_memcpy(struct gl_context *ctx,

@@ -46,7 +46,7 @@


 static bool
-intel_copy_texsubimage(struct intel_context *intel,
+intel_copy_texsubimage(struct brw_context *brw,
                        struct intel_texture_image *intelImage,
                        GLint dstx, GLint dsty, GLint slice,
                        struct intel_renderbuffer *irb,
@@ -54,7 +54,7 @@ intel_copy_texsubimage(struct intel_context *intel,
 {
    const GLenum internalFormat = intelImage->base.Base.InternalFormat;

-   intel_prepare_render(intel);
+   intel_prepare_render(brw);

    /* glCopyTexSubImage() can be called on a multisampled renderbuffer (if
     * that renderbuffer is associated with the window system framebuffer),
@@ -75,7 +75,7 @@ intel_copy_texsubimage(struct intel_context *intel,
    }

    /* blit from src buffer to texture */
-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            irb->mt, irb->mt_level, irb->mt_layer,
                            x, y, irb->Base.Base.Name == 0,
                            intelImage->mt, intelImage->base.Base.Level,
@@ -97,15 +97,16 @@ intelCopyTexSubImage(struct gl_context *ctx, GLuint dims,
                      GLint x, GLint y,
                      GLsizei width, GLsizei height)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);

    /* Try BLORP first. It can handle almost everything. */
-   if (brw_blorp_copytexsubimage(intel, rb, texImage, slice, x, y,
+   if (brw_blorp_copytexsubimage(brw, rb, texImage, slice, x, y,
                                  xoffset, yoffset, width, height))
       return;

    /* Next, try the BLT engine. */
-   if (intel_copy_texsubimage(intel,
+   if (intel_copy_texsubimage(brw,
                               intel_texture_image(texImage),
                               xoffset, yoffset, slice,
                               intel_renderbuffer(rb), x, y, width, height)) {

@@ -30,7 +30,7 @@
  * miptree of that size.
  */
 struct intel_mipmap_tree *
-intel_miptree_create_for_teximage(struct intel_context *intel,
+intel_miptree_create_for_teximage(struct brw_context *brw,
                                   struct intel_texture_object *intelObj,
                                   struct intel_texture_image *intelImage,
                                   bool expect_accelerated_upload)
@@ -91,7 +91,7 @@ intel_miptree_create_for_teximage(struct intel_context *intel,
       }
    }

-   return intel_miptree_create(intel,
+   return intel_miptree_create(brw,
                                intelObj->base.Target,
                                intelImage->base.Base.TexFormat,
                                firstLevel,
@@ -114,6 +114,7 @@ try_pbo_upload(struct gl_context *ctx,
 {
    struct intel_texture_image *intelImage = intel_texture_image(image);
    struct intel_context *intel = intel_context(ctx);
+   struct brw_context *brw = brw_context(ctx);
    struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
    GLuint src_offset;
    drm_intel_bo *src_buffer;
@@ -150,7 +151,7 @@ try_pbo_upload(struct gl_context *ctx,
       return false;
    }

-   src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
+   src_buffer = intel_bufferobj_source(brw, pbo, 64, &src_offset);
    /* note: potential 64-bit ptr to 32-bit int cast */
    src_offset += (GLuint) (unsigned long) pixels;

@@ -158,7 +159,7 @@ try_pbo_upload(struct gl_context *ctx,
       _mesa_image_row_stride(unpack, image->Width, format, type);

    struct intel_mipmap_tree *pbo_mt =
-      intel_miptree_create_for_bo(intel,
+      intel_miptree_create_for_bo(brw,
                                   src_buffer,
                                   intelImage->mt->format,
                                   src_offset,
@@ -167,7 +168,7 @@ try_pbo_upload(struct gl_context *ctx,
    if (!pbo_mt)
       return false;

-   if (!intel_miptree_blit(intel,
+   if (!intel_miptree_blit(brw,
                            pbo_mt, 0, 0,
                            0, 0, false,
                            intelImage->mt, image->Level, image->Face,
@@ -253,7 +254,7 @@ intel_set_texture_image_region(struct gl_context *ctx,

    ctx->Driver.FreeTextureImageBuffer(ctx, image);

-   intel_image->mt = intel_miptree_create_layout(intel, target, image->TexFormat,
+   intel_image->mt = intel_miptree_create_layout(brw, target, image->TexFormat,
                                                  0, 0,
                                                  width, height, 1,
                                                  true, 0 /* num_samples */);
@@ -294,7 +295,8 @@ intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
                    __DRIdrawable *dPriv)
 {
    struct gl_framebuffer *fb = dPriv->driverPrivate;
-   struct intel_context *intel = pDRICtx->driverPrivate;
+   struct brw_context *brw = pDRICtx->driverPrivate;
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    struct intel_texture_object *intelObj;
    struct intel_renderbuffer *rb;
@@ -335,7 +337,7 @@ intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,

    _mesa_lock_texture(&intel->ctx, texObj);
    texImage = _mesa_get_tex_image(ctx, texObj, target, level);
-   intel_miptree_make_shareable(intel, rb->mt);
+   intel_miptree_make_shareable(brw, rb->mt);
    intel_set_texture_image_region(ctx, texImage, rb->mt->region, target,
                                   internalFormat, texFormat, 0,
                                   rb->mt->region->width,

@@ -51,6 +51,7 @@ intel_blit_texsubimage(struct gl_context * ctx,
                        GLenum format, GLenum type, const void *pixels,
                        const struct gl_pixelstore_attrib *packing)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_texture_image *intelImage = intel_texture_image(texImage);

@@ -88,14 +89,14 @@ intel_blit_texsubimage(struct gl_context * ctx,
       return false;

    struct intel_mipmap_tree *temp_mt =
-      intel_miptree_create(intel, GL_TEXTURE_2D, texImage->TexFormat,
+      intel_miptree_create(brw, GL_TEXTURE_2D, texImage->TexFormat,
                           0, 0,
                           width, height, 1,
                           false, 0, INTEL_MIPTREE_TILING_NONE);
    if (!temp_mt)
       goto err;

-   GLubyte *dst = intel_miptree_map_raw(intel, temp_mt);
+   GLubyte *dst = intel_miptree_map_raw(brw, temp_mt);
    if (!dst)
       goto err;

@@ -108,11 +109,11 @@ intel_blit_texsubimage(struct gl_context * ctx,
       _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
    }

-   intel_miptree_unmap_raw(intel, temp_mt);
+   intel_miptree_unmap_raw(brw, temp_mt);

    bool ret;

-   ret = intel_miptree_blit(intel,
+   ret = intel_miptree_blit(brw,
                             temp_mt, 0, 0,
                             0, 0, false,
                             intelImage->mt, texImage->Level, texImage->Face,
@@ -168,6 +169,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
                                const struct gl_pixelstore_attrib *packing,
                                bool for_glTexImage)
 {
+   struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = intel_context(ctx);
    struct intel_texture_image *image = intel_texture_image(texImage);

@@ -209,13 +211,13 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx,
    /* Since we are going to write raw data to the miptree, we need to resolve
     * any pending fast color clears before we start.
     */
-   intel_miptree_resolve_color(intel, image->mt);
+   intel_miptree_resolve_color(brw, image->mt);

    bo = image->mt->region->bo;

    if (drm_intel_bo_references(intel->batch.bo, bo)) {
       perf_debug("Flushing before mapping a referenced bo.\n");
-      intel_batchbuffer_flush(intel);
+      intel_batchbuffer_flush(brw);
    }

    if (unlikely(intel->perf_debug)) {

@@ -38,8 +38,9 @@ intel_update_max_level(struct intel_texture_object *intelObj,
 /*
  */
 GLuint
-intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+intel_finalize_mipmap_tree(struct brw_context *brw, GLuint unit)
 {
+   struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
    struct intel_texture_object *intelObj = intel_texture_object(tObj);
@@ -95,7 +96,7 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
              _mesa_get_format_name(firstImage->base.Base.TexFormat),
              width, height, depth, tObj->BaseLevel, intelObj->_MaxLevel);

-   intelObj->mt = intel_miptree_create(intel,
+   intelObj->mt = intel_miptree_create(brw,
                                        intelObj->base.Target,
                                        firstImage->base.Base.TexFormat,
                                        tObj->BaseLevel,
@@ -122,7 +123,7 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
          break;

       if (intelObj->mt != intelImage->mt) {
-         intel_miptree_copy_teximage(intel, intelImage, intelObj->mt,
+         intel_miptree_copy_teximage(brw, intelImage, intelObj->mt,
                                      false /* invalidate */);
       }

@@ -40,9 +40,9 @@ test_compact_instruction(struct brw_compile *p, struct brw_instruction src)
    if (brw_try_compact_instruction(p, &dst, &src)) {
       struct brw_instruction uncompacted;

-      brw_uncompact_instruction(intel, &uncompacted, &dst);
+      brw_uncompact_instruction(brw, &uncompacted, &dst);
       if (memcmp(&uncompacted, &src, sizeof(src))) {
-         brw_debug_compact_uncompact(intel, &src, &uncompacted);
+         brw_debug_compact_uncompact(brw, &src, &uncompacted);
          return false;
       }
    } else {