Pull the texmem i915 driver onto a new branch closer to the current trunk.

Compiles but otherwise untested.
This commit is contained in:
Keith Whitwell 2006-08-02 10:01:03 +00:00
parent 365582dd6f
commit 527c05eb2a
38 changed files with 3231 additions and 6470 deletions

View file

@ -7,16 +7,6 @@ LIBNAME = i915_dri.so
MINIGLX_SOURCES = server/intel_dri.c
DRIVER_SOURCES = \
i915_context.c \
i915_debug.c \
i915_fragprog.c \
i915_metaops.c \
i915_program.c \
i915_state.c \
i915_tex.c \
i915_texprog.c \
i915_texstate.c \
i915_vtbl.c \
i830_context.c \
i830_metaops.c \
i830_state.c \
@ -24,18 +14,44 @@ DRIVER_SOURCES = \
i830_tex.c \
i830_texstate.c \
i830_vtbl.c \
intel_render.c \
intel_regions.c \
intel_buffer_objects.c \
intel_batchbuffer.c \
intel_mipmap_tree.c \
i915_tex_layout.c \
intel_tex_image.c \
intel_tex_subimage.c \
intel_tex_copy.c \
intel_tex_validate.c \
intel_tex_format.c \
intel_tex.c \
intel_pixel.c \
intel_pixel_copy.c \
intel_pixel_read.c \
intel_pixel_draw.c \
intel_buffers.c \
intel_blit.c \
i915_tex.c \
i915_texstate.c \
i915_context.c \
i915_debug.c \
i915_fragprog.c \
i915_metaops.c \
i915_program.c \
i915_state.c \
i915_texprog.c \
i915_vtbl.c \
intel_context.c \
intel_ioctl.c \
intel_pixel.c \
intel_render.c \
intel_rotate.c \
intel_screen.c \
intel_span.c \
intel_state.c \
intel_tex.c \
intel_texmem.c \
intel_tris.c
intel_tris.c \
intel_fbo.c \
intel_depthstencil.c \
intel_bufmgr.c
C_SOURCES = \
$(COMMON_SOURCES) \

View file

@ -58,10 +58,9 @@ GLboolean i830CreateContext( const __GLcontextModes *mesaVis,
void *sharedContextPrivate)
{
struct dd_function_table functions;
i830ContextPtr i830 = (i830ContextPtr) CALLOC_STRUCT(i830_context);
intelContextPtr intel = &i830->intel;
struct i830_context *i830 = CALLOC_STRUCT(i830_context);
struct intel_context *intel = &i830->intel;
GLcontext *ctx = &intel->ctx;
GLuint i;
if (!i830) return GL_FALSE;
i830InitVtbl( i830 );
@ -77,34 +76,14 @@ GLboolean i830CreateContext( const __GLcontextModes *mesaVis,
intel->ctx.Const.MaxTextureImageUnits = I830_TEX_UNITS;
intel->ctx.Const.MaxTextureCoordUnits = I830_TEX_UNITS;
intel->nr_heaps = 1;
intel->texture_heaps[0] =
driCreateTextureHeap( 0, intel,
intel->intelScreen->tex.size,
12,
I830_NR_TEX_REGIONS,
intel->sarea->texList,
(unsigned *) & intel->sarea->texAge,
& intel->swapped,
sizeof( struct i830_texture_object ),
(destroy_texture_object_t *)intelDestroyTexObj );
/* FIXME: driCalculateMaxTextureLevels assumes that mipmaps are tightly
* FIXME: packed, but they're not in Intel graphics hardware.
/* Advertise the full hardware capabilities. The new memory
* manager should cope much better with overload situations:
*/
intel->ctx.Const.MaxTextureUnits = I830_TEX_UNITS;
i = driQueryOptioni( &intel->intelScreen->optionCache, "allow_large_textures");
driCalculateMaxTextureLevels( intel->texture_heaps,
intel->nr_heaps,
&intel->ctx.Const,
4,
11, /* max 2D texture size is 2048x2048 */
8, /* max 3D texture size is 256^3 */
10, /* max CUBE texture size is 1024x1024 */
11, /* max RECT. supported */
12,
GL_FALSE,
i );
ctx->Const.MaxTextureLevels = 12;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 11;
ctx->Const.MaxTextureRectSize = (1<<11);
ctx->Const.MaxTextureUnits = I830_TEX_UNITS;
_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
18 * sizeof(GLfloat) );
@ -114,7 +93,7 @@ GLboolean i830CreateContext( const __GLcontextModes *mesaVis,
driInitExtensions( ctx, i830_extensions, GL_FALSE );
i830InitState( i830 );
i830InitMetaFuncs( i830 );
_tnl_allow_vertex_fog( ctx, 1 );
_tnl_allow_pixel_fog( ctx, 0 );

View file

@ -39,6 +39,7 @@
#define I830_UPLOAD_CTX 0x1
#define I830_UPLOAD_BUFFERS 0x2
#define I830_UPLOAD_STIPPLE 0x4
#define I830_UPLOAD_INVARIENT 0x8
#define I830_UPLOAD_TEX(i) (0x10<<(i))
#define I830_UPLOAD_TEXBLEND(i) (0x100<<(i))
#define I830_UPLOAD_TEX_ALL (0x0f0)
@ -48,17 +49,15 @@
*/
#define I830_DESTREG_CBUFADDR0 0
#define I830_DESTREG_CBUFADDR1 1
#define I830_DESTREG_CBUFADDR2 2
#define I830_DESTREG_DBUFADDR0 3
#define I830_DESTREG_DBUFADDR1 4
#define I830_DESTREG_DBUFADDR2 5
#define I830_DESTREG_DV0 6
#define I830_DESTREG_DV1 7
#define I830_DESTREG_SENABLE 8
#define I830_DESTREG_SR0 9
#define I830_DESTREG_SR1 10
#define I830_DESTREG_SR2 11
#define I830_DEST_SETUP_SIZE 12
#define I830_DESTREG_DBUFADDR0 2
#define I830_DESTREG_DBUFADDR1 3
#define I830_DESTREG_DV0 4
#define I830_DESTREG_DV1 5
#define I830_DESTREG_SENABLE 6
#define I830_DESTREG_SR0 7
#define I830_DESTREG_SR1 8
#define I830_DESTREG_SR2 9
#define I830_DEST_SETUP_SIZE 10
#define I830_CTXREG_STATE1 0
#define I830_CTXREG_STATE2 1
@ -84,14 +83,13 @@
#define I830_STP_SETUP_SIZE 2
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0S0 1
#define I830_TEXREG_TM0S1 2
#define I830_TEXREG_TM0S2 3
#define I830_TEXREG_TM0S3 4
#define I830_TEXREG_TM0S4 5
#define I830_TEXREG_MCS 6 /* _3DSTATE_MAP_COORD_SETS */
#define I830_TEXREG_CUBE 7 /* _3DSTATE_MAP_SUBE */
#define I830_TEX_SETUP_SIZE 8
#define I830_TEXREG_TM0S1 1
#define I830_TEXREG_TM0S2 2
#define I830_TEXREG_TM0S3 3
#define I830_TEXREG_TM0S4 4
#define I830_TEXREG_MCS 5 /* _3DSTATE_MAP_COORD_SETS */
#define I830_TEXREG_CUBE 6 /* _3DSTATE_MAP_SUBE */
#define I830_TEX_SETUP_SIZE 7
#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
@ -110,6 +108,17 @@ struct i830_hw_state {
GLuint Tex[I830_TEX_UNITS][I830_TEX_SETUP_SIZE];
GLuint TexBlend[I830_TEX_UNITS][I830_TEXBLEND_SIZE];
GLuint TexBlendWordsUsed[I830_TEX_UNITS];
struct intel_region *draw_region;
struct intel_region *depth_region;
/* Regions aren't actually that appropriate here as the memory may
* be from a PBO or FBO. Just use the buffer id. Will have to do
* this for draw and depth for FBO's...
*/
GLuint tex_buffer[I830_TEX_UNITS];
GLuint tex_offset[I830_TEX_UNITS];
GLuint emitted; /* I810_UPLOAD_* */
GLuint active;
};
@ -118,15 +127,12 @@ struct i830_context
{
struct intel_context intel;
GLuint lodbias_tm0s3[MAX_TEXTURE_UNITS];
DECLARE_RENDERINPUTS(last_index_bitset);
struct i830_hw_state meta, initial, state, *current;
};
typedef struct i830_context *i830ContextPtr;
typedef struct i830_texture_object *i830TextureObjectPtr;
#define I830_CONTEXT(ctx) ((i830ContextPtr)(ctx))
@ -148,7 +154,7 @@ do { \
/* i830_vtbl.c
*/
extern void
i830InitVtbl( i830ContextPtr i830 );
i830InitVtbl( struct i830_context *i830 );
/* i830_context.c
*/
@ -160,22 +166,19 @@ i830CreateContext( const __GLcontextModes *mesaVis,
/* i830_tex.c, i830_texstate.c
*/
extern void
i830UpdateTextureState( intelContextPtr intel );
i830UpdateTextureState( struct intel_context *intel );
extern void
i830InitTextureFuncs( struct dd_function_table *functions );
extern intelTextureObjectPtr
i830AllocTexObj( struct gl_texture_object *tObj );
/* i830_texblend.c
*/
extern GLuint i830SetTexEnvCombine(i830ContextPtr i830,
extern GLuint i830SetTexEnvCombine(struct i830_context *i830,
const struct gl_tex_env_combine_state * combine, GLint blendUnit,
GLuint texel_op, GLuint *state, const GLfloat *factor );
extern void
i830EmitTextureBlend( i830ContextPtr i830 );
i830EmitTextureBlend( struct i830_context *i830 );
/* i830_state.c
@ -184,34 +187,29 @@ extern void
i830InitStateFuncs( struct dd_function_table *functions );
extern void
i830EmitState( i830ContextPtr i830 );
i830EmitState( struct i830_context *i830 );
extern void
i830InitState( i830ContextPtr i830 );
i830InitState( struct i830_context *i830 );
/* i830_metaops.c
*/
extern GLboolean
i830TryTextureReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels );
extern GLboolean
i830TryTextureDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels );
extern void
i830ClearWithTris( intelContextPtr intel, GLbitfield mask,
GLboolean all, GLint cx, GLint cy, GLint cw, GLint ch);
i830InitMetaFuncs( struct i830_context *i830 );
extern void
i830RotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
i830RotateWindow(struct intel_context *intel, __DRIdrawablePrivate *dPriv,
GLuint srcBuf);
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:
*/
static INLINE struct i830_context *
i830_context( GLcontext *ctx )
{
return (struct i830_context *)ctx;
}
#endif

File diff suppressed because it is too large Load diff

View file

@ -407,7 +407,7 @@
#define LOGICOP_SET 0xf
#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
#define ENABLE_STENCIL_TEST_MASK (1<<17)
#define STENCIL_TEST_MASK(x) ((x)<<8)
#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
#define ENABLE_STENCIL_WRITE_MASK (1<<16)
#define STENCIL_WRITE_MASK(x) ((x)&0xff)

View file

@ -36,6 +36,7 @@
#include "intel_screen.h"
#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "i830_context.h"
#include "i830_reg.h"
@ -44,7 +45,7 @@ static void
i830StencilFuncSeparate(GLcontext *ctx, GLenum face, GLenum func, GLint ref,
GLuint mask)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
int test = intel_translate_compare_func(func);
mask = mask & 0xff;
@ -69,7 +70,7 @@ i830StencilFuncSeparate(GLcontext *ctx, GLenum face, GLenum func, GLint ref,
static void
i830StencilMaskSeparate(GLcontext *ctx, GLenum face, GLuint mask)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
if (INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s : mask 0x%x\n", __FUNCTION__, mask);
@ -86,7 +87,7 @@ static void
i830StencilOpSeparate(GLcontext *ctx, GLenum face, GLenum fail, GLenum zfail,
GLenum zpass)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
int fop, dfop, dpop;
if (INTEL_DEBUG&DEBUG_DRI)
@ -193,7 +194,7 @@ i830StencilOpSeparate(GLcontext *ctx, GLenum face, GLenum fail, GLenum zfail,
static void i830AlphaFunc(GLcontext *ctx, GLenum func, GLfloat ref)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
int test = intel_translate_compare_func(func);
GLubyte refByte;
GLuint refInt;
@ -221,7 +222,7 @@ static void i830AlphaFunc(GLcontext *ctx, GLenum func, GLfloat ref)
*/
static void i830EvalLogicOpBlendState(GLcontext *ctx)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
I830_STATECHANGE(i830, I830_UPLOAD_CTX);
@ -245,7 +246,7 @@ static void i830EvalLogicOpBlendState(GLcontext *ctx)
static void i830BlendColor(GLcontext *ctx, const GLfloat color[4])
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
GLubyte r, g, b, a;
if (INTEL_DEBUG&DEBUG_DRI)
@ -268,7 +269,7 @@ static void i830BlendColor(GLcontext *ctx, const GLfloat color[4])
*/
static void i830_set_blend_state( GLcontext * ctx )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
int funcA;
int funcRGB;
int eqnA;
@ -406,7 +407,7 @@ static void i830BlendFuncSeparate(GLcontext *ctx, GLenum sfactorRGB,
static void i830DepthFunc(GLcontext *ctx, GLenum func)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
int test = intel_translate_compare_func(func);
if (INTEL_DEBUG&DEBUG_DRI)
@ -420,7 +421,7 @@ static void i830DepthFunc(GLcontext *ctx, GLenum func)
static void i830DepthMask(GLcontext *ctx, GLboolean flag)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
if (INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s flag (%d)\n", __FUNCTION__, flag);
@ -443,7 +444,7 @@ static void i830DepthMask(GLcontext *ctx, GLboolean flag)
*/
static void i830PolygonStipple( GLcontext *ctx, const GLubyte *mask )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
const GLubyte *m = mask;
GLubyte p[4];
int i,j,k;
@ -496,15 +497,14 @@ static void i830PolygonStipple( GLcontext *ctx, const GLubyte *mask )
static void i830Scissor(GLcontext *ctx, GLint x, GLint y,
GLsizei w, GLsizei h)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
intelScreenPrivate *screen = i830->intel.intelScreen;
struct i830_context *i830 = i830_context(ctx);
int x1, y1, x2, y2;
if (!i830->intel.driDrawable)
if (!ctx->DrawBuffer)
return;
x1 = x;
y1 = i830->intel.driDrawable->h - (y + h);
y1 = ctx->DrawBuffer->Height - (y + h);
x2 = x + w - 1;
y2 = y1 + h - 1;
@ -512,16 +512,10 @@ static void i830Scissor(GLcontext *ctx, GLint x, GLint y,
fprintf(stderr, "[%s] x(%d) y(%d) w(%d) h(%d)\n", __FUNCTION__,
x, y, w, h);
if (x1 < 0) x1 = 0;
if (y1 < 0) y1 = 0;
if (x2 < 0) x2 = 0;
if (y2 < 0) y2 = 0;
if (x2 >= screen->width) x2 = screen->width-1;
if (y2 >= screen->height) y2 = screen->height-1;
if (x1 >= screen->width) x1 = screen->width-1;
if (y1 >= screen->height) y1 = screen->height-1;
x1 = CLAMP(x1, 0, ctx->DrawBuffer->Width - 1);
y1 = CLAMP(y1, 0, ctx->DrawBuffer->Height - 1);
x2 = CLAMP(x2, 0, ctx->DrawBuffer->Width - 1);
y2 = CLAMP(y2, 0, ctx->DrawBuffer->Height - 1);
I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
i830->state.Buffer[I830_DESTREG_SR1] = (y1 << 16) | (x1 & 0xffff);
@ -530,7 +524,7 @@ static void i830Scissor(GLcontext *ctx, GLint x, GLint y,
static void i830LogicOp(GLcontext *ctx, GLenum opcode)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
int tmp = intel_translate_logic_op( opcode );
if (INTEL_DEBUG&DEBUG_DRI)
@ -545,7 +539,7 @@ static void i830LogicOp(GLcontext *ctx, GLenum opcode)
static void i830CullFaceFrontFace(GLcontext *ctx, GLenum unused)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
GLuint mode;
if (INTEL_DEBUG&DEBUG_DRI)
@ -573,7 +567,7 @@ static void i830CullFaceFrontFace(GLcontext *ctx, GLenum unused)
static void i830LineWidth( GLcontext *ctx, GLfloat widthf )
{
i830ContextPtr i830 = I830_CONTEXT( ctx );
struct i830_context *i830 = i830_context( ctx );
int width;
int state5;
@ -594,7 +588,7 @@ static void i830LineWidth( GLcontext *ctx, GLfloat widthf )
static void i830PointSize(GLcontext *ctx, GLfloat size)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
GLint point_size = (int)size;
if (INTEL_DEBUG&DEBUG_DRI)
@ -616,7 +610,7 @@ static void i830ColorMask(GLcontext *ctx,
GLboolean r, GLboolean g,
GLboolean b, GLboolean a)
{
i830ContextPtr i830 = I830_CONTEXT( ctx );
struct i830_context *i830 = i830_context( ctx );
GLuint tmp = 0;
if (INTEL_DEBUG&DEBUG_DRI)
@ -638,7 +632,7 @@ static void i830ColorMask(GLcontext *ctx,
static void update_specular( GLcontext *ctx )
{
i830ContextPtr i830 = I830_CONTEXT( ctx );
struct i830_context *i830 = i830_context( ctx );
I830_STATECHANGE(i830, I830_UPLOAD_CTX);
i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_SPEC_ADD_MASK;
@ -664,7 +658,7 @@ static void i830LightModelfv(GLcontext *ctx, GLenum pname,
*/
static void i830ShadeModel(GLcontext *ctx, GLenum mode)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
I830_STATECHANGE(i830, I830_UPLOAD_CTX);
@ -690,7 +684,7 @@ static void i830ShadeModel(GLcontext *ctx, GLenum mode)
*/
static void i830Fogfv(GLcontext *ctx, GLenum pname, const GLfloat *param)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
if (INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s\n", __FUNCTION__);
@ -710,7 +704,7 @@ static void i830Fogfv(GLcontext *ctx, GLenum pname, const GLfloat *param)
static void i830Enable(GLcontext *ctx, GLenum cap, GLboolean state)
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct i830_context *i830 = i830_context(ctx);
switch(cap) {
case GL_LIGHTING:
@ -806,20 +800,28 @@ static void i830Enable(GLcontext *ctx, GLenum cap, GLboolean state)
break;
case GL_STENCIL_TEST:
if (i830->intel.hw_stencil) {
I830_STATECHANGE(i830, I830_UPLOAD_CTX);
{
GLboolean hw_stencil = GL_FALSE;
if (ctx->DrawBuffer) {
struct intel_renderbuffer *irbStencil
= intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_STENCIL);
hw_stencil = (irbStencil && irbStencil->region);
}
if (hw_stencil) {
I830_STATECHANGE(i830, I830_UPLOAD_CTX);
if (state) {
i830->state.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_STENCIL_TEST;
i830->state.Ctx[I830_CTXREG_ENABLES_2] |= ENABLE_STENCIL_WRITE;
} else {
i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_STENCIL_TEST;
i830->state.Ctx[I830_CTXREG_ENABLES_2] &= ~ENABLE_STENCIL_WRITE;
i830->state.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_STENCIL_TEST;
i830->state.Ctx[I830_CTXREG_ENABLES_2] |= DISABLE_STENCIL_WRITE;
}
} else {
FALLBACK( &i830->intel, I830_FALLBACK_STENCIL, state );
if (state) {
i830->state.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_STENCIL_TEST;
i830->state.Ctx[I830_CTXREG_ENABLES_2] |= ENABLE_STENCIL_WRITE;
} else {
i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_STENCIL_TEST;
i830->state.Ctx[I830_CTXREG_ENABLES_2] &= ~ENABLE_STENCIL_WRITE;
i830->state.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_STENCIL_TEST;
i830->state.Ctx[I830_CTXREG_ENABLES_2] |= DISABLE_STENCIL_WRITE;
}
} else {
FALLBACK( &i830->intel, I830_FALLBACK_STENCIL, state );
}
}
break;
@ -844,7 +846,7 @@ static void i830Enable(GLcontext *ctx, GLenum cap, GLboolean state)
}
static void i830_init_packets( i830ContextPtr i830 )
static void i830_init_packets( struct i830_context *i830 )
{
intelScreenPrivate *screen = i830->intel.intelScreen;
@ -902,6 +904,7 @@ static void i830_init_packets( i830ContextPtr i830 )
DISABLE_COLOR_BLEND |
DISABLE_DEPTH_TEST);
#if 000 /* XXX all the stencil enable state is set in i830Enable(), right? */
if (i830->intel.hw_stencil) {
i830->state.Ctx[I830_CTXREG_ENABLES_2] = (_3DSTATE_ENABLES_2_CMD |
ENABLE_STENCIL_WRITE |
@ -911,7 +914,9 @@ static void i830_init_packets( i830ContextPtr i830 )
/* set no color comps disabled */
ENABLE_COLOR_WRITE |
ENABLE_DEPTH_WRITE);
} else {
} else
#endif
{
i830->state.Ctx[I830_CTXREG_ENABLES_2] = (_3DSTATE_ENABLES_2_CMD |
DISABLE_STENCIL_WRITE |
ENABLE_TEX_CACHE |
@ -1012,13 +1017,10 @@ static void i830_init_packets( i830ContextPtr i830 )
(BUF_3D_ID_DEPTH |
BUF_3D_PITCH(screen->depth.pitch) | /* pitch in bytes */
BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_DBUFADDR2] = screen->depth.offset;
i830->state.Buffer[I830_DESTREG_DV0] = _3DSTATE_DST_BUF_VARS_CMD;
switch (screen->fbFormat) {
case DV_PF_555:
case DV_PF_565:
i830->state.Buffer[I830_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | /* .5 */
@ -1068,7 +1070,7 @@ void i830InitStateFuncs( struct dd_function_table *functions )
functions->StencilOpSeparate = i830StencilOpSeparate;
}
void i830InitState( i830ContextPtr i830 )
void i830InitState( struct i830_context *i830 )
{
GLcontext *ctx = &i830->intel.ctx;
@ -1080,7 +1082,8 @@ void i830InitState( i830ContextPtr i830 )
i830->current = &i830->state;
i830->state.emitted = 0;
i830->state.active = (I830_UPLOAD_TEXBLEND(0) |
i830->state.active = (I830_UPLOAD_INVARIENT |
I830_UPLOAD_TEXBLEND(0) |
I830_UPLOAD_STIPPLE |
I830_UPLOAD_CTX |
I830_UPLOAD_BUFFERS);

View file

@ -45,261 +45,12 @@
/**
* Set the texture wrap modes.
*
* The i830M (and related graphics cores) do not support GL_CLAMP. The Intel
* drivers for "other operating systems" implement GL_CLAMP as
* GL_CLAMP_TO_EDGE, so the same is done here.
*
* \param t Texture object whose wrap modes are to be set
* \param swrap Wrap mode for the \a s texture coordinate
* \param twrap Wrap mode for the \a t texture coordinate
*/
static void i830SetTexWrapping(i830TextureObjectPtr tex,
GLenum swrap,
GLenum twrap)
{
tex->Setup[I830_TEXREG_MCS] &= ~(TEXCOORD_ADDR_U_MASK|TEXCOORD_ADDR_V_MASK);
switch( swrap ) {
case GL_REPEAT:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_WRAP);
break;
case GL_CLAMP:
case GL_CLAMP_TO_EDGE:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_CLAMP);
break;
case GL_CLAMP_TO_BORDER:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_CLAMP_BORDER);
break;
case GL_MIRRORED_REPEAT:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_MIRROR);
break;
default:
break;
}
switch( twrap ) {
case GL_REPEAT:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_WRAP);
break;
case GL_CLAMP:
case GL_CLAMP_TO_EDGE:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_CLAMP);
break;
case GL_CLAMP_TO_BORDER:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_CLAMP_BORDER);
break;
case GL_MIRRORED_REPEAT:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_MIRROR);
break;
default:
break;
}
}
/**
* Set the texture magnification and minification modes.
*
* \param t Texture whose filter modes are to be set
* \param minf Texture minification mode
* \param magf Texture magnification mode
* \param bias LOD bias for this texture unit.
*/
static void i830SetTexFilter( i830TextureObjectPtr t, GLenum minf, GLenum magf,
GLfloat maxanisotropy )
{
int minFilt = 0, mipFilt = 0, magFilt = 0;
if(INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s\n", __FUNCTION__);
if ( maxanisotropy > 1.0 ) {
minFilt = FILTER_ANISOTROPIC;
magFilt = FILTER_ANISOTROPIC;
}
else {
switch (minf) {
case GL_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NONE;
break;
case GL_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NONE;
break;
case GL_NEAREST_MIPMAP_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_LINEAR_MIPMAP_NEAREST:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_LINEAR;
break;
case GL_LINEAR_MIPMAP_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_LINEAR;
break;
default:
break;
}
switch (magf) {
case GL_NEAREST:
magFilt = FILTER_NEAREST;
break;
case GL_LINEAR:
magFilt = FILTER_LINEAR;
break;
default:
break;
}
}
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MIN_FILTER_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MIP_FILTER_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MAG_FILTER_MASK;
t->Setup[I830_TEXREG_TM0S3] |= ((minFilt << TM0S3_MIN_FILTER_SHIFT) |
(mipFilt << TM0S3_MIP_FILTER_SHIFT) |
(magFilt << TM0S3_MAG_FILTER_SHIFT));
}
static void i830SetTexBorderColor(i830TextureObjectPtr t, GLubyte color[4])
{
if(INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s\n", __FUNCTION__);
t->Setup[I830_TEXREG_TM0S4] =
INTEL_PACKCOLOR8888(color[0],color[1],color[2],color[3]);
}
/**
* Allocate space for and load the mesa images into the texture memory block.
* This will happen before drawing with a new texture, or drawing with a
* texture after it was swapped out or teximaged again.
*/
intelTextureObjectPtr i830AllocTexObj( struct gl_texture_object *texObj )
{
i830TextureObjectPtr t = CALLOC_STRUCT( i830_texture_object );
if ( !t )
return NULL;
texObj->DriverData = t;
t->intel.base.tObj = texObj;
t->intel.dirty = I830_UPLOAD_TEX_ALL;
make_empty_list( &t->intel.base );
t->Setup[I830_TEXREG_TM0LI] = 0; /* not used */
t->Setup[I830_TEXREG_TM0S0] = 0;
t->Setup[I830_TEXREG_TM0S1] = 0;
t->Setup[I830_TEXREG_TM0S2] = 0;
t->Setup[I830_TEXREG_TM0S3] = 0;
t->Setup[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
MAP_UNIT(0) |
ENABLE_TEXCOORD_PARAMS |
TEXCOORDS_ARE_NORMAL |
TEXCOORDTYPE_CARTESIAN |
ENABLE_ADDR_V_CNTL |
TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_WRAP) |
ENABLE_ADDR_U_CNTL |
TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_WRAP));
i830SetTexWrapping( t, texObj->WrapS, texObj->WrapT );
i830SetTexFilter( t, texObj->MinFilter, texObj->MagFilter,
texObj->MaxAnisotropy );
i830SetTexBorderColor( t, texObj->_BorderChan );
return &t->intel;
}
static void i830TexParameter( GLcontext *ctx, GLenum target,
struct gl_texture_object *tObj,
GLenum pname, const GLfloat *params )
{
i830TextureObjectPtr t = (i830TextureObjectPtr) tObj->DriverData;
if (!t)
return;
switch (pname) {
case GL_TEXTURE_MIN_FILTER:
case GL_TEXTURE_MAG_FILTER:
case GL_TEXTURE_MAX_ANISOTROPY_EXT:
i830SetTexFilter( t, tObj->MinFilter, tObj->MagFilter,
tObj->MaxAnisotropy);
break;
case GL_TEXTURE_WRAP_S:
case GL_TEXTURE_WRAP_T:
i830SetTexWrapping( t, tObj->WrapS, tObj->WrapT );
break;
case GL_TEXTURE_BORDER_COLOR:
i830SetTexBorderColor( t, tObj->_BorderChan );
break;
case GL_TEXTURE_BASE_LEVEL:
case GL_TEXTURE_MAX_LEVEL:
case GL_TEXTURE_MIN_LOD:
case GL_TEXTURE_MAX_LOD:
/* The i830 and its successors can do a lot of this without
* reloading the textures. A project for someone?
*/
intelFlush( ctx );
driSwapOutTextureObject( (driTextureObject *) t );
break;
default:
return;
}
t->intel.dirty = I830_UPLOAD_TEX_ALL;
}
static void i830TexEnv( GLcontext *ctx, GLenum target,
GLenum pname, const GLfloat *param )
{
i830ContextPtr i830 = I830_CONTEXT( ctx );
GLuint unit = ctx->Texture.CurrentUnit;
switch (pname) {
case GL_TEXTURE_ENV_COLOR:
#if 0
{
GLubyte r, g, b, a;
GLuint col;
UNCLAMPED_FLOAT_TO_UBYTE(r, param[RCOMP]);
UNCLAMPED_FLOAT_TO_UBYTE(g, param[GCOMP]);
UNCLAMPED_FLOAT_TO_UBYTE(b, param[BCOMP]);
UNCLAMPED_FLOAT_TO_UBYTE(a, param[ACOMP]);
col = ((a << 24) | (r << 16) | (g << 8) | b);
if (col != i830->state.TexEnv[unit][I830_TEXENVREG_COL1]) {
I830_STATECHANGE(i830, I830_UPLOAD_TEXENV);
i830->state.TexEnv[unit][I830_TEXENVREG_COL1] = col;
}
break;
}
#endif
case GL_TEXTURE_ENV_MODE:
case GL_COMBINE_RGB:
case GL_COMBINE_ALPHA:
@ -320,13 +71,13 @@ static void i830TexEnv( GLcontext *ctx, GLenum target,
break;
case GL_TEXTURE_LOD_BIAS: {
struct i830_context *i830 = i830_context( ctx );
GLuint unit = ctx->Texture.CurrentUnit;
int b = (int) ((*param) * 16.0);
if (b > 63) b = 63;
if (b < -64) b = -64;
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_TM0S3] &= ~TM0S3_LOD_BIAS_MASK;
i830->state.Tex[unit][I830_TEXREG_TM0S3] |=
((b << TM0S3_LOD_BIAS_SHIFT) & TM0S3_LOD_BIAS_MASK);
i830->lodbias_tm0s3[unit] = ((b << TM0S3_LOD_BIAS_SHIFT) & TM0S3_LOD_BIAS_MASK);
break;
}
@ -335,22 +86,10 @@ static void i830TexEnv( GLcontext *ctx, GLenum target,
}
}
static void i830BindTexture( GLcontext *ctx, GLenum target,
struct gl_texture_object *texObj )
{
i830TextureObjectPtr tex;
if (!texObj->DriverData)
i830AllocTexObj( texObj );
tex = (i830TextureObjectPtr)texObj->DriverData;
}
void i830InitTextureFuncs( struct dd_function_table *functions )
{
functions->BindTexture = i830BindTexture;
functions->TexEnv = i830TexEnv;
functions->TexParameter = i830TexParameter;
}

View file

@ -101,7 +101,7 @@ static GLuint emit_factor( GLuint blendUnit, GLuint *state, GLuint count,
}
static __inline__ GLuint GetTexelOp(GLint unit)
static INLINE GLuint GetTexelOp(GLint unit)
{
switch(unit) {
case 0: return TEXBLENDARG_TEXEL0;
@ -132,7 +132,7 @@ static __inline__ GLuint GetTexelOp(GLint unit)
* partial support for the extension?
*/
GLuint
i830SetTexEnvCombine(i830ContextPtr i830,
i830SetTexEnvCombine(struct i830_context *i830,
const struct gl_tex_env_combine_state * combine,
GLint blendUnit,
GLuint texel_op,
@ -394,7 +394,7 @@ i830SetTexEnvCombine(i830ContextPtr i830,
}
static void emit_texblend( i830ContextPtr i830, GLuint unit, GLuint blendUnit,
static void emit_texblend( struct i830_context *i830, GLuint unit, GLuint blendUnit,
GLboolean last_stage )
{
struct gl_texture_unit *texUnit = &i830->intel.ctx.Texture.Unit[unit];
@ -423,7 +423,7 @@ static void emit_texblend( i830ContextPtr i830, GLuint unit, GLuint blendUnit,
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(blendUnit), GL_TRUE);
}
static void emit_passthrough( i830ContextPtr i830 )
static void emit_passthrough( struct i830_context *i830 )
{
GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
GLuint unit = 0;
@ -442,7 +442,7 @@ static void emit_passthrough( i830ContextPtr i830 )
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(unit), GL_TRUE);
}
void i830EmitTextureBlend( i830ContextPtr i830 )
void i830EmitTextureBlend( struct i830_context *i830 )
{
GLcontext *ctx = &i830->intel.ctx;
GLuint unit, last_stage = 0, blendunit = 0;

View file

@ -38,441 +38,274 @@
#include "intel_screen.h"
#include "intel_ioctl.h"
#include "intel_tex.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "i830_context.h"
#include "i830_reg.h"
static const GLint initial_offsets[6][2] = { {0,0},
{0,2},
{1,0},
{1,2},
{1,1},
{1,3} };
static const GLint step_offsets[6][2] = { {0,2},
{0,2},
{-1,2},
{-1,2},
{-1,1},
{-1,1} };
#define I830_TEX_UNIT_ENABLED(unit) (1<<unit)
static GLboolean i830SetTexImages( i830ContextPtr i830,
struct gl_texture_object *tObj )
static GLuint translate_texture_format( GLuint mesa_format )
{
GLuint total_height, pitch, i, textureFormat;
i830TextureObjectPtr t = (i830TextureObjectPtr) tObj->DriverData;
const struct gl_texture_image *baseImage = tObj->Image[0][tObj->BaseLevel];
GLint firstLevel, lastLevel, numLevels;
switch( baseImage->TexFormat->MesaFormat ) {
switch (mesa_format) {
case MESA_FORMAT_L8:
t->intel.texelBytes = 1;
textureFormat = MAPSURF_8BIT | MT_8BIT_L8;
break;
return MAPSURF_8BIT | MT_8BIT_L8;
case MESA_FORMAT_I8:
t->intel.texelBytes = 1;
textureFormat = MAPSURF_8BIT | MT_8BIT_I8;
break;
return MAPSURF_8BIT | MT_8BIT_I8;
case MESA_FORMAT_A8:
t->intel.texelBytes = 1;
textureFormat = MAPSURF_8BIT | MT_8BIT_I8; /* Kludge -- check with conform, glean */
break;
return MAPSURF_8BIT | MT_8BIT_I8; /* Kludge! */
case MESA_FORMAT_AL88:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_AY88;
break;
return MAPSURF_16BIT | MT_16BIT_AY88;
case MESA_FORMAT_RGB565:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_RGB565;
break;
return MAPSURF_16BIT | MT_16BIT_RGB565;
case MESA_FORMAT_ARGB1555:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_ARGB1555;
break;
return MAPSURF_16BIT | MT_16BIT_ARGB1555;
case MESA_FORMAT_ARGB4444:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_ARGB4444;
break;
return MAPSURF_16BIT | MT_16BIT_ARGB4444;
case MESA_FORMAT_ARGB8888:
t->intel.texelBytes = 4;
textureFormat = MAPSURF_32BIT | MT_32BIT_ARGB8888;
break;
return MAPSURF_32BIT | MT_32BIT_ARGB8888;
case MESA_FORMAT_YCBCR_REV:
t->intel.texelBytes = 2;
textureFormat = (MAPSURF_422 | MT_422_YCRCB_NORMAL |
TM0S1_COLORSPACE_CONVERSION);
break;
return (MAPSURF_422 | MT_422_YCRCB_NORMAL);
case MESA_FORMAT_YCBCR:
t->intel.texelBytes = 2;
textureFormat = (MAPSURF_422 | MT_422_YCRCB_SWAPY | /* ??? */
TM0S1_COLORSPACE_CONVERSION);
break;
return (MAPSURF_422 | MT_422_YCRCB_SWAPY);
case MESA_FORMAT_RGB_FXT1:
case MESA_FORMAT_RGBA_FXT1:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_COMPRESSED | MT_COMPRESS_FXT1;
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_FXT1);
case MESA_FORMAT_RGBA_DXT1:
case MESA_FORMAT_RGB_DXT1:
/*
* DXTn pitches are Width/4 * blocksize in bytes
* for DXT1: blocksize=8 so Width/4*8 = Width * 2
* for DXT3/5: blocksize=16 so Width/4*16 = Width * 4
*/
t->intel.texelBytes = 2;
textureFormat = (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
case MESA_FORMAT_RGBA_DXT3:
t->intel.texelBytes = 4;
textureFormat = (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
case MESA_FORMAT_RGBA_DXT5:
t->intel.texelBytes = 4;
textureFormat = (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
default:
fprintf(stderr, "%s: bad image format\n", __FUNCTION__);
fprintf(stderr, "%s: bad image format %x\n", __FUNCTION__,
mesa_format);
abort();
return 0;
}
/* Compute which mipmap levels we really want to send to the hardware.
* This depends on the base image size, GL_TEXTURE_MIN_LOD,
* GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
* Yes, this looks overly complicated, but it's all needed.
*/
driCalculateTextureFirstLastLevel( (driTextureObject *) t );
/* Figure out the amount of memory required to hold all the mipmap
* levels. Choose the smallest pitch to accomodate the largest
* mipmap:
*/
firstLevel = t->intel.base.firstLevel;
lastLevel = t->intel.base.lastLevel;
numLevels = lastLevel - firstLevel + 1;
/* All images must be loaded at this pitch. Count the number of
* lines required:
*/
switch (tObj->Target) {
case GL_TEXTURE_CUBE_MAP: {
const GLuint dim = tObj->Image[0][firstLevel]->Width;
GLuint face;
pitch = dim * t->intel.texelBytes;
pitch *= 2; /* double pitch for cube layouts */
pitch = (pitch + 3) & ~3;
total_height = dim * 4;
for ( face = 0 ; face < 6 ; face++) {
GLuint x = initial_offsets[face][0] * dim;
GLuint y = initial_offsets[face][1] * dim;
GLuint d = dim;
t->intel.base.dirty_images[face] = ~0;
assert(tObj->Image[face][firstLevel]->Width == dim);
assert(tObj->Image[face][firstLevel]->Height == dim);
for (i = 0; i < numLevels; i++) {
t->intel.image[face][i].image = tObj->Image[face][firstLevel + i];
if (!t->intel.image[face][i].image) {
fprintf(stderr, "no image %d %d\n", face, i);
break; /* can't happen */
}
t->intel.image[face][i].offset =
y * pitch + x * t->intel.texelBytes;
t->intel.image[face][i].internalFormat = baseImage->_BaseFormat;
d >>= 1;
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
}
}
break;
}
default:
pitch = tObj->Image[0][firstLevel]->Width * t->intel.texelBytes;
pitch = (pitch + 3) & ~3;
t->intel.base.dirty_images[0] = ~0;
for ( total_height = i = 0 ; i < numLevels ; i++ ) {
t->intel.image[0][i].image = tObj->Image[0][firstLevel + i];
if (!t->intel.image[0][i].image)
break;
t->intel.image[0][i].offset = total_height * pitch;
t->intel.image[0][i].internalFormat = baseImage->_BaseFormat;
if (t->intel.image[0][i].image->IsCompressed)
{
if (t->intel.image[0][i].image->Height > 4)
total_height += t->intel.image[0][i].image->Height/4;
else
total_height += 1;
}
else
total_height += MAX2(2, t->intel.image[0][i].image->Height);
}
break;
}
t->intel.Pitch = pitch;
t->intel.base.totalSize = total_height*pitch;
t->intel.max_level = i-1;
t->Setup[I830_TEXREG_TM0S1] =
(((tObj->Image[0][firstLevel]->Height - 1) << TM0S1_HEIGHT_SHIFT) |
((tObj->Image[0][firstLevel]->Width - 1) << TM0S1_WIDTH_SHIFT) |
textureFormat);
t->Setup[I830_TEXREG_TM0S2] =
(((pitch / 4) - 1) << TM0S2_PITCH_SHIFT) |
TM0S2_CUBE_FACE_ENA_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MAX_MIP_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MIN_MIP_MASK;
t->Setup[I830_TEXREG_TM0S3] |= ((numLevels - 1)*4) << TM0S3_MIN_MIP_SHIFT;
t->intel.dirty = I830_UPLOAD_TEX_ALL;
return intelUploadTexImages( &i830->intel, &t->intel, 0 );
}
static void i830_import_tex_unit( i830ContextPtr i830,
i830TextureObjectPtr t,
GLuint unit )
{
if(INTEL_DEBUG&DEBUG_TEXTURE)
fprintf(stderr, "%s unit(%d)\n", __FUNCTION__, unit);
if (i830->intel.CurrentTexObj[unit])
i830->intel.CurrentTexObj[unit]->base.bound &= ~(1U << unit);
i830->intel.CurrentTexObj[unit] = (intelTextureObjectPtr)t;
t->intel.base.bound |= (1 << unit);
I830_STATECHANGE( i830, I830_UPLOAD_TEX(unit) );
i830->state.Tex[unit][I830_TEXREG_TM0LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
(LOAD_TEXTURE_MAP0 << unit) | 4);
i830->state.Tex[unit][I830_TEXREG_TM0S0] = (TM0S0_USE_FENCE |
t->intel.TextureOffset);
i830->state.Tex[unit][I830_TEXREG_TM0S1] = t->Setup[I830_TEXREG_TM0S1];
i830->state.Tex[unit][I830_TEXREG_TM0S2] = t->Setup[I830_TEXREG_TM0S2];
i830->state.Tex[unit][I830_TEXREG_TM0S3] &= TM0S3_LOD_BIAS_MASK;
i830->state.Tex[unit][I830_TEXREG_TM0S3] |= (t->Setup[I830_TEXREG_TM0S3] &
~TM0S3_LOD_BIAS_MASK);
i830->state.Tex[unit][I830_TEXREG_TM0S4] = t->Setup[I830_TEXREG_TM0S4];
i830->state.Tex[unit][I830_TEXREG_MCS] = (t->Setup[I830_TEXREG_MCS] &
~MAP_UNIT_MASK);
i830->state.Tex[unit][I830_TEXREG_CUBE] = t->Setup[I830_TEXREG_CUBE];
i830->state.Tex[unit][I830_TEXREG_MCS] |= MAP_UNIT(unit);
t->intel.dirty &= ~I830_UPLOAD_TEX(unit);
}
static GLboolean enable_tex_common( GLcontext *ctx, GLuint unit )
/* The i915 (and related graphics cores) do not support GL_CLAMP. The
* Intel drivers for "other operating systems" implement GL_CLAMP as
* GL_CLAMP_TO_EDGE, so the same is done here.
*/
static GLuint translate_wrap_mode( GLenum wrap )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
struct gl_texture_object *tObj = texUnit->_Current;
i830TextureObjectPtr t = (i830TextureObjectPtr)tObj->DriverData;
switch( wrap ) {
case GL_REPEAT:
return TEXCOORDMODE_WRAP;
case GL_CLAMP:
case GL_CLAMP_TO_EDGE:
return TEXCOORDMODE_CLAMP; /* not really correct */
case GL_CLAMP_TO_BORDER:
return TEXCOORDMODE_CLAMP_BORDER;
case GL_MIRRORED_REPEAT:
return TEXCOORDMODE_MIRROR;
default:
return TEXCOORDMODE_WRAP;
}
}
if (0) fprintf(stderr, "%s\n", __FUNCTION__);
/* Fallback if there's a texture border */
if ( tObj->Image[0][tObj->BaseLevel]->Border > 0 ) {
fprintf(stderr, "Texture border\n");
return GL_FALSE;
/* Recalculate all state from scratch. Perhaps not the most
* efficient, but this has gotten complex enough that we need
* something which is understandable and reliable.
*/
static GLboolean i830_update_tex_unit( struct intel_context *intel,
GLuint unit,
GLuint ss3 )
{
GLcontext *ctx = &intel->ctx;
struct i830_context *i830 = i830_context(ctx);
struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
struct gl_texture_image *firstImage;
GLuint *state = i830->state.Tex[unit];
memset(state, 0, sizeof(state));
if (!intel_finalize_mipmap_tree(intel, unit))
return GL_FALSE;
/* Get first image here, since intelObj->firstLevel will get set in
* the intel_finalize_mipmap_tree() call above.
*/
firstImage = tObj->Image[0][intelObj->firstLevel];
i830->state.tex_buffer[unit] = intelObj->mt->region->buffer;
i830->state.tex_offset[unit] = intel_miptree_image_offset(intelObj->mt, 0,
intelObj->firstLevel);
state[I830_TEXREG_TM0LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
(LOAD_TEXTURE_MAP0 << unit) | 4);
/* state[I830_TEXREG_TM0S0] = (TM0S0_USE_FENCE | */
/* t->intel.TextureOffset); */
state[I830_TEXREG_TM0S1] =
(((firstImage->Height - 1) << TM0S1_HEIGHT_SHIFT) |
((firstImage->Width - 1) << TM0S1_WIDTH_SHIFT) |
translate_texture_format( firstImage->TexFormat->MesaFormat));
state[I830_TEXREG_TM0S2] =
(((((intelObj->mt->pitch * intelObj->mt->cpp) / 4) - 1) << TM0S2_PITCH_SHIFT) |
TM0S2_CUBE_FACE_ENA_MASK);
{
if (tObj->Target == GL_TEXTURE_CUBE_MAP)
state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit) |
CUBE_NEGX_ENABLE |
CUBE_POSX_ENABLE |
CUBE_NEGY_ENABLE |
CUBE_POSY_ENABLE |
CUBE_NEGZ_ENABLE |
CUBE_POSZ_ENABLE);
else
state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit));
}
/* Upload teximages (not pipelined)
*/
if (t->intel.base.dirty_images[0]) {
if (!i830SetTexImages( i830, tObj )) {
{
GLuint minFilt, mipFilt, magFilt;
switch (tObj->MinFilter) {
case GL_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NONE;
break;
case GL_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NONE;
break;
case GL_NEAREST_MIPMAP_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_LINEAR_MIPMAP_NEAREST:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_LINEAR;
break;
case GL_LINEAR_MIPMAP_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_LINEAR;
break;
default:
return GL_FALSE;
}
if ( tObj->MaxAnisotropy > 1.0 ) {
minFilt = FILTER_ANISOTROPIC;
magFilt = FILTER_ANISOTROPIC;
}
else {
switch (tObj->MagFilter) {
case GL_NEAREST:
magFilt = FILTER_NEAREST;
break;
case GL_LINEAR:
magFilt = FILTER_LINEAR;
break;
default:
return GL_FALSE;
}
}
state[I830_TEXREG_TM0S3] = i830->lodbias_tm0s3[unit];
#if 0
/* YUV conversion:
*/
if (firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR ||
firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR_REV)
state[I830_TEXREG_TM0S3] |= SS2_COLORSPACE_CONVERSION;
#endif
state[I830_TEXREG_TM0S3] |= ((intelObj->lastLevel -
intelObj->firstLevel)*4) << TM0S3_MIN_MIP_SHIFT;
state[I830_TEXREG_TM0S3] |= ((minFilt << TM0S3_MIN_FILTER_SHIFT) |
(mipFilt << TM0S3_MIP_FILTER_SHIFT) |
(magFilt << TM0S3_MAG_FILTER_SHIFT));
}
/* Update state if this is a different texture object to last
* time.
*/
if (i830->intel.CurrentTexObj[unit] != &t->intel ||
(t->intel.dirty & I830_UPLOAD_TEX(unit))) {
i830_import_tex_unit( i830, t, unit);
{
GLenum ws = tObj->WrapS;
GLenum wt = tObj->WrapT;
/* 3D textures not available on i830
*/
if (tObj->Target == GL_TEXTURE_3D)
return GL_FALSE;
state[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
MAP_UNIT(unit) |
ENABLE_TEXCOORD_PARAMS |
ss3 |
ENABLE_ADDR_V_CNTL |
TEXCOORD_ADDR_V_MODE(translate_wrap_mode(wt)) |
ENABLE_ADDR_U_CNTL |
TEXCOORD_ADDR_U_MODE(translate_wrap_mode(ws)));
}
state[I830_TEXREG_TM0S4] = INTEL_PACKCOLOR8888(tObj->_BorderChan[0],
tObj->_BorderChan[1],
tObj->_BorderChan[2],
tObj->_BorderChan[3]);
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(unit), GL_TRUE);
return GL_TRUE;
}
static GLboolean enable_tex_rect( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
GLuint mcs = i830->state.Tex[unit][I830_TEXREG_MCS];
mcs &= ~TEXCOORDS_ARE_NORMAL;
mcs |= TEXCOORDS_ARE_IN_TEXELUNITS;
if ((mcs != i830->state.Tex[unit][I830_TEXREG_MCS])
|| (0 != i830->state.Tex[unit][I830_TEXREG_CUBE])) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_MCS] = mcs;
i830->state.Tex[unit][I830_TEXREG_CUBE] = 0;
}
return GL_TRUE;
}
static GLboolean enable_tex_2d( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
GLuint mcs = i830->state.Tex[unit][I830_TEXREG_MCS];
mcs &= ~TEXCOORDS_ARE_IN_TEXELUNITS;
mcs |= TEXCOORDS_ARE_NORMAL;
if ((mcs != i830->state.Tex[unit][I830_TEXREG_MCS])
|| (0 != i830->state.Tex[unit][I830_TEXREG_CUBE])) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_MCS] = mcs;
i830->state.Tex[unit][I830_TEXREG_CUBE] = 0;
}
return GL_TRUE;
}
static GLboolean enable_tex_cube( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
struct gl_texture_object *tObj = texUnit->_Current;
i830TextureObjectPtr t = (i830TextureObjectPtr)tObj->DriverData;
GLuint mcs = i830->state.Tex[unit][I830_TEXREG_MCS];
const GLuint cube = CUBE_NEGX_ENABLE | CUBE_POSX_ENABLE
| CUBE_NEGY_ENABLE | CUBE_POSY_ENABLE
| CUBE_NEGZ_ENABLE | CUBE_POSZ_ENABLE;
GLuint face;
mcs &= ~TEXCOORDS_ARE_IN_TEXELUNITS;
mcs |= TEXCOORDS_ARE_NORMAL;
if ((mcs != i830->state.Tex[unit][I830_TEXREG_MCS])
|| (cube != i830->state.Tex[unit][I830_TEXREG_CUBE])) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_MCS] = mcs;
i830->state.Tex[unit][I830_TEXREG_CUBE] = cube;
}
/* Upload teximages (not pipelined)
/* memcmp was already disabled, but definitely won't work as the
* region might now change and that wouldn't be detected:
*/
if ( t->intel.base.dirty_images[0] || t->intel.base.dirty_images[1] ||
t->intel.base.dirty_images[2] || t->intel.base.dirty_images[3] ||
t->intel.base.dirty_images[4] || t->intel.base.dirty_images[5] ) {
i830SetTexImages( i830, tObj );
}
I830_STATECHANGE( i830, I830_UPLOAD_TEX(unit) );
return GL_TRUE;
}
/* upload (per face) */
for (face = 0; face < 6; face++) {
if (t->intel.base.dirty_images[face]) {
if (!intelUploadTexImages( &i830->intel, &t->intel, face )) {
return GL_FALSE;
}
void i830UpdateTextureState( struct intel_context *intel )
{
struct i830_context *i830 = i830_context(&intel->ctx);
GLboolean ok = GL_TRUE;
GLuint i;
for (i = 0 ; i < I830_TEX_UNITS && ok ; i++) {
switch (intel->ctx.Texture.Unit[i]._ReallyEnabled) {
case TEXTURE_1D_BIT:
case TEXTURE_2D_BIT:
case TEXTURE_CUBE_BIT:
ok = i830_update_tex_unit( intel, i, TEXCOORDS_ARE_NORMAL );
break;
case TEXTURE_RECT_BIT:
ok = i830_update_tex_unit( intel, i, TEXCOORDS_ARE_IN_TEXELUNITS );
break;
case 0:
if (i830->state.active & I830_UPLOAD_TEX(i))
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(i), GL_FALSE);
break;
case TEXTURE_3D_BIT:
default:
ok = GL_FALSE;
break;
}
}
return GL_TRUE;
}
static GLboolean disable_tex( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
/* This is happening too often. I need to conditionally send diffuse
* state to the card. Perhaps a diffuse dirty flag of some kind.
* Will need to change this logic if more than 2 texture units are
* used. We need to only do this up to the last unit enabled, or unit
* one if nothing is enabled.
*/
if ( i830->intel.CurrentTexObj[unit] != NULL ) {
/* The old texture is no longer bound to this texture unit.
* Mark it as such.
*/
i830->intel.CurrentTexObj[unit]->base.bound &= ~(1U << 0);
i830->intel.CurrentTexObj[unit] = NULL;
}
return GL_TRUE;
}
static GLboolean i830UpdateTexUnit( GLcontext *ctx, GLuint unit )
{
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
if (texUnit->_ReallyEnabled &&
INTEL_CONTEXT(ctx)->intelScreen->tex.size < 2048 * 1024)
return GL_FALSE;
switch(texUnit->_ReallyEnabled) {
case TEXTURE_1D_BIT:
case TEXTURE_2D_BIT:
return (enable_tex_common( ctx, unit ) &&
enable_tex_2d( ctx, unit ));
case TEXTURE_RECT_BIT:
return (enable_tex_common( ctx, unit ) &&
enable_tex_rect( ctx, unit ));
case TEXTURE_CUBE_BIT:
return (enable_tex_common( ctx, unit ) &&
enable_tex_cube( ctx, unit ));
case 0:
return disable_tex( ctx, unit );
default:
return GL_FALSE;
}
}
void i830UpdateTextureState( intelContextPtr intel )
{
i830ContextPtr i830 = I830_CONTEXT(intel);
GLcontext *ctx = &intel->ctx;
GLboolean ok;
if (0) fprintf(stderr, "%s\n", __FUNCTION__);
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX_ALL, GL_FALSE);
ok = (i830UpdateTexUnit( ctx, 0 ) &&
i830UpdateTexUnit( ctx, 1 ) &&
i830UpdateTexUnit( ctx, 2 ) &&
i830UpdateTexUnit( ctx, 3 ));
FALLBACK( intel, I830_FALLBACK_TEXTURE, !ok );
if (ok)
@ -481,3 +314,8 @@ void i830UpdateTextureState( intelContextPtr intel )

View file

@ -30,11 +30,12 @@
#include "i830_reg.h"
#include "intel_batchbuffer.h"
#include "intel_regions.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
static GLboolean i830_check_vertex_size( intelContextPtr intel,
static GLboolean i830_check_vertex_size( struct intel_context *intel,
GLuint expected );
#define SZ_TO_HW(sz) ((sz-2)&0x3)
@ -59,10 +60,10 @@ do { \
#define VRTX_TEX_SET_FMT(n, x) ((x)<<((n)*2))
#define TEXBIND_SET(n, x) ((x)<<((n)*4))
static void i830_render_start( intelContextPtr intel )
static void i830_render_start( struct intel_context *intel )
{
GLcontext *ctx = &intel->ctx;
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
DECLARE_RENDERINPUTS(index_bitset);
@ -166,6 +167,7 @@ static void i830_render_start( intelContextPtr intel )
v2 != i830->state.Ctx[I830_CTXREG_VF2] ||
mcsb1 != i830->state.Ctx[I830_CTXREG_MCSB1] ||
!RENDERINPUTS_EQUAL( index_bitset, i830->last_index_bitset )) {
int k;
I830_STATECHANGE( i830, I830_UPLOAD_CTX );
@ -185,14 +187,15 @@ static void i830_render_start( intelContextPtr intel )
i830->state.Ctx[I830_CTXREG_MCSB1] = mcsb1;
RENDERINPUTS_COPY( i830->last_index_bitset, index_bitset );
assert(i830_check_vertex_size( intel, intel->vertex_size ));
k = i830_check_vertex_size( intel, intel->vertex_size );
assert(k);
}
}
static void i830_reduced_primitive_state( intelContextPtr intel,
static void i830_reduced_primitive_state( struct intel_context *intel,
GLenum rprim )
{
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(&intel->ctx);
GLuint st1 = i830->state.Stipple[I830_STPREG_ST1];
st1 &= ~ST1_ENABLE;
@ -220,10 +223,10 @@ static void i830_reduced_primitive_state( intelContextPtr intel,
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
static GLboolean i830_check_vertex_size( intelContextPtr intel,
static GLboolean i830_check_vertex_size( struct intel_context *intel,
GLuint expected )
{
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(&intel->ctx);
int vft0 = i830->current->Ctx[I830_CTXREG_VF];
int vft1 = i830->current->Ctx[I830_CTXREG_VF2];
int nrtex = (vft0 & VFT0_TEX_COUNT_MASK) >> VFT0_TEX_COUNT_SHIFT;
@ -260,16 +263,11 @@ static GLboolean i830_check_vertex_size( intelContextPtr intel,
return sz == expected;
}
static void i830_emit_invarient_state( intelContextPtr intel )
static void i830_emit_invarient_state( struct intel_context *intel )
{
BATCH_LOCALS;
BEGIN_BATCH( 200 );
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(0));
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(1));
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(2));
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(3));
BEGIN_BATCH(40, 0);
OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
OUT_BATCH(0);
@ -333,13 +331,6 @@ static void i830_emit_invarient_state( intelContextPtr intel )
TRI_FAN_PROVOKE_VRTX(2) |
TRI_STRIP_PROVOKE_VRTX(2));
OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD |
DISABLE_SCISSOR_RECT);
OUT_BATCH(_3DSTATE_SCISSOR_RECT_0_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM);
OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);
@ -358,11 +349,13 @@ static void i830_emit_invarient_state( intelContextPtr intel )
#define emit( intel, state, size ) \
do { \
int k; \
BEGIN_BATCH( size / sizeof(GLuint)); \
for (k = 0 ; k < size / sizeof(GLuint) ; k++) \
BEGIN_BATCH(size / sizeof(GLuint), 0); \
for (k = 0 ; k < size / sizeof(GLuint) ; k++) { \
if (0) _mesa_printf(" 0x%08x\n", state[k]); \
OUT_BATCH(state[k]); \
} \
ADVANCE_BATCH(); \
} while (0);
} while (0)
static GLuint get_state_size( struct i830_hw_state *state )
{
@ -370,6 +363,9 @@ static GLuint get_state_size( struct i830_hw_state *state )
GLuint sz = 0;
GLuint i;
if (dirty & I830_UPLOAD_INVARIENT)
sz += 40 * sizeof(int);
if (dirty & I830_UPLOAD_CTX)
sz += sizeof(state->Ctx);
@ -393,81 +389,133 @@ static GLuint get_state_size( struct i830_hw_state *state )
/* Push the state into the sarea and/or texture memory.
*/
static void i830_emit_state( intelContextPtr intel )
static void i830_emit_state( struct intel_context *intel )
{
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(&intel->ctx);
struct i830_hw_state *state = i830->current;
int i;
GLuint dirty = state->active & ~state->emitted;
GLuint counter = intel->batch.counter;
BATCH_LOCALS;
if (intel->batch.space < get_state_size(state)) {
intelFlushBatch(intel, GL_TRUE);
dirty = state->active & ~state->emitted;
counter = intel->batch.counter;
/* We don't hold the lock at this point, so want to make sure that
* there won't be a buffer wrap.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel->batch,
get_state_size(state),
0);
if (dirty & I830_UPLOAD_INVARIENT) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I830_UPLOAD_INVARIENT:\n");
i830_emit_invarient_state( intel );
}
if (dirty & I830_UPLOAD_CTX) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_CTX:\n");
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I830_UPLOAD_CTX:\n");
emit( i830, state->Ctx, sizeof(state->Ctx) );
}
if (dirty & I830_UPLOAD_BUFFERS) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_BUFFERS:\n");
emit( i830, state->Buffer, sizeof(state->Buffer) );
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I830_UPLOAD_BUFFERS:\n");
BEGIN_BATCH(I830_DEST_SETUP_SIZE+2, 0);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer, DRM_MM_TT|DRM_MM_WRITE, 0);
if (state->depth_region) {
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR1]);
OUT_RELOC(state->depth_region->buffer, DRM_MM_TT |DRM_MM_WRITE, 0);
}
OUT_BATCH(state->Buffer[I830_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DV1]);
OUT_BATCH(state->Buffer[I830_DESTREG_SENABLE]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR1]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR2]);
ADVANCE_BATCH();
}
if (dirty & I830_UPLOAD_STIPPLE) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_STIPPLE:\n");
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I830_UPLOAD_STIPPLE:\n");
emit( i830, state->Stipple, sizeof(state->Stipple) );
}
for (i = 0; i < I830_TEX_UNITS; i++) {
if ((dirty & I830_UPLOAD_TEX(i))) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_TEX(%d):\n", i);
emit( i830, state->Tex[i], sizeof(state->Tex[i]));
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I830_UPLOAD_TEX(%d):\n", i);
BEGIN_BATCH(I830_TEX_SETUP_SIZE+1, 0);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);
if (state->tex_buffer[i]) {
OUT_RELOC(state->tex_buffer[i],
DRM_MM_TT|DRM_MM_READ,
state->tex_offset[i] | TM0S0_USE_FENCE);
}
else {
assert(i == 0);
assert(state == &i830->meta);
OUT_BATCH(0);
}
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S1]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S2]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S3]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S4]);
OUT_BATCH(state->Tex[i][I830_TEXREG_MCS]);
OUT_BATCH(state->Tex[i][I830_TEXREG_CUBE]);
}
if (dirty & I830_UPLOAD_TEXBLEND(i)) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_TEXBLEND(%d):\n", i);
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
state->TexBlendWordsUsed[i]);
emit( i830, state->TexBlend[i],
state->TexBlendWordsUsed[i] * 4 );
}
}
state->emitted |= dirty;
intel->batch.last_emit_state = counter;
assert(counter == intel->batch.counter);
}
static void i830_destroy_context( intelContextPtr intel )
static void i830_destroy_context( struct intel_context *intel )
{
_tnl_free_vertices(&intel->ctx);
}
static void
i830_set_color_region(intelContextPtr intel, const intelRegion *region)
static void i830_set_draw_region( struct intel_context *intel,
struct intel_region *draw_region,
struct intel_region *depth_region)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(&intel->ctx);
intel_region_release(intel, &i830->state.draw_region);
intel_region_release(intel, &i830->state.depth_region);
intel_region_reference(&i830->state.draw_region, draw_region);
intel_region_reference(&i830->state.depth_region, depth_region);
/* XXX FBO: Need code from i915_set_draw_region() */
I830_STATECHANGE( i830, I830_UPLOAD_BUFFERS );
I830_STATECHANGE( i830, I830_UPLOAD_BUFFERS );
i830->state.Buffer[I830_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_CBUFADDR2] = region->offset;
}
static void
i830_set_z_region(intelContextPtr intel, const intelRegion *region)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
I830_STATECHANGE( i830, I830_UPLOAD_BUFFERS );
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(draw_region->pitch) | BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_DBUFADDR2] = region->offset;
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(depth_region->pitch) | BUF_3D_USE_FENCE);
}
#if 0
static void
i830_update_color_z_regions(intelContextPtr intel,
const intelRegion *colorRegion,
@ -483,45 +531,36 @@ i830_update_color_z_regions(intelContextPtr intel,
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(depthRegion->pitch) | BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_DBUFADDR2] = depthRegion->offset;
}
#endif
/* This isn't really handled at the moment.
*/
static void i830_lost_hardware( intelContextPtr intel )
static void i830_lost_hardware( struct intel_context *intel )
{
I830_CONTEXT(intel)->state.emitted = 0;
struct i830_context *i830 = i830_context(&intel->ctx);
i830->state.emitted = 0;
}
static void i830_emit_flush( intelContextPtr intel )
static GLuint i830_flush_cmd( void )
{
BATCH_LOCALS;
BEGIN_BATCH(2);
OUT_BATCH( MI_FLUSH | FLUSH_MAP_CACHE );
OUT_BATCH( 0 );
ADVANCE_BATCH();
return MI_FLUSH | FLUSH_MAP_CACHE;
}
void i830InitVtbl( i830ContextPtr i830 )
void i830InitVtbl( struct i830_context *i830 )
{
i830->intel.vtbl.alloc_tex_obj = i830AllocTexObj;
i830->intel.vtbl.check_vertex_size = i830_check_vertex_size;
i830->intel.vtbl.clear_with_tris = i830ClearWithTris;
i830->intel.vtbl.rotate_window = i830RotateWindow;
i830->intel.vtbl.destroy = i830_destroy_context;
i830->intel.vtbl.emit_invarient_state = i830_emit_invarient_state;
i830->intel.vtbl.emit_state = i830_emit_state;
i830->intel.vtbl.lost_hardware = i830_lost_hardware;
i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
i830->intel.vtbl.set_color_region = i830_set_color_region;
i830->intel.vtbl.set_z_region = i830_set_z_region;
i830->intel.vtbl.update_color_z_regions = i830_update_color_z_regions;
i830->intel.vtbl.set_draw_region = i830_set_draw_region;
i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
i830->intel.vtbl.emit_flush = i830_emit_flush;
i830->intel.vtbl.flush_cmd = i830_flush_cmd;
i830->intel.vtbl.render_start = i830_render_start;
}

View file

@ -41,6 +41,10 @@
#include "utils.h"
#include "i915_reg.h"
#include "intel_bufmgr.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
/***************************************
* Mesa's Driver Functions
***************************************/
@ -65,7 +69,7 @@ static void i915InvalidateState( GLcontext *ctx, GLuint new_state )
_ac_InvalidateState( ctx, new_state );
_tnl_InvalidateState( ctx, new_state );
_tnl_invalidate_vertex_state( ctx, new_state );
INTEL_CONTEXT(ctx)->NewGLState |= new_state;
intel_context(ctx)->NewGLState |= new_state;
/* Todo: gather state values under which tracked parameters become
* invalidated, add callbacks for things like
@ -99,14 +103,16 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
void *sharedContextPrivate)
{
struct dd_function_table functions;
i915ContextPtr i915 = (i915ContextPtr) CALLOC_STRUCT(i915_context);
intelContextPtr intel = &i915->intel;
struct i915_context *i915 = (struct i915_context *) CALLOC_STRUCT(i915_context);
struct intel_context *intel = &i915->intel;
GLcontext *ctx = &intel->ctx;
GLuint i;
if (!i915) return GL_FALSE;
_mesa_printf( "\ntexmem branch (i915, drop3)\n\n");
i915InitVtbl( i915 );
i915InitMetaFuncs( i915 );
i915InitDriverFunctions( &functions );
@ -120,49 +126,28 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
ctx->Const.MaxTextureImageUnits = I915_TEX_UNITS;
ctx->Const.MaxTextureCoordUnits = I915_TEX_UNITS;
intel->nr_heaps = 1;
intel->texture_heaps[0] =
driCreateTextureHeap( 0, intel,
intel->intelScreen->tex.size,
12,
I830_NR_TEX_REGIONS,
intel->sarea->texList,
(unsigned *) & intel->sarea->texAge,
& intel->swapped,
sizeof( struct i915_texture_object ),
(destroy_texture_object_t *)intelDestroyTexObj );
/* FIXME: driCalculateMaxTextureLevels assumes that mipmaps are
* tightly packed, but they're not in Intel graphics
* hardware.
/* Advertise the full hardware capabilities. The new memory
* manager should cope much better with overload situations:
*/
ctx->Const.MaxTextureLevels = 12;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 12;
ctx->Const.MaxTextureRectSize = (1<<11);
ctx->Const.MaxTextureUnits = I915_TEX_UNITS;
i = driQueryOptioni( &intel->intelScreen->optionCache, "allow_large_textures");
driCalculateMaxTextureLevels( intel->texture_heaps,
intel->nr_heaps,
&intel->ctx.Const,
4,
11, /* max 2D texture size is 2048x2048 */
8, /* 3D texture */
11, /* cube texture. */
11, /* rect texture */
12,
GL_FALSE,
i );
/* GL_ARB_fragment_program limits - don't think Mesa actually
* validates programs against these, and in any case one ARB
* instruction can translate to more than one HW instruction, so
* we'll still have to check and fallback each time.
*/
*/
ctx->Const.FragmentProgram.MaxNativeTemps = I915_MAX_TEMPORARY;
ctx->Const.FragmentProgram.MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */
ctx->Const.FragmentProgram.MaxNativeParameters = I915_MAX_CONSTANT;
ctx->Const.FragmentProgram.MaxNativeAluInstructions = I915_MAX_ALU_INSN;
ctx->Const.FragmentProgram.MaxNativeTexInstructions = I915_MAX_TEX_INSN;
ctx->Const.FragmentProgram.MaxNativeInstructions = (I915_MAX_ALU_INSN +
I915_MAX_TEX_INSN);
I915_MAX_TEX_INSN);
ctx->Const.FragmentProgram.MaxNativeTexIndirections = I915_MAX_TEX_INDIRECT;
ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0; /* I don't think we have one */

View file

@ -45,6 +45,8 @@
#define I915_UPLOAD_PROGRAM 0x8
#define I915_UPLOAD_CONSTANTS 0x10
#define I915_UPLOAD_FOG 0x20
#define I915_UPLOAD_INVARIENT 0x40
#define I915_UPLOAD_DEFAULTS 0x80
#define I915_UPLOAD_TEX(i) (0x00010000<<(i))
#define I915_UPLOAD_TEX_ALL (0x00ff0000)
#define I915_UPLOAD_TEX_0_SHIFT 16
@ -54,10 +56,8 @@
*/
#define I915_DESTREG_CBUFADDR0 0
#define I915_DESTREG_CBUFADDR1 1
#define I915_DESTREG_CBUFADDR2 2
#define I915_DESTREG_DBUFADDR0 3
#define I915_DESTREG_DBUFADDR1 4
#define I915_DESTREG_DBUFADDR2 5
#define I915_DESTREG_DV0 6
#define I915_DESTREG_DV1 7
#define I915_DESTREG_SENABLE 8
@ -88,7 +88,6 @@
#define I915_STPREG_ST1 1
#define I915_STP_SETUP_SIZE 2
#define I915_TEXREG_MS2 0
#define I915_TEXREG_MS3 1
#define I915_TEXREG_MS4 2
#define I915_TEXREG_SS2 3
@ -96,6 +95,15 @@
#define I915_TEXREG_SS4 5
#define I915_TEX_SETUP_SIZE 6
#define I915_DEFREG_C0 0
#define I915_DEFREG_C1 1
#define I915_DEFREG_S0 2
#define I915_DEFREG_S1 3
#define I915_DEFREG_Z0 4
#define I915_DEFREG_Z1 5
#define I915_DEF_SETUP_SIZE 6
#define I915_MAX_CONSTANT 32
#define I915_CONSTANT_SIZE (2+(4*I915_MAX_CONSTANT))
@ -165,8 +173,6 @@ struct i915_fragment_program {
GLuint nr_params;
/* Helpers for i915_texprog.c:
*/
GLuint src_texture; /* Reg containing sampled texture color,
@ -187,13 +193,6 @@ struct i915_fragment_program {
struct i915_texture_object
{
struct intel_texture_object intel;
GLenum lastTarget;
GLboolean refs_border_color;
GLuint Setup[I915_TEX_SETUP_SIZE];
};
#define I915_TEX_UNITS 8
@ -203,11 +202,27 @@ struct i915_hw_state {
GLuint Buffer[I915_DEST_SETUP_SIZE];
GLuint Stipple[I915_STP_SETUP_SIZE];
GLuint Fog[I915_FOG_SETUP_SIZE];
GLuint Defaults[I915_DEF_SETUP_SIZE];
GLuint Tex[I915_TEX_UNITS][I915_TEX_SETUP_SIZE];
GLuint Constant[I915_CONSTANT_SIZE];
GLuint ConstantSize;
GLuint Program[I915_PROGRAM_SIZE];
GLuint ProgramSize;
/* Region pointers for relocation:
*/
struct intel_region *draw_region;
struct intel_region *depth_region;
/* struct intel_region *tex_region[I915_TEX_UNITS]; */
/* Regions aren't actually that appropriate here as the memory may
* be from a PBO or FBO. Just use the buffer id. Will have to do
* this for draw and depth for FBO's...
*/
GLuint tex_buffer[I915_TEX_UNITS];
GLuint tex_offset[I915_TEX_UNITS];
GLuint active; /* I915_UPLOAD_* */
GLuint emitted; /* I915_UPLOAD_* */
};
@ -222,6 +237,8 @@ struct i915_context
GLuint last_ReallyEnabled;
GLuint vertex_fog;
GLuint lodbias_ss2[MAX_TEXTURE_UNITS];
struct i915_fragment_program tex_program;
struct i915_fragment_program *current_program;
@ -230,24 +247,14 @@ struct i915_context
};
typedef struct i915_context *i915ContextPtr;
typedef struct i915_texture_object *i915TextureObjectPtr;
#define I915_CONTEXT(ctx) ((i915ContextPtr)(ctx))
#define I915_STATECHANGE(i915, flag) \
do { \
if (0) fprintf(stderr, "I915_STATECHANGE %x in %s\n", flag, __FUNCTION__); \
INTEL_FIREVERTICES( &(i915)->intel ); \
(i915)->state.emitted &= ~(flag); \
} while (0)
#define I915_ACTIVESTATE(i915, flag, mode) \
do { \
if (0) fprintf(stderr, "I915_ACTIVESTATE %x %d in %s\n", \
flag, mode, __FUNCTION__); \
INTEL_FIREVERTICES( &(i915)->intel ); \
if (mode) \
(i915)->state.active |= (flag); \
@ -259,7 +266,13 @@ do { \
/*======================================================================
* i915_vtbl.c
*/
extern void i915InitVtbl( i915ContextPtr i915 );
extern void i915InitVtbl( struct i915_context *i915 );
extern void
i915_state_draw_region(struct intel_context *intel,
struct i915_hw_state *state,
struct intel_region *color_region,
struct intel_region *depth_region);
@ -296,7 +309,7 @@ extern GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
/*======================================================================
* i915_texprog.c
*/
extern void i915ValidateTextureProgram( i915ContextPtr i915 );
extern void i915ValidateTextureProgram( struct i915_context *i915 );
/*======================================================================
@ -310,48 +323,43 @@ extern void i915_print_ureg( const char *msg, GLuint ureg );
* i915_state.c
*/
extern void i915InitStateFunctions( struct dd_function_table *functions );
extern void i915InitState( i915ContextPtr i915 );
extern void i915_update_fog(GLcontext *ctxx);
extern void i915InitState( struct i915_context *i915 );
extern void i915_update_fog( GLcontext *ctx );
/*======================================================================
* i915_tex.c
*/
extern void i915UpdateTextureState( intelContextPtr intel );
extern void i915UpdateTextureState( struct intel_context *intel );
extern void i915InitTextureFuncs( struct dd_function_table *functions );
extern intelTextureObjectPtr i915AllocTexObj( struct gl_texture_object *texObj );
/*======================================================================
* i915_metaops.c
*/
extern GLboolean
i915TryTextureReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels );
void i915InitMetaFuncs( struct i915_context *i915 );
extern GLboolean
i915TryTextureDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels );
extern void
i915ClearWithTris( intelContextPtr intel, GLbitfield mask,
GLboolean all, GLint cx, GLint cy, GLint cw, GLint ch);
extern void
i915RotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
GLuint srcBuf);
/*======================================================================
* i915_fragprog.c
*/
extern void i915ValidateFragmentProgram( i915ContextPtr i915 );
extern void i915ValidateFragmentProgram( struct i915_context *i915 );
extern void i915InitFragProgFuncs( struct dd_function_table *functions );
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:
*/
/* Convert a core Mesa GLcontext pointer to the driver's i915_context.
 * NOTE(review): this is a flat cast — it assumes the GLcontext is the
 * first field of struct i915_context (embedded via struct intel_context,
 * cf. "GLcontext *ctx = &intel->ctx" in i830CreateContext); confirm that
 * layout holds before relying on it.
 */
static INLINE struct i915_context *
i915_context( GLcontext *ctx )
{
return (struct i915_context *)ctx;
}

/* Legacy macro spelling retained so existing I915_CONTEXT() callers
 * keep working; new code can call i915_context() directly.
 */
#define I915_CONTEXT(ctx) i915_context(ctx)
#endif

View file

@ -806,7 +806,7 @@ static void check_wpos( struct i915_fragment_program *p )
static void translate_program( struct i915_fragment_program *p )
{
i915ContextPtr i915 = I915_CONTEXT(p->ctx);
struct i915_context *i915 = I915_CONTEXT(p->ctx);
i915_init_program( i915, p );
check_wpos( p );
@ -840,7 +840,7 @@ static void i915BindProgram( GLcontext *ctx,
struct gl_program *prog )
{
if (target == GL_FRAGMENT_PROGRAM_ARB) {
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
struct i915_fragment_program *p = (struct i915_fragment_program *)prog;
if (i915->current_program == p)
@ -896,7 +896,7 @@ static void i915DeleteProgram( GLcontext *ctx,
struct gl_program *prog )
{
if (prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
struct i915_fragment_program *p = (struct i915_fragment_program *)prog;
if (i915->current_program == p)
@ -940,10 +940,10 @@ static void i915ProgramStringNotify( GLcontext *ctx,
}
void i915ValidateFragmentProgram( i915ContextPtr i915 )
void i915ValidateFragmentProgram( struct i915_context *i915 )
{
GLcontext *ctx = &i915->intel.ctx;
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
@ -1028,6 +1028,7 @@ void i915ValidateFragmentProgram( i915ContextPtr i915 )
if (s2 != i915->state.Ctx[I915_CTXREG_LIS2] ||
s4 != i915->state.Ctx[I915_CTXREG_LIS4]) {
int k;
I915_STATECHANGE( i915, I915_UPLOAD_CTX );
@ -1044,7 +1045,8 @@ void i915ValidateFragmentProgram( i915ContextPtr i915 )
i915->state.Ctx[I915_CTXREG_LIS2] = s2;
i915->state.Ctx[I915_CTXREG_LIS4] = s4;
assert(intel->vtbl.check_vertex_size( intel, intel->vertex_size ));
k = intel->vtbl.check_vertex_size( intel, intel->vertex_size );
assert(k);
}
if (!p->params_uptodate)

View file

@ -34,45 +34,46 @@
#include "intel_screen.h"
#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_regions.h"
#include "intel_rotate.h"
#include "i915_context.h"
#include "i915_reg.h"
/* A large amount of state doesn't need to be uploaded.
/* We touch almost everything:
*/
#define ACTIVE (I915_UPLOAD_PROGRAM | \
I915_UPLOAD_STIPPLE | \
#define ACTIVE (I915_UPLOAD_INVARIENT | \
I915_UPLOAD_CTX | \
I915_UPLOAD_BUFFERS | \
I915_UPLOAD_STIPPLE | \
I915_UPLOAD_PROGRAM | \
I915_UPLOAD_FOG | \
I915_UPLOAD_TEX(0))
#define SET_STATE( i915, STATE ) \
#define SET_STATE( i915, STATE ) \
do { \
i915->current->emitted &= ~ACTIVE; \
i915->current = &i915->STATE; \
i915->current = &i915->STATE; \
i915->current->emitted &= ~ACTIVE; \
} while (0)
/* Operations where the 3D engine is decoupled temporarily from the
* current GL state and used for other purposes than simply rendering
* incoming triangles.
*/
static void set_initial_state( i915ContextPtr i915 )
{
memcpy(&i915->meta, &i915->initial, sizeof(i915->meta) );
i915->meta.active = ACTIVE;
i915->meta.emitted = 0;
}
static void set_no_depth_stencil_write( i915ContextPtr i915 )
/* Meta-op: disable stencil testing and stencil writes in the meta
 * hardware state.  Clearing I915_UPLOAD_CTX from 'emitted' marks the
 * context register block dirty so the change is re-uploaded on the
 * next state emit.
 */
static void meta_no_stencil_write( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
/* ctx->Driver.Enable( ctx, GL_STENCIL_TEST, GL_FALSE )
 */
i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
static void meta_no_depth_write( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
/* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
*/
i915->meta.Ctx[I915_CTXREG_LIS6] &= ~(S6_DEPTH_TEST_ENABLE |
@ -81,12 +82,33 @@ static void set_no_depth_stencil_write( i915ContextPtr i915 )
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
/* Meta-op: unconditionally replace the depth buffer — enable depth
 * test and depth writes, then force the compare function to
 * COMPAREFUNC_ALWAYS so every fragment passes and its Z is written.
 * (The "GL_REPLACE" comment below is shorthand for that always-pass,
 * always-write combination; GL_REPLACE is not itself a depth func.)
 * Marks the CTX register block dirty for re-upload.
 */
static void meta_depth_replace( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
/* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_TRUE )
 * ctx->Driver.DepthMask( ctx, GL_TRUE )
 */
i915->meta.Ctx[I915_CTXREG_LIS6] |= (S6_DEPTH_TEST_ENABLE |
S6_DEPTH_WRITE_ENABLE);
/* ctx->Driver.DepthFunc( ctx, GL_REPLACE )
 */
i915->meta.Ctx[I915_CTXREG_LIS6] &= ~S6_DEPTH_TEST_FUNC_MASK;
i915->meta.Ctx[I915_CTXREG_LIS6] |=
COMPAREFUNC_ALWAYS << S6_DEPTH_TEST_FUNC_SHIFT;
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
/* Set stencil unit to replace always with the reference value.
*/
static void set_stencil_replace( i915ContextPtr i915,
static void meta_stencil_replace( struct intel_context *intel,
GLuint s_mask,
GLuint s_clear)
{
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint op = STENCILOP_REPLACE;
GLuint func = COMPAREFUNC_ALWAYS;
@ -95,13 +117,6 @@ static void set_stencil_replace( i915ContextPtr i915,
i915->meta.Ctx[I915_CTXREG_LIS5] |= (S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
/* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
*/
i915->meta.Ctx[I915_CTXREG_LIS6] &= ~(S6_DEPTH_TEST_ENABLE |
S6_DEPTH_WRITE_ENABLE);
/* ctx->Driver.StencilMask( ctx, s_mask )
*/
i915->meta.Ctx[I915_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_WRITE_MASK;
@ -109,7 +124,6 @@ static void set_stencil_replace( i915ContextPtr i915,
i915->meta.Ctx[I915_CTXREG_STATE4] |= (ENABLE_STENCIL_WRITE_MASK |
STENCIL_WRITE_MASK(s_mask));
/* ctx->Driver.StencilOp( ctx, GL_REPLACE, GL_REPLACE, GL_REPLACE )
*/
i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_FAIL_MASK |
@ -138,8 +152,9 @@ static void set_stencil_replace( i915ContextPtr i915,
}
static void set_color_mask( i915ContextPtr i915, GLboolean state )
static void meta_color_mask( struct intel_context *intel, GLboolean state )
{
struct i915_context *i915 = i915_context(&intel->ctx);
const GLuint mask = (S5_WRITEDISABLE_RED |
S5_WRITEDISABLE_GREEN |
S5_WRITEDISABLE_BLUE |
@ -160,6 +175,28 @@ static void set_color_mask( i915ContextPtr i915, GLboolean state )
/* Meta-op: import the fragment-affecting pieces of the current GL
 * state into the meta state, so pixel operations (draw/copy pixels)
 * honor the application's fog, alpha/stencil/depth enables (LIS5/LIS6),
 * stencil masks (STATE4), blend color, independent alpha blend (IAB)
 * and scissor (SENABLE/SR1/SR2) settings.  The copied register blocks
 * are flagged dirty so they get re-emitted.
 */
static void meta_import_pixel_state( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
memcpy(i915->meta.Fog, i915->state.Fog, I915_FOG_SETUP_SIZE * 4);
i915->meta.Ctx[I915_CTXREG_LIS5] = i915->state.Ctx[I915_CTXREG_LIS5];
i915->meta.Ctx[I915_CTXREG_LIS6] = i915->state.Ctx[I915_CTXREG_LIS6];
i915->meta.Ctx[I915_CTXREG_STATE4] = i915->state.Ctx[I915_CTXREG_STATE4];
i915->meta.Ctx[I915_CTXREG_BLENDCOLOR1] = i915->state.Ctx[I915_CTXREG_BLENDCOLOR1];
i915->meta.Ctx[I915_CTXREG_IAB] = i915->state.Ctx[I915_CTXREG_IAB];
i915->meta.Buffer[I915_DESTREG_SENABLE] = i915->state.Buffer[I915_DESTREG_SENABLE];
i915->meta.Buffer[I915_DESTREG_SR1] = i915->state.Buffer[I915_DESTREG_SR1];
i915->meta.Buffer[I915_DESTREG_SR2] = i915->state.Buffer[I915_DESTREG_SR2];
/* Re-emit everything we just overwrote. */
i915->meta.emitted &= ~I915_UPLOAD_FOG;
i915->meta.emitted &= ~I915_UPLOAD_BUFFERS;
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
#define REG( type, nr ) (((type)<<5)|(nr))
@ -211,8 +248,10 @@ static void set_color_mask( i915ContextPtr i915, GLboolean state )
static void set_no_texture( i915ContextPtr i915 )
static void meta_no_texture( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
static const GLuint prog[] = {
_3DSTATE_PIXEL_SHADER_PROGRAM,
@ -241,9 +280,10 @@ static void set_no_texture( i915ContextPtr i915 )
i915->meta.emitted &= ~I915_UPLOAD_PROGRAM;
}
static void enable_texture_blend_replace( i915ContextPtr i915 )
static void meta_texture_blend_replace( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
static const GLuint prog[] = {
_3DSTATE_PIXEL_SHADER_PROGRAM,
@ -286,85 +326,126 @@ static void enable_texture_blend_replace( i915ContextPtr i915 )
/* Set up an arbitary piece of memory as a rectangular texture
* (including the front or back buffer).
*/
static void set_tex_rect_source( i915ContextPtr i915,
GLuint offset,
GLuint width,
GLuint height,
GLuint pitch, /* in bytes! */
GLuint textureFormat )
static GLboolean meta_tex_rect_source( struct intel_context *intel,
GLuint buffer,
GLuint offset,
GLuint pitch,
GLuint height,
GLenum format,
GLenum type)
{
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint unit = 0;
GLint numLevels = 1;
GLuint *state = i915->meta.Tex[0];
GLuint textureFormat;
GLuint cpp;
#if 0
printf("TexRect source offset 0x%x pitch %d\n", offset, pitch);
#endif
/* A full implementation of this would do the upload through
* glTexImage2d, and get all the conversion operations at that
* point. We are restricted, but still at least have access to the
* fragment program swizzle.
*/
switch (format) {
case GL_BGRA:
switch (type) {
case GL_UNSIGNED_INT_8_8_8_8_REV:
case GL_UNSIGNED_BYTE:
textureFormat = (MAPSURF_32BIT | MT_32BIT_ARGB8888);
cpp = 4;
break;
default:
return GL_FALSE;
}
break;
case GL_RGBA:
switch (type) {
case GL_UNSIGNED_INT_8_8_8_8_REV:
case GL_UNSIGNED_BYTE:
textureFormat = (MAPSURF_32BIT | MT_32BIT_ABGR8888);
cpp = 4;
break;
default:
return GL_FALSE;
}
break;
case GL_BGR:
switch (type) {
case GL_UNSIGNED_SHORT_5_6_5_REV:
textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
cpp = 2;
break;
default:
return GL_FALSE;
}
break;
case GL_RGB:
switch (type) {
case GL_UNSIGNED_SHORT_5_6_5:
textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
cpp = 2;
break;
default:
return GL_FALSE;
}
break;
/* fprintf(stderr, "%s: offset: %x w: %d h: %d pitch %d format %x\n", */
/* __FUNCTION__, offset, width, height, pitch, textureFormat ); */
default:
return GL_FALSE;
}
if ((pitch * cpp) & 3) {
_mesa_printf("%s: texture is not dword pitch\n", __FUNCTION__);
return GL_FALSE;
}
/* intel_region_release(intel, &i915->meta.tex_region[0]); */
/* intel_region_reference(&i915->meta.tex_region[0], region); */
i915->meta.tex_buffer[0] = buffer;
i915->meta.tex_offset[0] = offset;
state[I915_TEXREG_MS2] = offset;
state[I915_TEXREG_MS3] = (((height - 1) << MS3_HEIGHT_SHIFT) |
((width - 1) << MS3_WIDTH_SHIFT) |
textureFormat |
MS3_USE_FENCE_REGS);
((pitch - 1) << MS3_WIDTH_SHIFT) |
textureFormat |
MS3_USE_FENCE_REGS);
state[I915_TEXREG_MS4] = ((((pitch / 4) - 1) << MS4_PITCH_SHIFT) |
((((numLevels-1) * 4)) << MS4_MAX_LOD_SHIFT));
state[I915_TEXREG_MS4] = (((((pitch * cpp) / 4) - 1) << MS4_PITCH_SHIFT) |
MS4_CUBE_FACE_ENA_MASK |
((((numLevels-1) * 4)) << MS4_MAX_LOD_SHIFT));
state[I915_TEXREG_SS2] = ((FILTER_NEAREST << SS2_MIN_FILTER_SHIFT) |
(MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT) |
(FILTER_NEAREST << SS2_MAG_FILTER_SHIFT));
(MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT) |
(FILTER_NEAREST << SS2_MAG_FILTER_SHIFT));
state[I915_TEXREG_SS3] = ((TEXCOORDMODE_WRAP << SS3_TCX_ADDR_MODE_SHIFT) |
(TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT) |
(TEXCOORDMODE_WRAP << SS3_TCZ_ADDR_MODE_SHIFT) |
(unit<<SS3_TEXTUREMAP_INDEX_SHIFT));
(TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT) |
(TEXCOORDMODE_WRAP << SS3_TCZ_ADDR_MODE_SHIFT) |
(unit<<SS3_TEXTUREMAP_INDEX_SHIFT));
state[I915_TEXREG_SS4] = 0;
i915->meta.emitted &= ~I915_UPLOAD_TEX(0);
return GL_TRUE;
}
/* Select between front and back draw buffers.
/**
* Set the color and depth drawing region for meta ops.
*/
static void set_draw_region( i915ContextPtr i915, const intelRegion *region )
static void meta_draw_region( struct intel_context *intel,
struct intel_region *color_region,
struct intel_region *depth_region )
{
#if 0
printf("Rotate into region: offset 0x%x pitch %d\n",
region->offset, region->pitch);
#endif
i915->meta.Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i915->meta.Buffer[I915_DESTREG_CBUFADDR2] = region->offset;
i915->meta.emitted &= ~I915_UPLOAD_BUFFERS;
struct i915_context *i915 = i915_context(&intel->ctx);
i915_state_draw_region(intel, &i915->meta, color_region, depth_region);
}
#if 0
/* Setup an arbitary draw format, useful for targeting texture or agp
* memory.
*/
static void set_draw_format( i915ContextPtr i915,
GLuint format,
GLuint depth_format)
static void set_vertex_format( struct intel_context *intel )
{
i915->meta.Buffer[I915_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | /* .5 */
format |
LOD_PRECLAMP_OGL |
TEX_DEFAULT_COLOR_OGL |
depth_format);
struct i915_context *i915 = i915_context(&intel->ctx);
i915->meta.emitted &= ~I915_UPLOAD_BUFFERS;
/* fprintf(stderr, "%s: DV1: %x\n", */
/* __FUNCTION__, i915->meta.Buffer[I915_DESTREG_DV1]); */
}
#endif
static void set_vertex_format( i915ContextPtr i915 )
{
i915->meta.Ctx[I915_CTXREG_LIS2] =
(S2_TEXCOORD_FMT(0, TEXCOORDFMT_2D) |
S2_TEXCOORD_FMT(1, TEXCOORDFMT_NOT_PRESENT) |
@ -379,322 +460,54 @@ static void set_vertex_format( i915ContextPtr i915 )
i915->meta.Ctx[I915_CTXREG_LIS4] |=
(S4_VFMT_COLOR |
S4_VFMT_SPEC_FOG |
S4_VFMT_XYZW);
S4_VFMT_XYZ);
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
static void draw_quad(i915ContextPtr i915,
GLfloat x0, GLfloat x1,
GLfloat y0, GLfloat y1,
GLubyte red, GLubyte green,
GLubyte blue, GLubyte alpha,
GLfloat s0, GLfloat s1,
GLfloat t0, GLfloat t1 )
{
GLuint vertex_size = 8;
GLuint *vb = intelEmitInlinePrimitiveLocked( &i915->intel,
PRIM3D_TRIFAN,
4 * vertex_size,
vertex_size );
intelVertex tmp;
int i;
if (0)
fprintf(stderr, "%s: %f,%f-%f,%f 0x%x%x%x%x %f,%f-%f,%f\n",
__FUNCTION__,
x0,y0,x1,y1,red,green,blue,alpha,s0,t0,s1,t1);
/* initial vertex, left bottom */
tmp.v.x = x0;
tmp.v.y = y0;
tmp.v.z = 1.0;
tmp.v.w = 1.0;
tmp.v.color.red = red;
tmp.v.color.green = green;
tmp.v.color.blue = blue;
tmp.v.color.alpha = alpha;
tmp.v.specular.red = 0;
tmp.v.specular.green = 0;
tmp.v.specular.blue = 0;
tmp.v.specular.alpha = 0;
tmp.v.u0 = s0;
tmp.v.v0 = t0;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
/* right bottom */
vb += vertex_size;
tmp.v.x = x1;
tmp.v.u0 = s1;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
/* right top */
vb += vertex_size;
tmp.v.y = y1;
tmp.v.v0 = t1;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
/* left top */
vb += vertex_size;
tmp.v.x = x0;
tmp.v.u0 = s0;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
}
static void draw_poly(i915ContextPtr i915,
GLubyte red, GLubyte green, GLubyte blue, GLubyte alpha,
GLuint numVerts,
/*const*/ GLfloat verts[][2],
/*const*/ GLfloat texcoords[][2])
{
GLuint vertex_size = 8;
GLuint *vb = intelEmitInlinePrimitiveLocked( &i915->intel,
PRIM3D_TRIFAN,
numVerts * vertex_size,
vertex_size );
intelVertex tmp;
int i, k;
/* initial constant vertex fields */
tmp.v.z = 1.0;
tmp.v.w = 1.0;
tmp.v.color.red = red;
tmp.v.color.green = green;
tmp.v.color.blue = blue;
tmp.v.color.alpha = alpha;
tmp.v.specular.red = 0;
tmp.v.specular.green = 0;
tmp.v.specular.blue = 0;
tmp.v.specular.alpha = 0;
for (k = 0; k < numVerts; k++) {
tmp.v.x = verts[k][0];
tmp.v.y = verts[k][1];
tmp.v.u0 = texcoords[k][0];
tmp.v.v0 = texcoords[k][1];
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
vb += vertex_size;
}
}
void
i915ClearWithTris(intelContextPtr intel, GLbitfield mask,
GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch)
{
i915ContextPtr i915 = I915_CONTEXT( intel );
__DRIdrawablePrivate *dPriv = intel->driDrawable;
intelScreenPrivate *screen = intel->intelScreen;
int x0, y0, x1, y1;
SET_STATE( i915, meta );
set_initial_state( i915 );
set_no_texture( i915 );
set_vertex_format( i915 );
LOCK_HARDWARE(intel);
if (!all) {
x0 = cx;
y0 = cy;
x1 = x0 + cw;
y1 = y0 + ch;
} else {
x0 = 0;
y0 = 0;
x1 = x0 + dPriv->w;
y1 = y0 + dPriv->h;
}
/* Don't do any clipping to screen - these are window coordinates.
* The active cliprects will be applied as for any other geometry.
*/
if (mask & BUFFER_BIT_FRONT_LEFT) {
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
set_draw_region( i915, &screen->front );
draw_quad(i915, x0, x1, y0, y1,
intel->clear_red, intel->clear_green,
intel->clear_blue, intel->clear_alpha,
0, 0, 0, 0);
}
if (mask & BUFFER_BIT_BACK_LEFT) {
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
set_draw_region( i915, &screen->back );
draw_quad(i915, x0, x1, y0, y1,
intel->clear_red, intel->clear_green,
intel->clear_blue, intel->clear_alpha,
0, 0, 0, 0);
}
if (mask & BUFFER_BIT_STENCIL) {
set_stencil_replace( i915,
intel->ctx.Stencil.WriteMask[0],
intel->ctx.Stencil.Clear);
set_color_mask( i915, GL_FALSE );
set_draw_region( i915, &screen->front ); /* could be either? */
draw_quad( i915, x0, x1, y0, y1, 0, 0, 0, 0, 0, 0, 0, 0 );
}
UNLOCK_HARDWARE(intel);
SET_STATE( i915, state );
}
/**
* Copy the window contents named by dPriv to the rotated (or reflected)
* color buffer.
* srcBuf is BUFFER_BIT_FRONT_LEFT or BUFFER_BIT_BACK_LEFT to indicate the source.
/* Operations where the 3D engine is decoupled temporarily from the
* current GL state and used for other purposes than simply rendering
* incoming triangles.
*/
void
i915RotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
GLuint srcBuf)
static void install_meta_state( struct intel_context *intel )
{
i915ContextPtr i915 = I915_CONTEXT( intel );
intelScreenPrivate *screen = intel->intelScreen;
const GLuint cpp = screen->cpp;
drm_clip_rect_t fullRect;
GLuint textureFormat, srcOffset, srcPitch;
const drm_clip_rect_t *clipRects;
int numClipRects;
int i;
struct i915_context *i915 = i915_context(&intel->ctx);
memcpy(&i915->meta, &i915->initial, sizeof(i915->meta) );
i915->meta.active = ACTIVE;
i915->meta.emitted = 0;
int xOrig, yOrig;
int origNumClipRects;
drm_clip_rect_t *origRects;
/*
* set up hardware state
*/
intelFlush( &intel->ctx );
SET_STATE( i915, meta );
set_initial_state( i915 );
set_no_texture( i915 );
set_vertex_format( i915 );
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
LOCK_HARDWARE(intel);
/* save current drawing origin and cliprects (restored at end) */
xOrig = intel->drawX;
yOrig = intel->drawY;
origNumClipRects = intel->numClipRects;
origRects = intel->pClipRects;
if (!intel->numClipRects)
goto done;
/*
* set drawing origin, cliprects for full-screen access to rotated screen
*/
fullRect.x1 = 0;
fullRect.y1 = 0;
fullRect.x2 = screen->rotatedWidth;
fullRect.y2 = screen->rotatedHeight;
intel->drawX = 0;
intel->drawY = 0;
intel->numClipRects = 1;
intel->pClipRects = &fullRect;
set_draw_region( i915, &screen->rotated );
if (cpp == 4)
textureFormat = MAPSURF_32BIT | MT_32BIT_ARGB8888;
else
textureFormat = MAPSURF_16BIT | MT_16BIT_RGB565;
if (srcBuf == BUFFER_BIT_FRONT_LEFT) {
srcPitch = screen->front.pitch; /* in bytes */
srcOffset = screen->front.offset; /* bytes */
clipRects = dPriv->pClipRects;
numClipRects = dPriv->numClipRects;
}
else {
srcPitch = screen->back.pitch; /* in bytes */
srcOffset = screen->back.offset; /* bytes */
clipRects = dPriv->pBackClipRects;
numClipRects = dPriv->numBackClipRects;
}
/* set the whole screen up as a texture to avoid alignment issues */
set_tex_rect_source(i915,
srcOffset,
screen->width,
screen->height,
srcPitch,
textureFormat);
enable_texture_blend_replace(i915);
/*
* loop over the source window's cliprects
*/
for (i = 0; i < numClipRects; i++) {
int srcX0 = clipRects[i].x1;
int srcY0 = clipRects[i].y1;
int srcX1 = clipRects[i].x2;
int srcY1 = clipRects[i].y2;
GLfloat verts[4][2], tex[4][2];
int j;
/* build vertices for four corners of clip rect */
verts[0][0] = srcX0; verts[0][1] = srcY0;
verts[1][0] = srcX1; verts[1][1] = srcY0;
verts[2][0] = srcX1; verts[2][1] = srcY1;
verts[3][0] = srcX0; verts[3][1] = srcY1;
/* .. and texcoords */
tex[0][0] = srcX0; tex[0][1] = srcY0;
tex[1][0] = srcX1; tex[1][1] = srcY0;
tex[2][0] = srcX1; tex[2][1] = srcY1;
tex[3][0] = srcX0; tex[3][1] = srcY1;
/* transform coords to rotated screen coords */
for (j = 0; j < 4; j++) {
matrix23TransformCoordf(&screen->rotMatrix,
&verts[j][0], &verts[j][1]);
}
/* draw polygon to map source image to dest region */
draw_poly(i915, 255, 255, 255, 255, 4, verts, tex);
} /* cliprect loop */
intelFlushBatchLocked( intel, GL_FALSE, GL_FALSE, GL_FALSE );
done:
/* restore original drawing origin and cliprects */
intel->drawX = xOrig;
intel->drawY = yOrig;
intel->numClipRects = origNumClipRects;
intel->pClipRects = origRects;
UNLOCK_HARDWARE(intel);
SET_STATE( i915, state );
SET_STATE(i915, meta);
set_vertex_format(intel);
meta_no_texture(intel);
}
/* Tear down a meta-op sequence: drop the references the meta state
 * holds on the color and depth draw regions, then switch the current
 * hardware state pointer back to the normal GL-driven 'state' block
 * (SET_STATE also dirties the ACTIVE register groups for re-emit).
 */
static void leave_meta_state( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
intel_region_release(intel, &i915->meta.draw_region);
intel_region_release(intel, &i915->meta.depth_region);
/* intel_region_release(intel, &i915->meta.tex_region[0]); */
SET_STATE(i915, state);
}
/* Hook the i915-specific meta-op implementations into the shared
 * intel_context vtbl, so chipset-independent code (clears, pixel ops)
 * can drive them without knowing about i915 register layout.
 */
void i915InitMetaFuncs( struct i915_context *i915 )
{
i915->intel.vtbl.install_meta_state = install_meta_state;
i915->intel.vtbl.leave_meta_state = leave_meta_state;
i915->intel.vtbl.meta_no_depth_write = meta_no_depth_write;
i915->intel.vtbl.meta_no_stencil_write = meta_no_stencil_write;
i915->intel.vtbl.meta_stencil_replace = meta_stencil_replace;
i915->intel.vtbl.meta_depth_replace = meta_depth_replace;
i915->intel.vtbl.meta_color_mask = meta_color_mask;
i915->intel.vtbl.meta_no_texture = meta_no_texture;
i915->intel.vtbl.meta_texture_blend_replace = meta_texture_blend_replace;
i915->intel.vtbl.meta_tex_rect_source = meta_tex_rect_source;
i915->intel.vtbl.meta_draw_region = meta_draw_region;
i915->intel.vtbl.meta_import_pixel_state = meta_import_pixel_state;
}

View file

@ -144,7 +144,8 @@ GLuint i915_emit_arith( struct i915_fragment_program *p,
GLuint nr_const = 0;
assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
assert(dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest));
assert(dest);
if (GET_UREG_TYPE(src0) == REG_TYPE_CONST) c[nr_const++] = 0;
if (GET_UREG_TYPE(src1) == REG_TYPE_CONST) c[nr_const++] = 1;
@ -202,7 +203,8 @@ GLuint i915_emit_texld( struct i915_fragment_program *p,
GLuint op )
{
assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
assert(dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest));
assert(dest);
if (GET_UREG_TYPE(coord) != REG_TYPE_T) {
p->nr_tex_indirect++;
@ -358,7 +360,7 @@ void i915_program_error( struct i915_fragment_program *p, const char *msg )
p->error = 1;
}
void i915_init_program( i915ContextPtr i915, struct i915_fragment_program *p )
void i915_init_program( struct i915_context *i915, struct i915_fragment_program *p )
{
GLcontext *ctx = &i915->intel.ctx;
TNLcontext *tnl = TNL_CONTEXT( ctx );
@ -431,7 +433,7 @@ void i915_fini_program( struct i915_fragment_program *p )
p->declarations[0] |= program_size + decl_size - 2;
}
void i915_upload_program( i915ContextPtr i915, struct i915_fragment_program *p )
void i915_upload_program( struct i915_context *i915, struct i915_fragment_program *p )
{
GLuint program_size = p->csr - p->program;
GLuint decl_size = p->decl - p->declarations;

View file

@ -84,7 +84,7 @@
/* One neat thing about the UREG representation:
*/
static __inline int swizzle( int reg, int x, int y, int z, int w )
static INLINE int swizzle( int reg, int x, int y, int z, int w )
{
return ((reg & ~UREG_XYZW_CHANNEL_MASK) |
CHANNEL_SRC( GET_CHANNEL_SRC( reg, x ), 0 ) |
@ -95,7 +95,7 @@ static __inline int swizzle( int reg, int x, int y, int z, int w )
/* Another neat thing about the UREG representation:
*/
static __inline int negate( int reg, int x, int y, int z, int w )
static INLINE int negate( int reg, int x, int y, int z, int w )
{
return reg ^ (((x&1)<<UREG_CHANNEL_X_NEGATE_SHIFT)|
((y&1)<<UREG_CHANNEL_Y_NEGATE_SHIFT)|
@ -149,10 +149,10 @@ extern GLuint i915_emit_param4fv( struct i915_fragment_program *p,
extern void i915_program_error( struct i915_fragment_program *p,
const char *msg );
extern void i915_init_program( i915ContextPtr i915,
extern void i915_init_program( struct i915_context *i915,
struct i915_fragment_program *p );
extern void i915_upload_program( i915ContextPtr i915,
extern void i915_upload_program( struct i915_context *i915,
struct i915_fragment_program *p );
extern void i915_fini_program( struct i915_fragment_program *p );

View file

@ -435,7 +435,7 @@
#define LOGICOP_MASK (0xf<<18)
#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
#define ENABLE_STENCIL_TEST_MASK (1<<17)
#define STENCIL_TEST_MASK(x) ((x)<<8)
#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
#define ENABLE_STENCIL_WRITE_MASK (1<<16)
#define STENCIL_WRITE_MASK(x) ((x)&0xff)
@ -826,10 +826,14 @@
#define ST1_ENABLE (1<<16)
#define ST1_MASK (0xffff)
#define _3DSTATE_DEFAULT_Z ((0x3<<29)|(0x1d<<24)|(0x98<<16))
#define _3DSTATE_DEFAULT_DIFFUSE ((0x3<<29)|(0x1d<<24)|(0x99<<16))
#define _3DSTATE_DEFAULT_SPECULAR ((0x3<<29)|(0x1d<<24)|(0x9a<<16))
#define MI_FLUSH ((0<<29)|(4<<23))
#define FLUSH_MAP_CACHE (1<<0)
#define FLUSH_RENDER_CACHE (1<<1)
#define MI_FLUSH ((0<<29)|(4<<23))
#define FLUSH_MAP_CACHE (1<<0)
#define INHIBIT_FLUSH_RENDER_CACHE (1<<2)
#endif

View file

@ -36,6 +36,7 @@
#include "texmem.h"
#include "intel_fbo.h"
#include "intel_screen.h"
#include "intel_batchbuffer.h"
@ -48,7 +49,7 @@ static void
i915StencilFuncSeparate(GLcontext *ctx, GLenum face, GLenum func, GLint ref,
GLuint mask)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
int test = intel_translate_compare_func( func );
mask = mask & 0xff;
@ -73,7 +74,7 @@ i915StencilFuncSeparate(GLcontext *ctx, GLenum face, GLenum func, GLint ref,
static void
i915StencilMaskSeparate(GLcontext *ctx, GLenum face, GLuint mask)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
if (INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s : mask 0x%x\n", __FUNCTION__, mask);
@ -91,7 +92,7 @@ static void
i915StencilOpSeparate(GLcontext *ctx, GLenum face, GLenum fail, GLenum zfail,
GLenum zpass)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
int fop = intel_translate_stencil_op(fail);
int dfop = intel_translate_stencil_op(zfail);
int dpop = intel_translate_stencil_op(zpass);
@ -116,7 +117,7 @@ i915StencilOpSeparate(GLcontext *ctx, GLenum face, GLenum fail, GLenum zfail,
static void i915AlphaFunc(GLcontext *ctx, GLenum func, GLfloat ref)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
int test = intel_translate_compare_func( func );
GLubyte refByte;
@ -137,7 +138,7 @@ static void i915AlphaFunc(GLcontext *ctx, GLenum func, GLfloat ref)
*/
static void i915EvalLogicOpBlendState(GLcontext *ctx)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
I915_STATECHANGE(i915, I915_UPLOAD_CTX);
@ -157,7 +158,7 @@ static void i915EvalLogicOpBlendState(GLcontext *ctx)
static void i915BlendColor(GLcontext *ctx, const GLfloat color[4])
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
GLubyte r, g, b, a;
if (INTEL_DEBUG&DEBUG_DRI)
@ -194,7 +195,7 @@ static GLuint translate_blend_equation( GLenum mode )
static void i915UpdateBlendState( GLcontext *ctx )
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
GLuint iab = (i915->state.Ctx[I915_CTXREG_IAB] &
~(IAB_SRC_FACTOR_MASK |
IAB_DST_FACTOR_MASK |
@ -261,7 +262,7 @@ static void i915BlendEquationSeparate(GLcontext *ctx, GLenum eqRGB,
static void i915DepthFunc(GLcontext *ctx, GLenum func)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
int test = intel_translate_compare_func( func );
if (INTEL_DEBUG&DEBUG_DRI)
@ -274,7 +275,7 @@ static void i915DepthFunc(GLcontext *ctx, GLenum func)
static void i915DepthMask(GLcontext *ctx, GLboolean flag)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
if (INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s flag (%d)\n", __FUNCTION__, flag);
@ -295,7 +296,7 @@ static void i915DepthMask(GLcontext *ctx, GLboolean flag)
*/
static void i915PolygonStipple( GLcontext *ctx, const GLubyte *mask )
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
const GLubyte *m = mask;
GLubyte p[4];
int i,j,k;
@ -348,15 +349,14 @@ static void i915PolygonStipple( GLcontext *ctx, const GLubyte *mask )
static void i915Scissor(GLcontext *ctx, GLint x, GLint y,
GLsizei w, GLsizei h)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
intelScreenPrivate *screen = i915->intel.intelScreen;
struct i915_context *i915 = I915_CONTEXT(ctx);
int x1, y1, x2, y2;
if (!i915->intel.driDrawable)
if (!ctx->DrawBuffer)
return;
x1 = x;
y1 = i915->intel.driDrawable->h - (y + h);
y1 = ctx->DrawBuffer->Height - (y + h);
x2 = x + w - 1;
y2 = y1 + h - 1;
@ -364,16 +364,10 @@ static void i915Scissor(GLcontext *ctx, GLint x, GLint y,
fprintf(stderr, "[%s] x(%d) y(%d) w(%d) h(%d)\n", __FUNCTION__,
x, y, w, h);
if (x1 < 0) x1 = 0;
if (y1 < 0) y1 = 0;
if (x2 < 0) x2 = 0;
if (y2 < 0) y2 = 0;
if (x2 >= screen->width) x2 = screen->width-1;
if (y2 >= screen->height) y2 = screen->height-1;
if (x1 >= screen->width) x1 = screen->width-1;
if (y1 >= screen->height) y1 = screen->height-1;
x1 = CLAMP(x1, 0, ctx->DrawBuffer->Width - 1);
y1 = CLAMP(y1, 0, ctx->DrawBuffer->Height - 1);
x2 = CLAMP(x2, 0, ctx->DrawBuffer->Width - 1);
y2 = CLAMP(y2, 0, ctx->DrawBuffer->Height - 1);
I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
i915->state.Buffer[I915_DESTREG_SR1] = (y1 << 16) | (x1 & 0xffff);
@ -382,7 +376,7 @@ static void i915Scissor(GLcontext *ctx, GLint x, GLint y,
static void i915LogicOp(GLcontext *ctx, GLenum opcode)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
int tmp = intel_translate_logic_op(opcode);
if (INTEL_DEBUG&DEBUG_DRI)
@ -397,11 +391,12 @@ static void i915LogicOp(GLcontext *ctx, GLenum opcode)
static void i915CullFaceFrontFace(GLcontext *ctx, GLenum unused)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
GLuint mode;
if (INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s\n", __FUNCTION__);
fprintf(stderr, "%s %d\n", __FUNCTION__,
ctx->DrawBuffer ? ctx->DrawBuffer->Name : 0);
if (!ctx->Polygon.CullFlag) {
mode = S4_CULLMODE_NONE;
@ -409,6 +404,8 @@ static void i915CullFaceFrontFace(GLcontext *ctx, GLenum unused)
else if (ctx->Polygon.CullFaceMode != GL_FRONT_AND_BACK) {
mode = S4_CULLMODE_CW;
if (ctx->DrawBuffer && ctx->DrawBuffer->Name != 0)
mode ^= (S4_CULLMODE_CW ^ S4_CULLMODE_CCW);
if (ctx->Polygon.CullFaceMode == GL_FRONT)
mode ^= (S4_CULLMODE_CW ^ S4_CULLMODE_CCW);
if (ctx->Polygon.FrontFace != GL_CCW)
@ -425,7 +422,7 @@ static void i915CullFaceFrontFace(GLcontext *ctx, GLenum unused)
static void i915LineWidth( GLcontext *ctx, GLfloat widthf )
{
i915ContextPtr i915 = I915_CONTEXT( ctx );
struct i915_context *i915 = I915_CONTEXT( ctx );
int lis4 = i915->state.Ctx[I915_CTXREG_LIS4] & ~S4_LINE_WIDTH_MASK;
int width;
@ -444,7 +441,7 @@ static void i915LineWidth( GLcontext *ctx, GLfloat widthf )
static void i915PointSize(GLcontext *ctx, GLfloat size)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
int lis4 = i915->state.Ctx[I915_CTXREG_LIS4] & ~S4_POINT_WIDTH_MASK;
GLint point_size = (int)size;
@ -469,7 +466,7 @@ static void i915ColorMask(GLcontext *ctx,
GLboolean r, GLboolean g,
GLboolean b, GLboolean a)
{
i915ContextPtr i915 = I915_CONTEXT( ctx );
struct i915_context *i915 = I915_CONTEXT( ctx );
GLuint tmp = i915->state.Ctx[I915_CTXREG_LIS5] & ~S5_WRITEDISABLE_MASK;
if (INTEL_DEBUG&DEBUG_DRI)
@ -490,7 +487,7 @@ static void update_specular( GLcontext *ctx )
{
/* A hack to trigger the rebuild of the fragment program.
*/
INTEL_CONTEXT(ctx)->NewGLState |= _NEW_TEXTURE;
intel_context(ctx)->NewGLState |= _NEW_TEXTURE;
I915_CONTEXT(ctx)->tex_program.translated = 0;
}
@ -507,7 +504,7 @@ static void i915LightModelfv(GLcontext *ctx, GLenum pname,
static void i915ShadeModel(GLcontext *ctx, GLenum mode)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
I915_STATECHANGE(i915, I915_UPLOAD_CTX);
if (mode == GL_SMOOTH) {
@ -526,7 +523,7 @@ static void i915ShadeModel(GLcontext *ctx, GLenum mode)
*/
void i915_update_fog( GLcontext *ctx )
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
GLenum mode;
GLboolean enabled;
GLboolean try_pixel_fog;
@ -619,7 +616,7 @@ void i915_update_fog( GLcontext *ctx )
static void i915Fogfv(GLcontext *ctx, GLenum pname, const GLfloat *param)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
switch (pname) {
case GL_FOG_COORDINATE_SOURCE_EXT:
@ -671,7 +668,7 @@ static void i915Hint(GLcontext *ctx, GLenum target, GLenum state)
static void i915Enable(GLcontext *ctx, GLenum cap, GLboolean state)
{
i915ContextPtr i915 = I915_CONTEXT(ctx);
struct i915_context *i915 = I915_CONTEXT(ctx);
switch(cap) {
case GL_TEXTURE_2D:
@ -699,7 +696,7 @@ static void i915Enable(GLcontext *ctx, GLenum cap, GLboolean state)
/* Logicop doesn't seem to work at 16bpp:
*/
if (i915->intel.intelScreen->cpp == 2)
if (i915->intel.intelScreen->cpp == 2) /* XXX FBO fix */
FALLBACK( &i915->intel, I915_FALLBACK_LOGICOP, state );
break;
@ -750,16 +747,24 @@ static void i915Enable(GLcontext *ctx, GLenum cap, GLboolean state)
break;
case GL_STENCIL_TEST:
if (i915->intel.hw_stencil) {
I915_STATECHANGE(i915, I915_UPLOAD_CTX);
if (state)
i915->state.Ctx[I915_CTXREG_LIS5] |= (S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
else
i915->state.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
} else {
FALLBACK( &i915->intel, I915_FALLBACK_STENCIL, state );
{
GLboolean hw_stencil = GL_FALSE;
if (ctx->DrawBuffer) {
struct intel_renderbuffer *irbStencil
= intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_STENCIL);
hw_stencil = (irbStencil && irbStencil->region);
}
if (hw_stencil) {
I915_STATECHANGE(i915, I915_UPLOAD_CTX);
if (state)
i915->state.Ctx[I915_CTXREG_LIS5] |= (S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
else
i915->state.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
} else {
FALLBACK( &i915->intel, I915_FALLBACK_STENCIL, state );
}
}
break;
@ -793,7 +798,7 @@ static void i915Enable(GLcontext *ctx, GLenum cap, GLboolean state)
}
static void i915_init_packets( i915ContextPtr i915 )
static void i915_init_packets( struct i915_context *i915 )
{
intelScreenPrivate *screen = i915->intel.intelScreen;
@ -816,7 +821,7 @@ static void i915_init_packets( i915ContextPtr i915 )
i915->state.Ctx[I915_CTXREG_LIS4] = 0;
i915->state.Ctx[I915_CTXREG_LIS5] = 0;
if (screen->cpp == 2)
if (screen->cpp == 2) /* XXX FBO fix */
i915->state.Ctx[I915_CTXREG_LIS5] |= S5_COLOR_DITHER_ENABLE;
@ -831,7 +836,6 @@ static void i915_init_packets( i915ContextPtr i915 )
ENABLE_STENCIL_WRITE_MASK |
STENCIL_WRITE_MASK(0xff));
i915->state.Ctx[I915_CTXREG_IAB] = (_3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
IAB_MODIFY_ENABLE |
IAB_MODIFY_FUNC |
@ -866,27 +870,24 @@ static void i915_init_packets( i915ContextPtr i915 )
I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
/* color buffer offset/stride */
i915->state.Buffer[I915_DESTREG_CBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
/* XXX FBO: remove this? Also get set in i915_set_draw_region() */
i915->state.Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK |
BUF_3D_PITCH(screen->front.pitch) | /* pitch in bytes */
BUF_3D_USE_FENCE);
/*i915->state.Buffer[I915_DESTREG_CBUFADDR2] is the offset */
/* depth/Z buffer offset/stride */
i915->state.Buffer[I915_DESTREG_DBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
/* XXX FBO: remove this? Also get set in i915_set_draw_region() */
i915->state.Buffer[I915_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH |
BUF_3D_PITCH(screen->depth.pitch) | /* pitch in bytes */
BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_DBUFADDR2] = screen->depth.offset;
i915->state.Buffer[I915_DESTREG_DV0] = _3DSTATE_DST_BUF_VARS_CMD;
/* color/depth pixel format */
/* XXX FBO: remove this? Also get set in i915_set_draw_region() */
#if 0 /* seems we don't need this */
switch (screen->fbFormat) {
case DV_PF_555:
case DV_PF_565:
i915->state.Buffer[I915_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | /* .5 */
@ -905,6 +906,8 @@ static void i915_init_packets( i915ContextPtr i915 )
DEPTH_FRMT_24_FIXED_8_OTHER);
break;
}
#endif
/* scissor */
i915->state.Buffer[I915_DESTREG_SENABLE] = (_3DSTATE_SCISSOR_ENABLE_CMD |
@ -915,13 +918,27 @@ static void i915_init_packets( i915ContextPtr i915 )
}
#if 0
{
I915_STATECHANGE(i915, I915_UPLOAD_DEFAULTS);
i915->state.Default[I915_DEFREG_C0] = _3DSTATE_DEFAULT_DIFFUSE;
i915->state.Default[I915_DEFREG_C1] = 0;
i915->state.Default[I915_DEFREG_S0] = _3DSTATE_DEFAULT_SPECULAR;
i915->state.Default[I915_DEFREG_S1] = 0;
i915->state.Default[I915_DEFREG_Z0] = _3DSTATE_DEFAULT_Z;
i915->state.Default[I915_DEFREG_Z1] = 0;
}
#endif
/* These will be emitted every at the head of every buffer, unless
* we get hardware contexts working.
*/
i915->state.active = (I915_UPLOAD_PROGRAM |
I915_UPLOAD_STIPPLE |
I915_UPLOAD_CTX |
I915_UPLOAD_BUFFERS);
I915_UPLOAD_STIPPLE |
I915_UPLOAD_CTX |
I915_UPLOAD_BUFFERS |
I915_UPLOAD_INVARIENT);
}
void i915InitStateFunctions( struct dd_function_table *functions )
@ -951,7 +968,7 @@ void i915InitStateFunctions( struct dd_function_table *functions )
}
void i915InitState( i915ContextPtr i915 )
void i915InitState( struct i915_context *i915 )
{
GLcontext *ctx = &i915->intel.ctx;

View file

@ -45,76 +45,10 @@
/**
* Allocate space for and load the mesa images into the texture memory block.
* This will happen before drawing with a new texture, or drawing with a
* texture after it was swapped out or teximaged again.
*/
intelTextureObjectPtr i915AllocTexObj( struct gl_texture_object *texObj )
{
i915TextureObjectPtr t = CALLOC_STRUCT( i915_texture_object );
if ( !t )
return NULL;
texObj->DriverData = t;
t->intel.base.tObj = texObj;
t->intel.dirty = I915_UPLOAD_TEX_ALL;
make_empty_list( &t->intel.base );
return &t->intel;
}
static void i915TexParameter( GLcontext *ctx, GLenum target,
struct gl_texture_object *tObj,
GLenum pname, const GLfloat *params )
{
i915TextureObjectPtr t = (i915TextureObjectPtr) tObj->DriverData;
switch (pname) {
case GL_TEXTURE_MIN_FILTER:
case GL_TEXTURE_MAG_FILTER:
case GL_TEXTURE_MAX_ANISOTROPY_EXT:
case GL_TEXTURE_WRAP_S:
case GL_TEXTURE_WRAP_T:
case GL_TEXTURE_WRAP_R:
case GL_TEXTURE_BORDER_COLOR:
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
case GL_TEXTURE_COMPARE_MODE:
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
case GL_TEXTURE_COMPARE_FUNC:
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
case GL_TEXTURE_BASE_LEVEL:
case GL_TEXTURE_MAX_LEVEL:
case GL_TEXTURE_MIN_LOD:
case GL_TEXTURE_MAX_LOD:
/* The i915 and its successors can do a lot of this without
* reloading the textures. A project for someone?
*/
intelFlush( ctx );
driSwapOutTextureObject( (driTextureObject *) t );
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
default:
return;
}
}
static void i915TexEnv( GLcontext *ctx, GLenum target,
GLenum pname, const GLfloat *param )
{
i915ContextPtr i915 = I915_CONTEXT( ctx );
GLuint unit = ctx->Texture.CurrentUnit;
struct i915_context *i915 = I915_CONTEXT( ctx );
switch (pname) {
case GL_TEXTURE_ENV_COLOR: /* Should be a tracked param */
@ -139,13 +73,12 @@ static void i915TexEnv( GLcontext *ctx, GLenum target,
break;
case GL_TEXTURE_LOD_BIAS: {
int b = (int) ((*param) * 16.0);
GLuint unit = ctx->Texture.CurrentUnit;
GLint b = (int) ((*param) * 16.0);
if (b > 255) b = 255;
if (b < -256) b = -256;
I915_STATECHANGE(i915, I915_UPLOAD_TEX(unit));
i915->state.Tex[unit][I915_TEXREG_SS2] &= ~SS2_LOD_BIAS_MASK;
i915->state.Tex[unit][I915_TEXREG_SS2] |=
((b << SS2_LOD_BIAS_SHIFT) & SS2_LOD_BIAS_MASK);
i915->lodbias_ss2[unit] = ((b << SS2_LOD_BIAS_SHIFT) & SS2_LOD_BIAS_MASK);
break;
}
@ -156,20 +89,8 @@ static void i915TexEnv( GLcontext *ctx, GLenum target,
static void i915BindTexture( GLcontext *ctx, GLenum target,
struct gl_texture_object *texObj )
struct gl_texture_object *texobj )
{
i915TextureObjectPtr tex;
if (!texObj->DriverData)
i915AllocTexObj( texObj );
tex = (i915TextureObjectPtr)texObj->DriverData;
if (tex->lastTarget != texObj->Target) {
tex->intel.dirty = I915_UPLOAD_TEX_ALL;
tex->lastTarget = texObj->Target;
}
/* Need this if image format changes between bound textures.
* Could try and shortcircuit by checking for differences in
* state between incoming and outgoing textures:
@ -183,5 +104,4 @@ void i915InitTextureFuncs( struct dd_function_table *functions )
{
functions->BindTexture = i915BindTexture;
functions->TexEnv = i915TexEnv;
functions->TexParameter = i915TexParameter;
}

View file

@ -536,7 +536,7 @@ static void emit_program_fini( struct i915_fragment_program *p )
}
static void i915EmitTextureProgram( i915ContextPtr i915 )
static void i915EmitTextureProgram( struct i915_context *i915 )
{
GLcontext *ctx = &i915->intel.ctx;
struct i915_fragment_program *p = &i915->tex_program;
@ -570,9 +570,9 @@ static void i915EmitTextureProgram( i915ContextPtr i915 )
}
void i915ValidateTextureProgram( i915ContextPtr i915 )
void i915ValidateTextureProgram( struct i915_context *i915 )
{
intelContextPtr intel = &i915->intel;
struct intel_context *intel = &i915->intel;
GLcontext *ctx = &intel->ctx;
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
@ -642,7 +642,8 @@ void i915ValidateTextureProgram( i915ContextPtr i915 )
*/
if (s2 != i915->state.Ctx[I915_CTXREG_LIS2] ||
s4 != i915->state.Ctx[I915_CTXREG_LIS4]) {
int k;
I915_STATECHANGE( i915, I915_UPLOAD_CTX );
i915->tex_program.translated = 0;
@ -660,7 +661,8 @@ void i915ValidateTextureProgram( i915ContextPtr i915 )
i915->state.Ctx[I915_CTXREG_LIS2] = s2;
i915->state.Ctx[I915_CTXREG_LIS4] = s4;
assert(intel->vtbl.check_vertex_size( intel, intel->vertex_size ));
k = intel->vtbl.check_vertex_size( intel, intel->vertex_size );
assert(k);
}
if (!i915->tex_program.translated ||

File diff suppressed because it is too large Load diff

View file

@ -37,14 +37,16 @@
#include "tnl/t_vertex.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_regions.h"
#include "i915_reg.h"
#include "i915_context.h"
static void i915_render_start( intelContextPtr intel )
static void i915_render_start( struct intel_context *intel )
{
GLcontext *ctx = &intel->ctx;
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
if (ctx->FragmentProgram._Active)
i915ValidateFragmentProgram( i915 );
@ -53,42 +55,42 @@ static void i915_render_start( intelContextPtr intel )
}
static void i915_reduced_primitive_state( intelContextPtr intel,
static void i915_reduced_primitive_state( struct intel_context *intel,
GLenum rprim )
{
i915ContextPtr i915 = I915_CONTEXT(intel);
GLuint st1 = i915->state.Stipple[I915_STPREG_ST1];
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint st1 = i915->state.Stipple[I915_STPREG_ST1];
st1 &= ~ST1_ENABLE;
st1 &= ~ST1_ENABLE;
switch (rprim) {
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag &&
intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
switch (rprim) {
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag &&
intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
i915->intel.reduced_primitive = rprim;
i915->intel.reduced_primitive = rprim;
if (st1 != i915->state.Stipple[I915_STPREG_ST1]) {
I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
i915->state.Stipple[I915_STPREG_ST1] = st1;
}
if (st1 != i915->state.Stipple[I915_STPREG_ST1]) {
I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
i915->state.Stipple[I915_STPREG_ST1] = st1;
}
}
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
static GLboolean i915_check_vertex_size( intelContextPtr intel,
static GLboolean i915_check_vertex_size( struct intel_context *intel,
GLuint expected )
{
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
int lis2 = i915->current->Ctx[I915_CTXREG_LIS2];
int lis4 = i915->current->Ctx[I915_CTXREG_LIS4];
int i, sz = 0;
@ -132,11 +134,11 @@ static GLboolean i915_check_vertex_size( intelContextPtr intel,
}
static void i915_emit_invarient_state( intelContextPtr intel )
static void i915_emit_invarient_state( struct intel_context *intel )
{
BATCH_LOCALS;
BEGIN_BATCH( 200 );
BEGIN_BATCH( 200, 0 );
OUT_BATCH(_3DSTATE_AA_CMD |
AA_LINE_ECAAR_WIDTH_ENABLE |
@ -204,14 +206,8 @@ static void i915_emit_invarient_state( intelContextPtr intel )
}
#define emit( intel, state, size ) \
do { \
int k; \
BEGIN_BATCH( (size) / sizeof(GLuint)); \
for (k = 0 ; k < (size) / sizeof(GLuint) ; k++) \
OUT_BATCH((state)[k]); \
ADVANCE_BATCH(); \
} while (0);
#define emit(intel, state, size ) \
intel_batchbuffer_data(intel->batch, state, size, 0 )
static GLuint get_dirty( struct i915_hw_state *state )
{
@ -268,42 +264,70 @@ static GLuint get_state_size( struct i915_hw_state *state )
/* Push the state into the sarea and/or texture memory.
*/
static void i915_emit_state( intelContextPtr intel )
static void i915_emit_state( struct intel_context *intel )
{
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = i915->current;
int i;
GLuint dirty = get_dirty(state);
GLuint counter = intel->batch.counter;
BATCH_LOCALS;
if (intel->batch.space < get_state_size(state)) {
intelFlushBatch(intel, GL_TRUE);
dirty = get_dirty(state);
counter = intel->batch.counter;
}
/* We don't hold the lock at this point, so want to make sure that
* there won't be a buffer wrap.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel->batch,
get_state_size(state),
0);
if (VERBOSE)
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
if (dirty & I915_UPLOAD_INVARIENT) {
if (INTEL_DEBUG & DEBUG_STATE) fprintf(stderr, "I915_UPLOAD_INVARIENT:\n");
i915_emit_invarient_state( intel );
}
if (dirty & I915_UPLOAD_CTX) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_CTX:\n");
emit( i915, state->Ctx, sizeof(state->Ctx) );
if (INTEL_DEBUG & DEBUG_STATE) fprintf(stderr, "I915_UPLOAD_CTX:\n");
emit(intel, state->Ctx, sizeof(state->Ctx) );
}
if (dirty & I915_UPLOAD_BUFFERS) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
emit( i915, state->Buffer, sizeof(state->Buffer) );
if (INTEL_DEBUG & DEBUG_STATE) fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
BEGIN_BATCH(I915_DEST_SETUP_SIZE+2, 0);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer, DRM_MM_TT|DRM_MM_WRITE,
state->draw_region->draw_offset);
if (state->depth_region) {
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR1]);
OUT_RELOC(state->depth_region->buffer, DRM_MM_TT|DRM_MM_WRITE,
state->depth_region->draw_offset);
}
OUT_BATCH(state->Buffer[I915_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DV1]);
OUT_BATCH(state->Buffer[I915_DESTREG_SENABLE]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR1]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR2]);
ADVANCE_BATCH();
}
if (dirty & I915_UPLOAD_STIPPLE) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_STIPPLE:\n");
emit( i915, state->Stipple, sizeof(state->Stipple) );
if (INTEL_DEBUG & DEBUG_STATE) fprintf(stderr, "I915_UPLOAD_STIPPLE:\n");
emit(intel, state->Stipple, sizeof(state->Stipple) );
}
if (dirty & I915_UPLOAD_FOG) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_FOG:\n");
emit( i915, state->Fog, sizeof(state->Fog) );
if (INTEL_DEBUG & DEBUG_STATE) fprintf(stderr, "I915_UPLOAD_FOG:\n");
emit(intel, state->Fog, sizeof(state->Fog) );
}
/* Combine all the dirty texture state into a single command to
@ -316,18 +340,29 @@ static void i915_emit_state( intelContextPtr intel )
if (dirty & I915_UPLOAD_TEX(i))
nr++;
BEGIN_BATCH(2+nr*3);
BEGIN_BATCH(2+nr*3, 0);
OUT_BATCH(_3DSTATE_MAP_STATE | (3*nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0 ; i < I915_TEX_UNITS ; i++)
if (dirty & I915_UPLOAD_TEX(i)) {
OUT_BATCH(state->Tex[i][I915_TEXREG_MS2]);
if (state->tex_buffer[i]) {
OUT_RELOC(state->tex_buffer[i],
DRM_MM_TT|DRM_MM_READ,
state->tex_offset[i]);
}
else {
assert(i == 0);
assert(state == &i915->meta);
OUT_BATCH(0);
}
OUT_BATCH(state->Tex[i][I915_TEXREG_MS3]);
OUT_BATCH(state->Tex[i][I915_TEXREG_MS4]);
}
ADVANCE_BATCH();
BEGIN_BATCH(2+nr*3);
BEGIN_BATCH(2+nr*3, 0);
OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3*nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0 ; i < I915_TEX_UNITS ; i++)
@ -340,114 +375,132 @@ static void i915_emit_state( intelContextPtr intel )
}
if (dirty & I915_UPLOAD_CONSTANTS) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
emit( i915, state->Constant, state->ConstantSize * sizeof(GLuint) );
if (INTEL_DEBUG & DEBUG_STATE) fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
emit(intel, state->Constant, state->ConstantSize * sizeof(GLuint) );
}
if (dirty & I915_UPLOAD_PROGRAM) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");
if (INTEL_DEBUG & DEBUG_STATE) fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");
assert((state->Program[0] & 0x1ff)+2 == state->ProgramSize);
emit( i915, state->Program, state->ProgramSize * sizeof(GLuint) );
if (VERBOSE)
emit(intel, state->Program, state->ProgramSize * sizeof(GLuint) );
if (INTEL_DEBUG & DEBUG_STATE)
i915_disassemble_program( state->Program, state->ProgramSize );
}
state->emitted |= dirty;
intel->batch.last_emit_state = counter;
assert(counter == intel->batch.counter);
}
static void i915_destroy_context( intelContextPtr intel )
static void i915_destroy_context( struct intel_context *intel )
{
_tnl_free_vertices(&intel->ctx);
}
/**
* Set the color buffer drawing region.
* Set the drawing regions for the color and depth/stencil buffers.
* This involves setting the pitch, cpp and buffer ID/location.
* Also set pixel format for color and Z rendering
* Used for setting both regular and meta state.
*/
static void
i915_set_color_region( intelContextPtr intel, const intelRegion *region)
void
i915_state_draw_region(struct intel_context *intel,
struct i915_hw_state *state,
struct intel_region *color_region,
struct intel_region *depth_region)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint value;
ASSERT(state == &i915->state || state == &i915->meta);
if (state->draw_region != color_region) {
intel_region_release(intel, &state->draw_region);
intel_region_reference(&state->draw_region, color_region);
}
if (state->depth_region != depth_region) {
intel_region_release(intel, &state->depth_region);
intel_region_reference(&state->depth_region, depth_region);
}
/*
* Set stride/cpp values
*/
if (color_region) {
state->Buffer[I915_DESTREG_CBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
state->Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK |
BUF_3D_PITCH(color_region->pitch * color_region->cpp) |
BUF_3D_USE_FENCE);
}
if (depth_region) {
state->Buffer[I915_DESTREG_DBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
state->Buffer[I915_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH |
BUF_3D_PITCH(depth_region->pitch * depth_region->cpp) |
BUF_3D_USE_FENCE);
}
/*
* Compute/set I915_DESTREG_DV1 value
*/
value = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | /* .5 */
LOD_PRECLAMP_OGL |
TEX_DEFAULT_COLOR_OGL);
if (color_region && color_region->cpp == 4) {
value |= DV_PF_8888;
}
else {
value |= (DITHER_FULL_ALWAYS | DV_PF_565);
}
if (depth_region && depth_region->cpp == 4) {
value |= DEPTH_FRMT_24_FIXED_8_OTHER;
}
else {
value |= DEPTH_FRMT_16_FIXED;
}
state->Buffer[I915_DESTREG_DV1] = value;
I915_STATECHANGE( i915, I915_UPLOAD_BUFFERS );
i915->state.Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_CBUFADDR2] = region->offset;
}
/**
* specify the z-buffer/stencil region
*/
static void
i915_set_z_region( intelContextPtr intel, const intelRegion *region)
i915_set_draw_region(struct intel_context *intel,
struct intel_region *color_region,
struct intel_region *depth_region)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
I915_STATECHANGE( i915, I915_UPLOAD_BUFFERS );
i915->state.Buffer[I915_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_DBUFADDR2] = region->offset;
struct i915_context *i915 = i915_context(&intel->ctx);
i915_state_draw_region(intel, &i915->state, color_region, depth_region);
}
/**
* Set both the color and Z/stencil drawing regions.
* Similar to two previous functions, but don't use I915_STATECHANGE()
*/
static void
i915_update_color_z_regions(intelContextPtr intel,
const intelRegion *colorRegion,
const intelRegion *depthRegion)
static void i915_lost_hardware( struct intel_context *intel )
{
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
i915->state.emitted = 0;
}
i915->state.Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(colorRegion->pitch) | BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_CBUFADDR2] = colorRegion->offset;
i915->state.Buffer[I915_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH |
BUF_3D_PITCH(depthRegion->pitch) | /* pitch in bytes */
BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_DBUFADDR2] = depthRegion->offset;
static GLuint i915_flush_cmd( void )
{
return MI_FLUSH | FLUSH_MAP_CACHE;
}
static void i915_lost_hardware( intelContextPtr intel )
void i915InitVtbl( struct i915_context *i915 )
{
I915_CONTEXT(intel)->state.emitted = 0;
}
static void i915_emit_flush( intelContextPtr intel )
{
BATCH_LOCALS;
BEGIN_BATCH(2);
OUT_BATCH( MI_FLUSH | FLUSH_MAP_CACHE | FLUSH_RENDER_CACHE );
OUT_BATCH( 0 );
ADVANCE_BATCH();
}
void i915InitVtbl( i915ContextPtr i915 )
{
i915->intel.vtbl.alloc_tex_obj = i915AllocTexObj;
i915->intel.vtbl.check_vertex_size = i915_check_vertex_size;
i915->intel.vtbl.clear_with_tris = i915ClearWithTris;
i915->intel.vtbl.rotate_window = i915RotateWindow;
i915->intel.vtbl.destroy = i915_destroy_context;
i915->intel.vtbl.emit_invarient_state = i915_emit_invarient_state;
i915->intel.vtbl.emit_state = i915_emit_state;
i915->intel.vtbl.lost_hardware = i915_lost_hardware;
i915->intel.vtbl.reduced_primitive_state = i915_reduced_primitive_state;
i915->intel.vtbl.render_start = i915_render_start;
i915->intel.vtbl.set_color_region = i915_set_color_region;
i915->intel.vtbl.set_z_region = i915_set_z_region;
i915->intel.vtbl.update_color_z_regions = i915_update_color_z_regions;
i915->intel.vtbl.set_draw_region = i915_set_draw_region;
i915->intel.vtbl.update_texture_state = i915UpdateTextureState;
i915->intel.vtbl.emit_flush = i915_emit_flush;
i915->intel.vtbl.flush_cmd = i915_flush_cmd;
}

View file

@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -25,760 +25,270 @@
*
**************************************************************************/
#include <stdio.h>
#include <errno.h>
#include "mtypes.h"
#include "context.h"
#include "enums.h"
#include "vblank.h"
#include "intel_reg.h"
#include "intel_batchbuffer.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_bufmgr.h"
/* ================================================================
* Performance monitoring functions
/* Relocations in kernel space:
* - pass dma buffer seperately
* - memory manager knows how to patch
* - pass list of dependent buffers
* - pass relocation list
*
* Either:
* - get back an offset for buffer to fire
* - memory manager knows how to fire buffer
*
* Really want the buffer to be AGP and pinned.
*
*/
static void intel_fill_box( intelContextPtr intel,
GLshort x, GLshort y,
GLshort w, GLshort h,
GLubyte r, GLubyte g, GLubyte b )
/* Cliprect fence: The highest fence protecting a dma buffer
* containing explicit cliprect information. Like the old drawable
* lock but irq-driven. X server must wait for this fence to expire
* before changing cliprects [and then doing sw rendering?]. For
* other dma buffers, the scheduler will grab current cliprect info
* and mix into buffer. X server must hold the lock while changing
* cliprects??? Make per-drawable. Need cliprects in shared memory
* -- beats storing them with every cmd buffer in the queue.
*
* ==> X server must wait for this fence to expire before touching the
* framebuffer with new cliprects.
*
* ==> Cliprect-dependent buffers associated with a
* cliprect-timestamp. All of the buffers associated with a timestamp
* must go to hardware before any buffer with a newer timestamp.
*
* ==> Dma should be queued per-drawable for correct X/GL
* synchronization. Or can fences be used for this?
*
* Applies to: Blit operations, metaops, X server operations -- X
* server automatically waits on its own dma to complete before
* modifying cliprects ???
*/
static void intel_dump_batchbuffer( GLuint offset,
GLuint *ptr,
GLuint count )
{
x += intel->drawX;
y += intel->drawY;
if (x >= 0 && y >= 0 &&
x+w < intel->intelScreen->width &&
y+h < intel->intelScreen->height)
intelEmitFillBlitLocked( intel,
intel->intelScreen->cpp,
intel->intelScreen->back.pitch,
intel->intelScreen->back.offset,
x, y, w, h,
INTEL_PACKCOLOR(intel->intelScreen->fbFormat,
r,g,b,0xff));
}
static void intel_draw_performance_boxes( intelContextPtr intel )
{
/* Purple box for page flipping
*/
if ( intel->perf_boxes & I830_BOX_FLIP )
intel_fill_box( intel, 4, 4, 8, 8, 255, 0, 255 );
/* Red box if we have to wait for idle at any point
*/
if ( intel->perf_boxes & I830_BOX_WAIT )
intel_fill_box( intel, 16, 4, 8, 8, 255, 0, 0 );
/* Blue box: lost context?
*/
if ( intel->perf_boxes & I830_BOX_LOST_CONTEXT )
intel_fill_box( intel, 28, 4, 8, 8, 0, 0, 255 );
/* Yellow box for texture swaps
*/
if ( intel->perf_boxes & I830_BOX_TEXTURE_LOAD )
intel_fill_box( intel, 40, 4, 8, 8, 255, 255, 0 );
/* Green box if hardware never idles (as far as we can tell)
*/
if ( !(intel->perf_boxes & I830_BOX_RING_EMPTY) )
intel_fill_box( intel, 64, 4, 8, 8, 0, 255, 0 );
/* Draw bars indicating number of buffers allocated
* (not a great measure, easily confused)
*/
#if 0
if (intel->dma_used) {
int bar = intel->dma_used / 10240;
if (bar > 100) bar = 100;
if (bar < 1) bar = 1;
intel_fill_box( intel, 4, 16, bar, 4, 196, 128, 128 );
intel->dma_used = 0;
}
#endif
intel->perf_boxes = 0;
int i;
fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count/4);
for (i = 0; i < count/4; i += 4)
fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
offset + i*4, ptr[i], ptr[i+1], ptr[i+2], ptr[i+3]);
fprintf(stderr, "END BATCH\n\n\n");
}
static int bad_prim_vertex_nr( int primitive, int nr )
void intel_batchbuffer_reset( struct intel_batchbuffer *batch )
{
switch (primitive & PRIM3D_MASK) {
case PRIM3D_POINTLIST:
return nr < 1;
case PRIM3D_LINELIST:
return (nr & 1) || nr == 0;
case PRIM3D_LINESTRIP:
return nr < 2;
case PRIM3D_TRILIST:
case PRIM3D_RECTLIST:
return nr % 3 || nr == 0;
case PRIM3D_POLY:
case PRIM3D_TRIFAN:
case PRIM3D_TRISTRIP:
case PRIM3D_TRISTRIP_RVRSE:
return nr < 3;
default:
return 1;
}
bmBufferData(batch->bm,
batch->buffer,
BATCH_SZ,
NULL,
0);
if (!batch->list)
batch->list = bmNewBufferList();
drmMMClearBufList(batch->list);
batch->list_count = 0;
batch->nr_relocs = 0;
batch->flags = 0;
bmAddBuffer( batch->bm,
batch->list,
batch->buffer,
DRM_MM_TT,
NULL,
&batch->offset[batch->list_count++]);
batch->map = bmMapBuffer(batch->bm, batch->buffer, DRM_MM_WRITE);
batch->ptr = batch->map;
}
static void intel_flush_inline_primitive( GLcontext *ctx )
/*======================================================================
* Public functions
*/
struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel )
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
GLuint used = intel->batch.ptr - intel->prim.start_ptr;
GLuint vertcount;
struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
assert(intel->prim.primitive != ~0);
batch->intel = intel;
batch->bm = intel->bm;
if (1) {
/* Check vertex size against the vertex we're specifying to
* hardware. If it's wrong, ditch the primitive.
*/
if (!intel->vtbl.check_vertex_size( intel, intel->vertex_size ))
goto do_discard;
bmGenBuffers(intel->bm, 1, &batch->buffer, BM_BATCHBUFFER);
batch->last_fence = bmInitFence(batch->bm);
intel_batchbuffer_reset( batch );
return batch;
}
vertcount = (used - 4)/ (intel->vertex_size * 4);
void intel_batchbuffer_free( struct intel_batchbuffer *batch )
{
if (batch->map)
bmUnmapBuffer(batch->bm, batch->buffer);
free(batch);
}
if (!vertcount)
goto do_discard;
/* TODO: Push this whole function into bufmgr.
*/
static void do_flush_locked( struct intel_batchbuffer *batch,
GLuint used,
GLboolean ignore_cliprects,
GLboolean allow_unlock)
{
GLuint *ptr;
GLuint i;
bmValidateBufferList( batch->bm,
batch->list,
DRM_MM_TT );
/* Apply the relocations. This nasty map indicates to me that the
* whole task should be done internally by the memory manager, and
* that dma buffers probably need to be pinned within agp space.
*/
ptr = (GLuint *)bmMapBuffer(batch->bm, batch->buffer, DRM_MM_WRITE);
for (i = 0; i < batch->nr_relocs; i++) {
struct buffer_reloc *r = &batch->reloc[i];
if (vertcount * intel->vertex_size * 4 != used - 4) {
fprintf(stderr, "vertex size confusion %d %d\n", used,
intel->vertex_size * vertcount * 4);
goto do_discard;
}
if (bad_prim_vertex_nr( intel->prim.primitive, vertcount )) {
fprintf(stderr, "bad_prim_vertex_nr %x %d\n", intel->prim.primitive,
vertcount);
goto do_discard;
}
assert(r->elem < batch->list_count);
ptr[r->offset/4] = batch->offset[r->elem] + r->delta;
}
if (used < 8)
goto do_discard;
if (INTEL_DEBUG & DEBUG_DMA)
intel_dump_batchbuffer( 0, ptr, used );
*(int *)intel->prim.start_ptr = (_3DPRIMITIVE |
intel->prim.primitive |
(used/4-2));
goto finished;
bmUnmapBuffer(batch->bm, batch->buffer);
do_discard:
intel->batch.ptr -= used;
intel->batch.space += used;
assert(intel->batch.space >= 0);
finished:
intel->prim.primitive = ~0;
intel->prim.start_ptr = 0;
intel->prim.flush = 0;
}
/* Emit a primitive referencing vertices in a vertex buffer.
*/
/* Begin an inline primitive: fire any pending vertices, emit
 * outstanding state, and pad the batch for alignment.
 *
 * NOTE(review): stripped-diff residue -- the lines referencing 'batch',
 * 'used', 'ignore_cliprects' and 'allow_unlock' below were spliced in
 * from do_flush_locked() and are out of scope in this function.
 */
void intelStartInlinePrimitive( intelContextPtr intel, GLuint prim )
{
BATCH_LOCALS;
if (0)
fprintf(stderr, "%s %x\n", __FUNCTION__, prim);
/* Finish any in-progress primitive before starting a new one.
*/
INTEL_FIREVERTICES( intel );
/* Emit outstanding state:
*/
intel->vtbl.emit_state( intel );
/* Make sure there is some space in this buffer:
*/
if (intel->vertex_size * 10 * sizeof(GLuint) >= intel->batch.space) {
intelFlushBatch(intel, GL_TRUE);
intel->vtbl.emit_state( intel );
}
#if 1
/* Pad with a no-op dword so the primitive header lands 8-byte aligned. */
if (((unsigned long)intel->batch.ptr) & 0x4) {
BEGIN_BATCH(1);
OUT_BATCH(0);
ADVANCE_BATCH();
}
/* NOTE(review): spliced from do_flush_locked(); does not compile here. */
intel_batch_ioctl(batch->intel,
batch->offset[0],
used,
ignore_cliprects,
allow_unlock);
#endif
/* NOTE(review): remainder also spliced from do_flush_locked(). */
batch->last_fence = bmFenceBufferList(batch->bm, batch->list);
if (!batch->intel->last_swap_fence_retired) {
int retired;
drmFence dFence = {0,batch->intel->last_swap_fence};
/*FIXME: Temporary fix for fence ageing
*
*/
if (!drmTestFence(batch->intel->driFd, dFence, 0, &retired)) {
batch->intel->last_swap_fence_retired = retired;
}
}
}
/* Emit a slot which will be filled with the inline primitive
* command later.
GLuint intel_batchbuffer_flush( struct intel_batchbuffer *batch )
{
struct intel_context *intel = batch->intel;
GLuint used = batch->ptr - batch->map;
if (used == 0)
return batch->last_fence;
/* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
* performance drain that we would like to avoid.
*/
BEGIN_BATCH(2);
OUT_BATCH( 0 );
intel->prim.start_ptr = batch_ptr;
intel->prim.primitive = prim;
intel->prim.flush = intel_flush_inline_primitive;
intel->batch.contains_geometry = 1;
OUT_BATCH( 0 );
ADVANCE_BATCH();
}
/* Flush the current inline primitive and immediately restart one of the
 * same type.  Used when state or batch space must be renewed mid-run.
 */
void intelRestartInlinePrimitive( intelContextPtr intel )
{
   GLuint prim = intel->prim.primitive;

   intel_flush_inline_primitive( &intel->ctx );

   /* This flush is critical: the restarted primitive must begin in
    * freshly flushed batch space (GL_TRUE forces it).  The original
    * always-true "if (1)" wrapper has been dropped.
    */
   intelFlushBatch(intel, GL_TRUE);
   intelStartInlinePrimitive( intel, prim );
}
/* The current inline primitive ran out of batch space: close it and
 * reopen a primitive of the same type in a fresh batch.
 */
void intelWrapInlinePrimitive( intelContextPtr intel )
{
   GLuint saved_prim = intel->prim.primitive;

   if (0)
      fprintf(stderr, "%s\n", __FUNCTION__);

   intel_flush_inline_primitive( &intel->ctx );
   intelFlushBatch(intel, GL_TRUE);
   intelStartInlinePrimitive( intel, saved_prim );
}
/* Emit a primitive with space for inline vertices.
*/
/* Emit a primitive with space for inline vertices.
 *
 * Emits a _3DPRIMITIVE packet reserving 'dwords' dwords of vertex data
 * and returns a pointer the caller fills in; returns NULL (0) if the
 * request fails sanity checking.  Must be called with the lock held.
 *
 * Fix: six lines from intel_batchbuffer_flush() ("used & 4" /
 * MI_BATCH_BUFFER_END emission) had been spliced into the middle of the
 * space check by a bad merge, swallowing its closing brace.  They are
 * removed and the original structure restored.
 */
GLuint *intelEmitInlinePrimitiveLocked(intelContextPtr intel,
                                       int primitive,
                                       int dwords,
                                       int vertex_size )
{
   GLuint *tmp = 0;
   BATCH_LOCALS;

   if (0)
      fprintf(stderr, "%s 0x%x %d\n", __FUNCTION__, primitive, dwords);

   /* Emit outstanding state:
    */
   intel->vtbl.emit_state( intel );

   /* Make sure the header plus payload fits; flush and re-emit state
    * if not.
    */
   if ((1+dwords)*4 >= intel->batch.space) {
      intelFlushBatch(intel, GL_TRUE);
      intel->vtbl.emit_state( intel );
   }

   /* Sanity-check the request against the current vertex layout. */
   if (1) {
      int used = dwords * 4;
      int vertcount;

      /* Check vertex size against the vertex we're specifying to
       * hardware.  If it's wrong, ditch the primitive.
       */
      if (!intel->vtbl.check_vertex_size( intel, vertex_size ))
         goto do_discard;

      vertcount = dwords / vertex_size;

      if (dwords % vertex_size) {
         fprintf(stderr, "did not request a whole number of vertices\n");
         goto do_discard;
      }

      if (bad_prim_vertex_nr( primitive, vertcount )) {
         fprintf(stderr, "bad_prim_vertex_nr %x %d\n", primitive, vertcount);
         goto do_discard;
      }

      if (used < 8)
         goto do_discard;
   }

   /* Emit 3D_PRIMITIVE commands:
    */
   BEGIN_BATCH(1 + dwords);
   OUT_BATCH( _3DPRIMITIVE |
              primitive |
              (dwords-1) );

   /* Hand the reserved span back to the caller to fill with vertices. */
   tmp = (GLuint *)batch_ptr;
   batch_ptr += dwords * 4;

   ADVANCE_BATCH();

   intel->batch.contains_geometry = 1;

 do_discard:
   return tmp;
}
/* Throttle: block until the hardware has caught up with the last
 * dispatched frame, either by busy-waiting on the breadcrumb or by
 * sleeping on an IRQ.  Called with the hardware lock held; may drop
 * and re-take it.
 */
static void intelWaitForFrameCompletion( intelContextPtr intel )
{
   drm_i915_sarea_t *sarea = (drm_i915_sarea_t *)intel->sarea;

   if (intel->do_irqs) {
      if (intelGetLastFrame(intel) < sarea->last_dispatch) {
         if (!intel->irqsEmitted) {
            /* No IRQ outstanding to sleep on: spin on the breadcrumb. */
            while (intelGetLastFrame (intel) < sarea->last_dispatch)
               ;
         }
         else {
            /* Sleep in the kernel with the lock dropped.
             * NOTE(review): waits on alloc.irq_emitted -- confirm this
             * is the IRQ matching the frame being waited for.
             */
            UNLOCK_HARDWARE( intel );
            intelWaitIrq( intel, intel->alloc.irq_emitted );
            LOCK_HARDWARE( intel );
         }

         /* Re-arm: allow up to 10 more IRQ emissions before falling
          * back to busy-waiting.
          */
         intel->irqsEmitted = 10;
      }

      if (intel->irqsEmitted) {
         intelEmitIrqLocked( intel );
         intel->irqsEmitted--;
      }
   }
   else {
      /* No IRQ support: poll, optionally usleep()ing with the lock
       * released so other clients can progress.
       */
      while (intelGetLastFrame (intel) < sarea->last_dispatch) {
         UNLOCK_HARDWARE( intel );
         if (intel->do_usleeps)
            DO_USLEEP( 1 );
         LOCK_HARDWARE( intel );
      }
   }
}
/*
* Copy the back buffer to the front buffer.
*/
/*
 * Copy the back buffer to the front buffer.
 *
 * Blits every cliprect of the drawable (optionally intersected with
 * 'rect') from the back buffer to the front, honouring page-flip
 * state.  When rect == NULL this is a full swap: it throttles to the
 * vblank and updates the swap statistics.
 */
void intelCopyBuffer( const __DRIdrawablePrivate *dPriv,
                      const drm_clip_rect_t *rect)
{
   intelContextPtr intel;
   GLboolean missed_target;
   int64_t ust;

   if (0)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   intel = (intelContextPtr) dPriv->driContextPriv->driverPrivate;

   /* Flush all pending rendering before throttling and blitting. */
   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );
   intelWaitForFrameCompletion( intel );

   /* Full swap: synchronize to the vertical blank with the lock
    * dropped.
    */
   if (!rect)
   {
      UNLOCK_HARDWARE( intel );
      driWaitForVBlank( dPriv, &intel->vbl_seq, intel->vblank_flags, & missed_target );
      LOCK_HARDWARE( intel );
   }

   {
      const intelScreenPrivate *intelScreen = intel->intelScreen;
      const __DRIdrawablePrivate *dPriv = intel->driDrawable;  /* shadows the parameter */
      const int nbox = dPriv->numClipRects;
      const drm_clip_rect_t *pbox = dPriv->pClipRects;
      drm_clip_rect_t box;
      const int cpp = intelScreen->cpp;
      const int pitch = intelScreen->front.pitch; /* in bytes */
      int i;
      GLuint CMD, BR13;
      BATCH_LOCALS;

      /* Select blit command and BR13 by pixel size (ROP 0xCC = copy). */
      switch(cpp) {
      case 2:
         BR13 = (pitch) | (0xCC << 16) | (1<<24);
         CMD = XY_SRC_COPY_BLT_CMD;
         break;
      case 4:
         BR13 = (pitch) | (0xCC << 16) | (1<<24) | (1<<25);
         CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
                XY_SRC_COPY_BLT_WRITE_RGB);
         break;
      default:
         BR13 = (pitch) | (0xCC << 16) | (1<<24);
         CMD = XY_SRC_COPY_BLT_CMD;
         break;
      }

      if (0)
         intel_draw_performance_boxes( intel );

      for (i = 0 ; i < nbox; i++, pbox++)
      {
         /* Reject cliprects that fall outside the screen. */
         if (pbox->x1 > pbox->x2 ||
             pbox->y1 > pbox->y2 ||
             pbox->x2 > intelScreen->width ||
             pbox->y2 > intelScreen->height) {
            _mesa_warning(&intel->ctx, "Bad cliprect in intelCopyBuffer()");
            continue;
         }

         box = *pbox;

         /* Intersect with the caller's sub-rectangle, if given. */
         if (rect)
         {
            if (rect->x1 > box.x1)
               box.x1 = rect->x1;
            if (rect->y1 > box.y1)
               box.y1 = rect->y1;
            if (rect->x2 < box.x2)
               box.x2 = rect->x2;
            if (rect->y2 < box.y2)
               box.y2 = rect->y2;

            if (box.x1 > box.x2 || box.y1 > box.y2)
               continue;
         }

         BEGIN_BATCH( 8);
         OUT_BATCH( CMD );
         OUT_BATCH( BR13 );
         OUT_BATCH( (box.y1 << 16) | box.x1 );
         OUT_BATCH( (box.y2 << 16) | box.x2 );

         /* Destination: whichever buffer is currently displayed. */
         if (intel->sarea->pf_current_page == 0)
            OUT_BATCH( intelScreen->front.offset );
         else
            OUT_BATCH( intelScreen->back.offset );

         OUT_BATCH( (box.y1 << 16) | box.x1 );
         OUT_BATCH( BR13 & 0xffff );  /* source pitch (low word of BR13) */

         /* Source: the buffer just rendered to. */
         if (intel->sarea->pf_current_page == 0)
            OUT_BATCH( intelScreen->back.offset );
         else
            OUT_BATCH( intelScreen->front.offset );

         ADVANCE_BATCH();
      }
   }

   intelFlushBatchLocked( intel, GL_TRUE, GL_TRUE, GL_TRUE );
   UNLOCK_HARDWARE( intel );

   /* Swap bookkeeping: count the swap and whether the vblank target
    * was missed.
    */
   if (!rect)
   {
      intel->swap_count++;
      (*dri_interface->getUST)(&ust);
      if (missed_target) {
         intel->swap_missed_count++;
         intel->swap_missed_ust = ust - intel->swap_ust;
      }
      intel->swap_ust = ust;
   }
}
/* Fill a rectangle with a solid color using the 2D blitter
 * (XY_COLOR_BLT, ROP 0xF0 = pattern copy).  Caller must hold the
 * hardware lock.
 *
 * Fix: four lines from intel_batchbuffer_flush() (writing
 * MI_BATCH_BUFFER_END through an out-of-scope 'batch' pointer) had been
 * spliced into the default case by a bad merge; they are removed and
 * the switch's closing brace restored.
 */
void intelEmitFillBlitLocked( intelContextPtr intel,
                              GLuint cpp,
                              GLshort dst_pitch,   /* in bytes */
                              GLuint dst_offset,
                              GLshort x, GLshort y,
                              GLshort w, GLshort h,
                              GLuint color )
{
   GLuint BR13, CMD;
   BATCH_LOCALS;

   switch(cpp) {
   case 1:
   case 2:
   case 3:
      BR13 = dst_pitch | (0xF0 << 16) | (1<<24);
      CMD = XY_COLOR_BLT_CMD;
      break;
   case 4:
      BR13 = dst_pitch | (0xF0 << 16) | (1<<24) | (1<<25);
      CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
             XY_COLOR_BLT_WRITE_RGB);
      break;
   default:
      /* Unsupported pixel size: silently drop the fill. */
      return;
   }

   BEGIN_BATCH( 6);
   OUT_BATCH( CMD );
   OUT_BATCH( BR13 );
   OUT_BATCH( (y << 16) | x );
   OUT_BATCH( ((y+h) << 16) | (x+w) );
   OUT_BATCH( dst_offset );
   OUT_BATCH( color );
   ADVANCE_BATCH();
}
bmUnmapBuffer(batch->bm, batch->buffer);
batch->ptr = NULL;
batch->map = NULL;
/* Copy BitBlt
*/
/* Copy BitBlt
 *
 * Screen-to-screen copy of a w x h block via the 2D blitter.  Pitches
 * are passed in pixels and scaled to bytes here; offsets are byte
 * offsets into the aperture.  Caller must hold the hardware lock.
 */
void intelEmitCopyBlitLocked( intelContextPtr intel,
                              GLuint cpp,
                              GLshort src_pitch,
                              GLuint src_offset,
                              GLshort dst_pitch,
                              GLuint dst_offset,
                              GLshort src_x, GLshort src_y,
                              GLshort dst_x, GLshort dst_y,
                              GLshort w, GLshort h )
{
   GLuint CMD, BR13;
   int dst_y2 = dst_y + h;
   int dst_x2 = dst_x + w;
   BATCH_LOCALS;

   /* Convert pixel pitches to byte pitches. */
   src_pitch *= cpp;
   dst_pitch *= cpp;

   /* Select blit command and BR13 by pixel size (ROP 0xCC = copy). */
   switch(cpp) {
   case 1:
   case 2:
   case 3:
      BR13 = dst_pitch | (0xCC << 16) | (1<<24);
      CMD = XY_SRC_COPY_BLT_CMD;
      break;
   case 4:
      BR13 = dst_pitch | (0xCC << 16) | (1<<24) | (1<<25);
      CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
             XY_SRC_COPY_BLT_WRITE_RGB);
      break;
   default:
      /* Unsupported pixel size: drop the blit. */
      return;
   }

   /* Reject degenerate or wrapped destination rectangles. */
   if (dst_y2 < dst_y ||
       dst_x2 < dst_x) {
      return;
   }

   /* NOTE(review): 12 dwords reserved but only 8 emitted -- presumably
    * harmless over-reservation; confirm against the XY_SRC_COPY_BLT
    * packet length.
    */
   BEGIN_BATCH( 12);
   OUT_BATCH( CMD );
   OUT_BATCH( BR13 );
   OUT_BATCH( (dst_y << 16) | dst_x );
   OUT_BATCH( (dst_y2 << 16) | dst_x2 );
   OUT_BATCH( dst_offset );
   OUT_BATCH( (src_y << 16) | src_x );
   OUT_BATCH( src_pitch );
   OUT_BATCH( src_offset );
   ADVANCE_BATCH();
}
/* Clear the buffers named in 'flags' with the 2D blitter, one solid
 * fill per (cliprect x buffer).  'all' means clear the whole drawable;
 * otherwise cx1/cy1/cw/ch give the scissored clear area in GL (bottom-
 * up) window coordinates.
 */
void intelClearWithBlit(GLcontext *ctx, GLbitfield flags, GLboolean all,
                        GLint cx1, GLint cy1, GLint cw, GLint ch)
{
   intelContextPtr intel = INTEL_CONTEXT( ctx );
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLuint clear_depth, clear_color;
   GLint cx, cy;
   GLint pitch;
   GLint cpp = intelScreen->cpp;
   GLint i;
   GLuint BR13, CMD, D_CMD;
   BATCH_LOCALS;

   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );

   pitch = intelScreen->front.pitch;

   clear_color = intel->ClearColor;

   /* Pack the depth clear value, with stencil in the top byte. */
   clear_depth = 0;
   if (flags & BUFFER_BIT_DEPTH) {
      clear_depth = (GLuint)(ctx->Depth.Clear * intel->ClearDepth);
   }

   if (flags & BUFFER_BIT_STENCIL) {
      clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
   }

   /* ROP 0xF0 = solid fill.  At 32bpp the write-alpha/write-rgb bits
    * restrict the depth/stencil fill (D_CMD) to only the planes being
    * cleared.
    */
   switch(cpp) {
   case 2:
      BR13 = (0xF0 << 16) | (pitch) | (1<<24);
      D_CMD = CMD = XY_COLOR_BLT_CMD;
      break;
   case 4:
      BR13 = (0xF0 << 16) | (pitch) | (1<<24) | (1<<25);
      CMD = (XY_COLOR_BLT_CMD |
             XY_COLOR_BLT_WRITE_ALPHA |
             XY_COLOR_BLT_WRITE_RGB);
      D_CMD = XY_COLOR_BLT_CMD;
      if (flags & BUFFER_BIT_DEPTH) D_CMD |= XY_COLOR_BLT_WRITE_RGB;
      if (flags & BUFFER_BIT_STENCIL) D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
      break;
   default:
      BR13 = (0xF0 << 16) | (pitch) | (1<<24);
      D_CMD = CMD = XY_COLOR_BLT_CMD;
      break;
   }

   {
      /* flip top to bottom */
      cy = intel->driDrawable->h-cy1-ch;
      cx = cx1 + intel->drawX;
      cy += intel->drawY;

      /* adjust for page flipping */
      if ( intel->sarea->pf_current_page == 1 ) {
         GLuint tmp = flags;

         flags &= ~(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT);

         if ( tmp & BUFFER_BIT_FRONT_LEFT ) flags |= BUFFER_BIT_BACK_LEFT;
         if ( tmp & BUFFER_BIT_BACK_LEFT ) flags |= BUFFER_BIT_FRONT_LEFT;
      }

      for (i = 0 ; i < intel->numClipRects ; i++)
      {
         drm_clip_rect_t *box = &intel->pClipRects[i];
         drm_clip_rect_t b;

         /* Intersect the cliprect with the scissored clear area unless
          * clearing everything.
          */
         if (!all) {
            GLint x = box->x1;
            GLint y = box->y1;
            GLint w = box->x2 - x;
            GLint h = box->y2 - y;

            if (x < cx) w -= cx - x, x = cx;
            if (y < cy) h -= cy - y, y = cy;
            if (x + w > cx + cw) w = cx + cw - x;
            if (y + h > cy + ch) h = cy + ch - y;

            if (w <= 0) continue;
            if (h <= 0) continue;

            b.x1 = x;
            b.y1 = y;
            b.x2 = x + w;
            b.y2 = y + h;
         } else {
            b = *box;
         }

         /* Skip rectangles outside the screen. */
         if (b.x1 > b.x2 ||
             b.y1 > b.y2 ||
             b.x2 > intelScreen->width ||
             b.y2 > intelScreen->height)
            continue;

         if ( flags & BUFFER_BIT_FRONT_LEFT ) {
            BEGIN_BATCH( 6);
            OUT_BATCH( CMD );
            OUT_BATCH( BR13 );
            OUT_BATCH( (b.y1 << 16) | b.x1 );
            OUT_BATCH( (b.y2 << 16) | b.x2 );
            OUT_BATCH( intelScreen->front.offset );
            OUT_BATCH( clear_color );
            ADVANCE_BATCH();
         }

         if ( flags & BUFFER_BIT_BACK_LEFT ) {
            BEGIN_BATCH( 6);
            OUT_BATCH( CMD );
            OUT_BATCH( BR13 );
            OUT_BATCH( (b.y1 << 16) | b.x1 );
            OUT_BATCH( (b.y2 << 16) | b.x2 );
            OUT_BATCH( intelScreen->back.offset );
            OUT_BATCH( clear_color );
            ADVANCE_BATCH();
         }

         if ( flags & (BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH) ) {
            BEGIN_BATCH( 6);
            OUT_BATCH( D_CMD );
            OUT_BATCH( BR13 );
            OUT_BATCH( (b.y1 << 16) | b.x1 );
            OUT_BATCH( (b.y2 << 16) | b.x2 );
            OUT_BATCH( intelScreen->depth.offset );
            OUT_BATCH( clear_depth );
            ADVANCE_BATCH();
         }
      }
   }

   intelFlushBatchLocked( intel, GL_TRUE, GL_FALSE, GL_TRUE );
   UNLOCK_HARDWARE( intel );
}
/* Tear down the context's batchbuffer backing store: AGP memory when
 * alloc.offset is set, otherwise a plain malloc'ed buffer; then wipe
 * the batch bookkeeping.
 */
void intelDestroyBatchBuffer( GLcontext *ctx )
{
   intelContextPtr intel = INTEL_CONTEXT(ctx);

   if (intel->alloc.offset != 0) {
      /* AGP-backed buffer. */
      intelFreeAGP( intel, intel->alloc.ptr );
      intel->alloc.ptr = NULL;
      intel->alloc.offset = 0;
   }
   else if (intel->alloc.ptr != NULL) {
      /* Heap-backed fallback buffer. */
      free(intel->alloc.ptr);
      intel->alloc.ptr = NULL;
   }

   memset(&intel->batch, 0, sizeof(intel->batch));
}
/* Allocate the batchbuffer backing store for a context: AGP memory
 * when INTEL_BATCH is set and the screen allows it, otherwise a
 * malloc'ed fallback buffer.
 *
 * NOTE(review): stripped-diff residue -- lines from
 * intel_batchbuffer_flush() (referencing 'batch', 'used',
 * do_flush_locked(), and a 'return' in this void function) are spliced
 * in below and the braces do not balance.  Untangle before building.
 */
void intelInitBatchBuffer( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
/* This path isn't really safe with rotate.
 * TODO: Just pass the relocation list and dma buffer up to the
 * kernel.
 */
if (getenv("INTEL_BATCH") && intel->intelScreen->allow_batchbuffer) {
switch (intel->intelScreen->deviceID) {
case PCI_CHIP_I865_G:
/* HW bug?  Seems to crash if batchbuffer crosses 4k boundary.
*/
intel->alloc.size = 8 * 1024;
break;
default:
/* This is the smallest amount of memory the kernel deals with.
* We'd ideally like to make this smaller.
*/
intel->alloc.size = 1 << intel->intelScreen->logTextureGranularity;
break;
}
if (!intel->locked)
{
/* NOTE(review): 'batch', 'used' out of scope -- spliced from
 * intel_batchbuffer_flush().
 */
assert(!(batch->flags & INTEL_BATCH_NO_CLIPRECTS));
intel->alloc.ptr = intelAllocateAGP( intel, intel->alloc.size );
if (intel->alloc.ptr)
intel->alloc.offset =
intelAgpOffsetFromVirtual( intel, intel->alloc.ptr );
else
intel->alloc.offset = 0; /* OK? */
LOCK_HARDWARE(intel);
do_flush_locked(batch, used, GL_FALSE, GL_TRUE);
UNLOCK_HARDWARE(intel);
}
else {
GLboolean ignore_cliprects = !(batch->flags & INTEL_BATCH_CLIPRECTS);
do_flush_locked(batch, used, ignore_cliprects, GL_FALSE);
}
/* The default is now to use a local buffer and pass that to the
 * kernel.  This is also a fallback if allocation fails on the
 * above path.
 */
if (!intel->alloc.ptr) {
intel->alloc.size = 8 * 1024;
intel->alloc.ptr = malloc( intel->alloc.size );
intel->alloc.offset = 0;
/* NOTE(review): spliced from intel_batchbuffer_flush(). */
intel_batchbuffer_reset( batch );
return batch->last_fence;
}
void intel_batchbuffer_finish( struct intel_batchbuffer *batch )
{
bmFinishFence(batch->bm,
intel_batchbuffer_flush(batch));
}
/* This is the only way buffers get added to the validate list.
*/
/* This is the only way buffers get added to the validate list.
 *
 * Records a relocation at the current batch write position: the dword
 * reserved here will be patched at flush time with the buffer's
 * validated offset plus 'delta'.  Always returns GL_TRUE.
 *
 * Fixes: a stray "assert(intel->alloc.ptr)" referencing an out-of-scope
 * 'intel' (merge residue) is removed, and the capacity assert is now
 * strict (<) -- with nr_relocs == MAX_RELOCS the old <= check passed
 * and then wrote one past the end of batch->reloc[].
 */
GLboolean intel_batchbuffer_emit_reloc( struct intel_batchbuffer *batch,
                                        GLuint buffer,
                                        GLuint flags,
                                        GLuint delta )
{
   GLuint i;

   assert(batch->nr_relocs < MAX_RELOCS);

   /* Reuse the buffer's existing slot in the validate list, or append
    * it if this is its first reference from this batch.
    */
   i = bmScanBufferList(batch->bm, batch->list, buffer);
   if (i == -1) {
      i = batch->list_count;
      bmAddBuffer(batch->bm,
                  batch->list,
                  buffer,
                  flags,
                  NULL,
                  &batch->offset[batch->list_count++]);
   }

   {
      struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
      r->offset = batch->ptr - batch->map;
      r->delta = delta;
      r->elem = i;
   }

   /* Reserve the dword that will be patched with the real address. */
   batch->ptr += 4;
   return GL_TRUE;
}
/* Append a block of pre-formed, dword-aligned command data to the
 * batch, flushing first if it doesn't fit or if 'flags' conflicts with
 * the batch's current cliprect mode.
 */
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data,
                            GLuint bytes,
                            GLuint flags)
{
   /* Batch contents must stay dword-aligned. */
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, flags);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}

View file

@ -1,126 +1,126 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
#include "intel_context.h"
#include "intel_ioctl.h"
#include "mtypes.h"
#include "intel_bufmgr.h"
struct intel_context;
#define BATCH_SZ 4096
#define BATCH_RESERVED 16
#define MAX_RELOCS 100
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS 0x2
/* One pending relocation: a dword at 'offset' bytes into the batch
 * that must be patched with the validated address of a buffer.
 */
struct buffer_reloc {
   GLuint offset;
   GLuint elem;   /* elem in buffer list, not buffer id */
   GLuint delta;  /* not needed? */
};
/* A growable command batch plus the buffer-validation and relocation
 * bookkeeping needed to submit it through the buffer manager.
 */
struct intel_batchbuffer {
   struct bufmgr *bm;           /* buffer manager the batch allocates from */
   struct intel_context *intel;

   GLuint buffer;               /* bufmgr handle of the batch itself */
   GLuint last_fence;           /* fence returned by the most recent flush */
   GLuint flags;                /* INTEL_BATCH_CLIPRECTS / _NO_CLIPRECTS */

   /* In progress:
    */
   unsigned long offset[MAX_RELOCS];   /* validated offsets, indexed by list elem */
   struct _drmMMBufList *list;         /* buffers to validate at flush time */
   GLuint list_count;
   GLubyte *map;                /* CPU mapping of the batch; NULL when unmapped */
   GLubyte *ptr;                /* current write position within 'map' */

   struct buffer_reloc reloc[MAX_RELOCS];
   GLuint nr_relocs;
};
struct intel_batchbuffer *intel_batchbuffer_alloc( struct intel_context *intel );
void intel_batchbuffer_free( struct intel_batchbuffer *batch );
#define BATCH_LOCALS GLubyte *batch_ptr;
void intel_batchbuffer_finish( struct intel_batchbuffer *batch );
/* #define VERBOSE 0 */
#ifndef VERBOSE
extern int VERBOSE;
#endif
GLuint intel_batchbuffer_flush( struct intel_batchbuffer *batch );
void intel_batchbuffer_reset( struct intel_batchbuffer *batch );
#define BEGIN_BATCH(n) \
do { \
if (VERBOSE) fprintf(stderr, \
"BEGIN_BATCH(%ld) in %s, %d dwords free\n", \
((unsigned long)n), __FUNCTION__, \
intel->batch.space/4); \
if (intel->batch.space < (n)*4) \
intelFlushBatch(intel, GL_TRUE); \
if (intel->batch.space == intel->batch.size) intel->batch.func = __FUNCTION__; \
batch_ptr = intel->batch.ptr; \
} while (0)
/* Unlike bmBufferData, this currently requires the buffer be mapped.
* Consider it a convenience function wrapping multple
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data,
GLuint bytes,
GLuint flags);
#define OUT_BATCH(n) \
do { \
*(GLuint *)batch_ptr = (n); \
if (VERBOSE) fprintf(stderr, " -- %08x at %s/%d\n", (n), __FILE__, __LINE__); \
batch_ptr += 4; \
} while (0)
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
GLuint bytes);
#define ADVANCE_BATCH() \
do { \
if (VERBOSE) fprintf(stderr, "ADVANCE_BATCH()\n"); \
intel->batch.space -= (batch_ptr - intel->batch.ptr); \
intel->batch.ptr = batch_ptr; \
assert(intel->batch.space >= 0); \
} while(0)
GLboolean intel_batchbuffer_emit_reloc( struct intel_batchbuffer *batch,
GLuint buffer,
GLuint flags,
GLuint offset );
extern void intelInitBatchBuffer( GLcontext *ctx );
extern void intelDestroyBatchBuffer( GLcontext *ctx );
extern void intelStartInlinePrimitive( intelContextPtr intel, GLuint prim );
extern void intelWrapInlinePrimitive( intelContextPtr intel );
extern void intelRestartInlinePrimitive( intelContextPtr intel );
extern GLuint *intelEmitInlinePrimitiveLocked(intelContextPtr intel,
int primitive, int dwords,
int vertex_size);
extern void intelCopyBuffer( const __DRIdrawablePrivate *dpriv,
const drm_clip_rect_t *rect);
extern void intelClearWithBlit(GLcontext *ctx, GLbitfield mask, GLboolean all,
GLint cx1, GLint cy1, GLint cw, GLint ch);
extern void intelEmitCopyBlitLocked( intelContextPtr intel,
GLuint cpp,
GLshort src_pitch,
GLuint src_offset,
GLshort dst_pitch,
GLuint dst_offset,
GLshort srcx, GLshort srcy,
GLshort dstx, GLshort dsty,
GLshort w, GLshort h );
extern void intelEmitFillBlitLocked( intelContextPtr intel,
GLuint cpp,
GLshort dst_pitch,
GLuint dst_offset,
GLshort x, GLshort y,
GLshort w, GLshort h,
GLuint color );
static __inline GLuint *intelExtendInlinePrimitive( intelContextPtr intel,
GLuint dwords )
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
static INLINE GLuint
intel_batchbuffer_space( struct intel_batchbuffer *batch )
{
GLuint sz = dwords * sizeof(GLuint);
GLuint *ptr;
if (intel->batch.space < sz) {
intelWrapInlinePrimitive( intel );
/* assert(intel->batch.space >= sz); */
}
/* assert(intel->prim.primitive != ~0); */
ptr = (GLuint *)intel->batch.ptr;
intel->batch.ptr += sz;
intel->batch.space -= sz;
return ptr;
return (BATCH_SZ - BATCH_RESERVED) - (batch->ptr - batch->map);
}
/* Append one dword to the batch.  The batch must be mapped and have at
 * least 4 usable bytes remaining.
 */
static INLINE void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch,
                             GLuint dword)
{
   assert(batch->map);
   assert(intel_batchbuffer_space(batch) >= 4);
   *(GLuint *)(batch->ptr) = dword;
   batch->ptr += 4;
}
/* Guarantee 'sz' bytes of batch space, flushing first when the space is
 * insufficient or when 'flags' conflicts with the cliprect mode already
 * recorded for this batch; then accumulate 'flags' into the batch.
 */
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
                                GLuint sz,
                                GLuint flags)
{
   assert(sz < BATCH_SZ - 8);
   if (intel_batchbuffer_space(batch) < sz ||
       (batch->flags != 0 && flags != 0 && batch->flags != flags))
      intel_batchbuffer_flush(batch);

   batch->flags |= flags;
}
/* Here are the crusty old macros, to be removed:
*/
#define BATCH_LOCALS
#define BEGIN_BATCH(n, flags) do { \
assert(!intel->prim.flush); \
intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
} while (0)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
#define OUT_RELOC(buf,flags,delta) do { \
assert((delta) >= 0); \
intel_batchbuffer_emit_reloc(intel->batch, buf, flags, delta); \
} while (0)
#define ADVANCE_BATCH() do { } while(0)
#endif

View file

@ -48,157 +48,169 @@
#define DV_PF_565 (2<<8)
#define DV_PF_8888 (3<<8)
#define INTEL_CONTEXT(ctx) ((intelContextPtr)(ctx))
struct intel_region;
struct intel_context;
typedef struct intel_context intelContext;
typedef struct intel_context *intelContextPtr;
typedef struct intel_texture_object *intelTextureObjectPtr;
typedef void (*intel_tri_func)(intelContextPtr, intelVertex *, intelVertex *,
typedef void (*intel_tri_func)(struct intel_context *, intelVertex *, intelVertex *,
intelVertex *);
typedef void (*intel_line_func)(intelContextPtr, intelVertex *, intelVertex *);
typedef void (*intel_point_func)(intelContextPtr, intelVertex *);
typedef void (*intel_line_func)(struct intel_context *, intelVertex *, intelVertex *);
typedef void (*intel_point_func)(struct intel_context *, intelVertex *);
#define INTEL_FALLBACK_DRAW_BUFFER 0x1
#define INTEL_FALLBACK_READ_BUFFER 0x2
#define INTEL_FALLBACK_USER 0x4
#define INTEL_FALLBACK_NO_BATCHBUFFER 0x8
#define INTEL_FALLBACK_NO_TEXMEM 0x10
#define INTEL_FALLBACK_DEPTH_BUFFER 0x4
#define INTEL_FALLBACK_STENCIL_BUFFER 0x8
#define INTEL_FALLBACK_USER 0x10
#define INTEL_FALLBACK_RENDERMODE 0x20
extern void intelFallback( intelContextPtr intel, GLuint bit, GLboolean mode );
extern void intelFallback( struct intel_context *intel, GLuint bit, GLboolean mode );
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
#define INTEL_TEX_MAXLEVELS 10
struct intel_texture_object
{
driTextureObject base; /* the parent class */
struct gl_texture_object base; /* The "parent" object */
GLuint texelBytes;
GLuint age;
GLuint Pitch;
GLuint Height;
GLuint TextureOffset;
GLubyte *BufAddr;
/* The mipmap tree must include at least these levels once
* validated:
*/
GLuint firstLevel;
GLuint lastLevel;
GLuint min_level;
GLuint max_level;
GLuint depth_pitch;
/* Offset for firstLevel image:
*/
GLuint textureOffset;
struct {
const struct gl_texture_image *image;
GLuint offset; /* into BufAddr */
GLuint height;
GLuint internalFormat;
} image[6][INTEL_TEX_MAXLEVELS];
GLuint dirty;
GLuint firstLevel,lastLevel;
/* On validation any active images held in main memory or in other
* regions will be copied to this region and the old storage freed.
*/
struct intel_mipmap_tree *mt;
};
/* Driver-private wrapper around Mesa's gl_texture_image. */
struct intel_texture_image
{
   struct gl_texture_image base;

   /* These aren't stored in gl_texture_image
    */
   GLuint level;   /* mipmap level of this image -- TODO confirm numbering base */
   GLuint face;    /* presumably the cube-map face index; verify against callers */

   /* If intelImage->mt != NULL, image data is stored here.
    * Else if intelImage->base.Data != NULL, image is stored there.
    * Else there is no image data.
    */
   struct intel_mipmap_tree *mt;
};
#define INTEL_MAX_FIXUP 64
struct intel_context
{
GLcontext ctx; /* the parent class */
struct {
void (*destroy)( intelContextPtr intel );
void (*emit_state)( intelContextPtr intel );
void (*emit_invarient_state)( intelContextPtr intel );
void (*lost_hardware)( intelContextPtr intel );
void (*update_texture_state)( intelContextPtr intel );
void (*destroy)( struct intel_context *intel );
void (*emit_state)( struct intel_context *intel );
void (*lost_hardware)( struct intel_context *intel );
void (*update_texture_state)( struct intel_context *intel );
void (*render_start)( intelContextPtr intel );
void (*set_color_region)( intelContextPtr intel, const intelRegion *reg );
void (*set_z_region)( intelContextPtr intel, const intelRegion *reg );
void (*update_color_z_regions)(intelContextPtr intel,
const intelRegion *colorRegion,
const intelRegion *depthRegion);
void (*emit_flush)( intelContextPtr intel );
void (*reduced_primitive_state)( intelContextPtr intel, GLenum rprim );
void (*render_start)( struct intel_context *intel );
void (*set_draw_region)( struct intel_context *intel,
struct intel_region *draw_region,
struct intel_region *depth_region );
GLboolean (*check_vertex_size)( intelContextPtr intel, GLuint expected );
GLuint (*flush_cmd)( void );
void (*clear_with_tris)( intelContextPtr intel, GLbitfield mask,
GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch);
void (*reduced_primitive_state)( struct intel_context *intel, GLenum rprim );
void (*rotate_window)( intelContextPtr intel,
GLboolean (*check_vertex_size)( struct intel_context *intel, GLuint expected );
/* Metaops:
*/
void (*install_meta_state)( struct intel_context *intel );
void (*leave_meta_state)( struct intel_context *intel );
void (*meta_draw_region)( struct intel_context *intel,
struct intel_region *draw_region,
struct intel_region *depth_region );
void (*meta_color_mask)( struct intel_context *intel,
GLboolean );
void (*meta_stencil_replace)( struct intel_context *intel,
GLuint mask,
GLuint clear );
void (*meta_depth_replace)( struct intel_context *intel );
void (*meta_texture_blend_replace)( struct intel_context *intel );
void (*meta_no_stencil_write)( struct intel_context *intel );
void (*meta_no_depth_write)( struct intel_context *intel );
void (*meta_no_texture)( struct intel_context *intel );
void (*meta_import_pixel_state)( struct intel_context *intel );
GLboolean (*meta_tex_rect_source)( struct intel_context *intel,
GLuint buffer,
GLuint offset,
GLuint pitch,
GLuint height,
GLenum format,
GLenum type);
void (*rotate_window)( struct intel_context *intel,
__DRIdrawablePrivate *dPriv, GLuint srcBuf);
intelTextureObjectPtr (*alloc_tex_obj)( struct gl_texture_object *tObj );
} vtbl;
GLint refcount;
GLuint Fallback;
GLuint NewGLState;
struct {
GLuint start_offset;
GLint size;
GLint space;
GLubyte *ptr;
GLuint counter;
GLuint last_emit_state;
GLboolean contains_geometry;
const char *func;
GLuint last_swap;
} batch;
struct {
void *ptr;
GLint size;
GLuint offset;
GLuint active_buf;
GLuint irq_emitted;
} alloc;
GLuint last_fence;
GLuint last_swap_fence;
GLboolean last_swap_fence_retired;
struct intel_batchbuffer *batch;
struct {
GLuint id;
GLuint primitive;
GLubyte *start_ptr;
void (*flush)( GLcontext * );
void (*flush)( struct intel_context * );
} prim;
GLboolean locked;
char *prevLockFile;
int prevLockLine;
GLubyte clear_red;
GLubyte clear_green;
GLubyte clear_blue;
GLubyte clear_alpha;
GLuint ClearColor;
GLuint ClearDepth;
GLuint ClearColor565;
GLuint ClearColor8888;
/* Offsets of fields within the current vertex:
*/
GLuint coloroffset;
GLuint specoffset;
/* Support for duplicating XYZW as WPOS parameter (crutch for I915).
*/
GLuint wpos_offset;
GLuint wpos_size;
struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
GLuint vertex_attr_count;
GLfloat depth_scale;
GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */
GLuint depth_clear_mask;
GLuint stencil_clear_mask;
GLboolean hw_stencil;
GLboolean hw_stipple;
/* Texture object bookkeeping
/* AGP memory buffer manager:
*/
GLuint nr_heaps;
driTexHeap * texture_heaps[1];
driTextureObject swapped;
GLuint lastStamp;
struct bufmgr *bm;
struct intel_texture_object *CurrentTexObj[MAX_TEXTURE_UNITS];
/* State for intelvb.c and inteltris.c.
*/
@ -207,7 +219,14 @@ struct intel_context
GLenum render_primitive;
GLenum reduced_primitive;
GLuint vertex_size;
unsigned char *verts; /* points to tnl->clipspace.vertex_buf */
GLubyte *verts; /* points to tnl->clipspace.vertex_buf */
struct intel_region *front_region; /* XXX FBO: obsolete */
struct intel_region *rotated_region; /* XXX FBO: obsolete */
struct intel_region *back_region; /* XXX FBO: obsolete */
struct intel_region *draw_region; /* XXX FBO: rename to color_region */
struct intel_region *depth_region; /**< currently bound depth/Z region */
/* Fallback rasterization functions
@ -216,17 +235,13 @@ struct intel_context
intel_line_func draw_line;
intel_tri_func draw_tri;
/* Drawing buffer state
/* These refer to the current drawing buffer:
*/
intelRegion *drawRegion; /* current drawing buffer */
intelRegion *readRegion; /* current reading buffer */
int drawX; /* origin of drawable in draw buffer */
int drawY;
GLuint numClipRects; /* cliprects for that buffer */
int drawX, drawY; /**< origin of drawing area within region */
GLuint numClipRects; /**< cliprects for drawing */
drm_clip_rect_t *pClipRects;
drm_clip_rect_t fboRect; /**< cliprect for FBO rendering */
int dirtyAge;
int perf_boxes;
GLuint do_usleeps;
@ -234,10 +249,6 @@ struct intel_context
GLuint irqsEmitted;
drm_i915_irq_wait_t iw;
GLboolean scissor;
drm_clip_rect_t draw_rect;
drm_clip_rect_t scissor_rect;
drm_context_t hHWContext;
drmLock *driHwLock;
int driFd;
@ -246,6 +257,8 @@ struct intel_context
__DRIscreenPrivate *driScreen;
intelScreenPrivate *intelScreen;
drmI830Sarea *sarea;
GLuint lastStamp;
/**
* Configuration cache
@ -265,22 +278,20 @@ struct intel_context
};
#define DEBUG_LOCKING 1
#define DEBUG_LOCKING 1
#if DEBUG_LOCKING
extern char *prevLockFile;
extern int prevLockLine;
#define DEBUG_LOCK() \
do { \
prevLockFile = (__FILE__); \
prevLockLine = (__LINE__); \
intel->prevLockFile = (__FILE__); \
intel->prevLockLine = (__LINE__); \
} while (0)
#define DEBUG_RESET() \
do { \
prevLockFile = 0; \
prevLockLine = 0; \
intel->prevLockFile = 0; \
intel->prevLockLine = 0; \
} while (0)
/* Slightly less broken way of detecting recursive locking in a
@ -299,7 +310,8 @@ extern int prevLockLine;
(DRM_LOCK_HELD | intel->hHWContext) ) { \
fprintf( stderr, \
"LOCK SET!\n\tPrevious %s:%d\n\tCurrent: %s:%d\n", \
prevLockFile, prevLockLine, __FILE__, __LINE__ ); \
intel->prevLockFile, intel->prevLockLine, \
__FILE__, __LINE__ ); \
abort(); \
} \
} while (0)
@ -312,7 +324,7 @@ extern int prevLockLine;
#endif
extern _glthread_Mutex lockMutex;
/* Lock the hardware and validate our state.
@ -320,13 +332,14 @@ extern int prevLockLine;
#define LOCK_HARDWARE( intel ) \
do { \
char __ret=0; \
_glthread_LOCK_MUTEX(lockMutex); \
DEBUG_CHECK_LOCK(); \
assert(!(intel)->locked); \
DRM_CAS((intel)->driHwLock, (intel)->hHWContext, \
(DRM_LOCK_HELD|(intel)->hHWContext), __ret); \
if (__ret) \
intelGetLock( (intel), 0 ); \
DEBUG_LOCK(); \
DEBUG_LOCK(); \
(intel)->locked = 1; \
}while (0)
@ -342,6 +355,7 @@ do { \
} \
DRM_UNLOCK((intel)->driFd, (intel)->driHwLock, (intel)->hHWContext); \
DEBUG_RESET(); \
_glthread_UNLOCK_MUTEX(lockMutex); \
} while (0)
@ -350,8 +364,7 @@ do { \
#define INTEL_FIREVERTICES(intel) \
do { \
if ((intel)->prim.flush) \
(intel)->prim.flush(&(intel)->ctx); \
assert(!(intel)->prim.flush); \
} while (0)
/* ================================================================
@ -372,20 +385,14 @@ do { \
((a<<24) | (r<<16) | (g<<8) | b)
#define INTEL_PACKCOLOR(format, r, g, b, a) \
(format == DV_PF_555 ? INTEL_PACKCOLOR1555(r,g,b,a) : \
(format == DV_PF_565 ? INTEL_PACKCOLOR565(r,g,b) : \
(format == DV_PF_8888 ? INTEL_PACKCOLOR8888(r,g,b,a) : \
0)))
/* ================================================================
* From linux kernel i386 header files, copes with odd sizes better
* than COPY_DWORDS would:
* XXX Put this in src/mesa/main/imports.h ???
*/
#if defined(i386) || defined(__i386__)
static __inline__ void * __memcpy(void * to, const void * from, size_t n)
static INLINE void * __memcpy(void * to, const void * from, size_t n)
{
int d0, d1, d2;
__asm__ __volatile__(
@ -447,21 +454,19 @@ extern int INTEL_DEBUG;
* intel_context.c:
*/
extern void intelInitDriverFunctions( struct dd_function_table *functions );
extern GLboolean intelInitContext( intelContextPtr intel,
extern GLboolean intelInitContext( struct intel_context *intel,
const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate,
struct dd_function_table *functions );
extern void intelGetLock(intelContextPtr intel, GLuint flags);
extern void intelSetBackClipRects(intelContextPtr intel);
extern void intelSetFrontClipRects(intelContextPtr intel);
extern void intelWindowMoved( intelContextPtr intel );
extern void intelGetLock(struct intel_context *intel, GLuint flags);
extern void intelInitState( GLcontext *ctx );
extern const GLubyte *intelGetString( GLcontext *ctx, GLenum name );
extern void intelFinish( GLcontext *ctx );
extern void intelFlush( GLcontext *ctx );
extern void intelInitDriverFunctions( struct dd_function_table *functions );
/* ================================================================
@ -521,6 +526,8 @@ extern void intelInitStateFuncs( struct dd_function_table *functions );
#define BLENDFACT_INV_CONST_ALPHA 0x0f
#define BLENDFACT_MASK 0x0f
#define MI_BATCH_BUFFER_END (0xA<<23)
extern int intel_translate_compare_func( GLenum func );
extern int intel_translate_stencil_op( GLenum op );
@ -528,19 +535,26 @@ extern int intel_translate_blend_factor( GLenum factor );
extern int intel_translate_logic_op( GLenum opcode );
/* ================================================================
* intel_ioctl.c:
/*======================================================================
* Inline conversion functions.
* These are better-typed than the macros used previously:
*/
extern void intel_dump_batchbuffer( long offset,
int *ptr,
int count );
static INLINE struct intel_context *intel_context( GLcontext *ctx )
{
return (struct intel_context *)ctx;
}
static INLINE struct intel_texture_object *intel_texture_object( struct gl_texture_object *obj )
{
return (struct intel_texture_object *)obj;
}
/* ================================================================
* intel_pixel.c:
*/
extern void intelInitPixelFuncs( struct dd_function_table *functions );
static INLINE struct intel_texture_image *intel_texture_image( struct gl_texture_image *img )
{
return (struct intel_texture_image *)img;
}
extern struct intel_renderbuffer *intel_renderbuffer( struct gl_renderbuffer *rb );
#endif

View file

@ -38,22 +38,12 @@
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_regions.h"
#include "drm.h"
#include "intel_bufmgr.h"
u_int32_t intelGetLastFrame (intelContextPtr intel)
{
int ret;
u_int32_t frame;
drm_i915_getparam_t gp;
gp.param = I915_PARAM_LAST_DISPATCH;
gp.value = (int *)&frame;
ret = drmCommandWriteRead( intel->driFd, DRM_I915_GETPARAM,
&gp, sizeof(gp) );
return frame;
}
int intelEmitIrqLocked( intelContextPtr intel )
int intelEmitIrqLocked( struct intel_context *intel )
{
drmI830IrqEmit ie;
int ret, seq;
@ -62,21 +52,21 @@ int intelEmitIrqLocked( intelContextPtr intel )
(DRM_LOCK_HELD|intel->hHWContext));
ie.irq_seq = &seq;
ret = drmCommandWriteRead( intel->driFd, DRM_I830_IRQ_EMIT,
&ie, sizeof(ie) );
if ( ret ) {
fprintf( stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret );
exit(1);
}
}
if (0)
fprintf(stderr, "%s --> %d\n", __FUNCTION__, seq );
return seq;
}
void intelWaitIrq( intelContextPtr intel, int seq )
void intelWaitIrq( struct intel_context *intel, int seq )
{
int ret;
@ -91,570 +81,69 @@ void intelWaitIrq( intelContextPtr intel, int seq )
if ( ret ) {
fprintf( stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret );
if (0)
intel_dump_batchbuffer( intel->alloc.offset,
intel->alloc.ptr,
intel->alloc.size );
exit(1);
}
}
static void age_intel( intelContextPtr intel, int age )
{
GLuint i;
for (i = 0 ; i < MAX_TEXTURE_UNITS ; i++)
if (intel->CurrentTexObj[i])
intel->CurrentTexObj[i]->age = age;
}
void intel_dump_batchbuffer( long offset,
int *ptr,
int count )
{
int i;
fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count);
for (i = 0; i < count/4; i += 4)
fprintf(stderr, "\t0x%x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
(unsigned int)offset + i*4, ptr[i], ptr[i+1], ptr[i+2], ptr[i+3]);
fprintf(stderr, "END BATCH\n\n\n");
}
void intelRefillBatchLocked( intelContextPtr intel, GLboolean allow_unlock )
{
GLuint last_irq = intel->alloc.irq_emitted;
GLuint half = intel->alloc.size / 2;
GLuint buf = (intel->alloc.active_buf ^= 1);
intel->alloc.irq_emitted = intelEmitIrqLocked( intel );
if (last_irq) {
if (allow_unlock) UNLOCK_HARDWARE( intel );
intelWaitIrq( intel, last_irq );
if (allow_unlock) LOCK_HARDWARE( intel );
}
if (0)
fprintf(stderr, "%s: now using half %d\n", __FUNCTION__, buf);
intel->batch.start_offset = intel->alloc.offset + buf * half;
intel->batch.ptr = (unsigned char *)intel->alloc.ptr + buf * half;
intel->batch.size = half - 8;
intel->batch.space = half - 8;
assert(intel->batch.space >= 0);
}
#define MI_BATCH_BUFFER_END (0xA<<23)
void intelFlushBatchLocked( intelContextPtr intel,
GLboolean ignore_cliprects,
GLboolean refill,
GLboolean allow_unlock)
void intel_batch_ioctl( struct intel_context *intel,
GLuint start_offset,
GLuint used,
GLboolean ignore_cliprects,
GLboolean allow_unlock)
{
drmI830BatchBuffer batch;
assert(intel->locked);
assert(used);
if (0)
fprintf(stderr, "%s used %d of %d offset %x..%x refill %d (started in %s)\n",
fprintf(stderr, "%s used %d offset %x..%x ignore_cliprects %d\n",
__FUNCTION__,
(intel->batch.size - intel->batch.space),
intel->batch.size,
intel->batch.start_offset,
intel->batch.start_offset +
(intel->batch.size - intel->batch.space),
refill,
intel->batch.func);
used,
start_offset,
start_offset + used,
ignore_cliprects);
/* Throw away non-effective packets. Won't work once we have
* hardware contexts which would preserve statechanges beyond a
* single buffer.
*/
if (intel->numClipRects == 0 && !ignore_cliprects) {
/* Without this yeild, an application with no cliprects can hog
* the hardware. Without unlocking, the effect is much worse -
* effectively a lock-out of other contexts.
*/
if (allow_unlock) {
UNLOCK_HARDWARE( intel );
UNLOCK_HARDWARE(intel);
sched_yield();
LOCK_HARDWARE( intel );
LOCK_HARDWARE(intel);
}
/* Note that any state thought to have been emitted actually
* hasn't:
*/
intel->batch.ptr -= (intel->batch.size - intel->batch.space);
intel->batch.space = intel->batch.size;
intel->vtbl.lost_hardware( intel );
}
if (intel->batch.space != intel->batch.size) {
if (intel->sarea->ctxOwner != intel->hHWContext) {
intel->perf_boxes |= I830_BOX_LOST_CONTEXT;
intel->sarea->ctxOwner = intel->hHWContext;
}
batch.start = intel->batch.start_offset;
batch.used = intel->batch.size - intel->batch.space;
batch.cliprects = intel->pClipRects;
batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
batch.DR1 = 0;
batch.DR4 = ((((GLuint)intel->drawX) & 0xffff) |
(((GLuint)intel->drawY) << 16));
if (intel->alloc.offset) {
if ((batch.used & 0x4) == 0) {
((int *)intel->batch.ptr)[0] = 0;
((int *)intel->batch.ptr)[1] = MI_BATCH_BUFFER_END;
batch.used += 0x8;
intel->batch.ptr += 0x8;
}
else {
((int *)intel->batch.ptr)[0] = MI_BATCH_BUFFER_END;
batch.used += 0x4;
intel->batch.ptr += 0x4;
}
}
if (0)
intel_dump_batchbuffer( batch.start,
(int *)(intel->batch.ptr - batch.used),
batch.used );
intel->batch.start_offset += batch.used;
intel->batch.size -= batch.used;
if (intel->batch.size < 8) {
refill = GL_TRUE;
intel->batch.space = intel->batch.size = 0;
}
else {
intel->batch.size -= 8;
intel->batch.space = intel->batch.size;
}
assert(intel->batch.space >= 0);
assert(batch.start >= intel->alloc.offset);
assert(batch.start < intel->alloc.offset + intel->alloc.size);
assert(batch.start + batch.used > intel->alloc.offset);
assert(batch.start + batch.used <=
intel->alloc.offset + intel->alloc.size);
if (intel->alloc.offset) {
if (drmCommandWrite (intel->driFd, DRM_I830_BATCHBUFFER, &batch,
sizeof(batch))) {
fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
UNLOCK_HARDWARE(intel);
exit(1);
}
} else {
drmI830CmdBuffer cmd;
cmd.buf = (char *)intel->alloc.ptr + batch.start;
cmd.sz = batch.used;
cmd.DR1 = batch.DR1;
cmd.DR4 = batch.DR4;
cmd.num_cliprects = batch.num_cliprects;
cmd.cliprects = batch.cliprects;
if (drmCommandWrite (intel->driFd, DRM_I830_CMDBUFFER, &cmd,
sizeof(cmd))) {
fprintf(stderr, "DRM_I830_CMDBUFFER: %d\n", -errno);
UNLOCK_HARDWARE(intel);
exit(1);
}
}
age_intel(intel, intel->sarea->last_enqueue);
/* FIXME: use hardware contexts to avoid 'losing' hardware after
* each buffer flush.
*/
if (intel->batch.contains_geometry)
assert(intel->batch.last_emit_state == intel->batch.counter);
intel->batch.counter++;
intel->batch.contains_geometry = 0;
intel->batch.func = 0;
intel->vtbl.lost_hardware( intel );
}
if (refill)
intelRefillBatchLocked( intel, allow_unlock );
}
void intelFlushBatch( intelContextPtr intel, GLboolean refill )
{
if (intel->locked) {
intelFlushBatchLocked( intel, GL_FALSE, refill, GL_FALSE );
}
else {
LOCK_HARDWARE(intel);
intelFlushBatchLocked( intel, GL_FALSE, refill, GL_TRUE );
UNLOCK_HARDWARE(intel);
}
}
void intelWaitForIdle( intelContextPtr intel )
{
if (0)
fprintf(stderr, "%s\n", __FUNCTION__);
intel->vtbl.emit_flush( intel );
intelFlushBatch( intel, GL_TRUE );
/* Use an irq to wait for dma idle -- Need to track lost contexts
* to shortcircuit consecutive calls to this function:
*/
intelWaitIrq( intel, intel->alloc.irq_emitted );
intel->alloc.irq_emitted = 0;
}
/**
* Check if we need to rotate/warp the front color buffer to the
* rotated screen. We generally need to do this when we get a glFlush
* or glFinish after drawing to the front color buffer.
*/
static void
intelCheckFrontRotate(GLcontext *ctx)
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
if (intel->ctx.DrawBuffer->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT) {
intelScreenPrivate *screen = intel->intelScreen;
if (screen->current_rotation != 0) {
__DRIdrawablePrivate *dPriv = intel->driDrawable;
intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
}
}
}
/**
* NOT directly called via glFlush.
*/
void intelFlush( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
if (intel->Fallback)
_swrast_flush( ctx );
INTEL_FIREVERTICES( intel );
if (intel->batch.size != intel->batch.space)
intelFlushBatch( intel, GL_FALSE );
}
/**
* Called via glFlush.
*/
void intelglFlush( GLcontext *ctx )
{
intelFlush(ctx);
intelCheckFrontRotate(ctx);
}
void intelFinish( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
intelFlush( ctx );
intelWaitForIdle( intel );
intelCheckFrontRotate(ctx);
}
void intelClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch)
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
const GLuint colorMask = *((GLuint *) &ctx->Color.ColorMask);
GLbitfield tri_mask = 0;
GLbitfield blit_mask = 0;
GLbitfield swrast_mask = 0;
if (0)
fprintf(stderr, "%s\n", __FUNCTION__);
/* Take care of cliprects, which are handled differently for
* clears, etc.
*/
intelFlush( &intel->ctx );
if (mask & BUFFER_BIT_FRONT_LEFT) {
if (colorMask == ~0) {
blit_mask |= BUFFER_BIT_FRONT_LEFT;
}
else {
tri_mask |= BUFFER_BIT_FRONT_LEFT;
}
}
if (mask & BUFFER_BIT_BACK_LEFT) {
if (colorMask == ~0) {
blit_mask |= BUFFER_BIT_BACK_LEFT;
}
else {
tri_mask |= BUFFER_BIT_BACK_LEFT;
}
}
if (mask & BUFFER_BIT_DEPTH) {
blit_mask |= BUFFER_BIT_DEPTH;
}
if (mask & BUFFER_BIT_STENCIL) {
if (!intel->hw_stencil) {
swrast_mask |= BUFFER_BIT_STENCIL;
}
else if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
tri_mask |= BUFFER_BIT_STENCIL;
}
else {
blit_mask |= BUFFER_BIT_STENCIL;
}
}
swrast_mask |= (mask & BUFFER_BIT_ACCUM);
if (blit_mask)
intelClearWithBlit( ctx, blit_mask, all, cx, cy, cw, ch );
if (tri_mask)
intel->vtbl.clear_with_tris( intel, tri_mask, all, cx, cy, cw, ch);
if (swrast_mask)
_swrast_Clear( ctx, swrast_mask, all, cx, cy, cw, ch );
}
void
intelRotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
GLuint srcBuffer)
{
if (intel->vtbl.rotate_window) {
intel->vtbl.rotate_window(intel, dPriv, srcBuffer);
}
}
void *intelAllocateAGP( intelContextPtr intel, GLsizei size )
{
int region_offset;
drmI830MemAlloc alloc;
int ret;
if (0)
fprintf(stderr, "%s: %d bytes\n", __FUNCTION__, size);
alloc.region = I830_MEM_REGION_AGP;
alloc.alignment = 0;
alloc.size = size;
alloc.region_offset = &region_offset;
LOCK_HARDWARE(intel);
/* Make sure the global heap is initialized
*/
if (intel->texture_heaps[0])
driAgeTextures( intel->texture_heaps[0] );
ret = drmCommandWriteRead( intel->driFd,
DRM_I830_ALLOC,
&alloc, sizeof(alloc));
if (ret) {
fprintf(stderr, "%s: DRM_I830_ALLOC ret %d\n", __FUNCTION__, ret);
UNLOCK_HARDWARE(intel);
return NULL;
}
if (0)
fprintf(stderr, "%s: allocated %d bytes\n", __FUNCTION__, size);
/* Need to propogate this information (agp memory in use) to our
* local texture lru. The kernel has already updated the global
* lru. An alternative would have been to allocate memory the
* usual way and then notify the kernel to pin the allocation.
*/
if (intel->texture_heaps[0])
driAgeTextures( intel->texture_heaps[0] );
UNLOCK_HARDWARE(intel);
return (void *)((char *)intel->intelScreen->tex.map + region_offset);
}
void intelFreeAGP( intelContextPtr intel, void *pointer )
{
int region_offset;
drmI830MemFree memfree;
int ret;
region_offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
if (region_offset < 0 ||
region_offset > intel->intelScreen->tex.size) {
fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
intel->intelScreen->tex.size);
return;
}
memfree.region = I830_MEM_REGION_AGP;
memfree.region_offset = region_offset;
ret = drmCommandWrite( intel->driFd,
DRM_I830_FREE,
&memfree, sizeof(memfree));
if (ret)
fprintf(stderr, "%s: DRM_I830_FREE ret %d\n", __FUNCTION__, ret);
}
/* This version of AllocateMemoryMESA allocates only agp memory, and
* only does so after the point at which the driver has been
* initialized.
*
* Theoretically a valid context isn't required. However, in this
* implementation, it is, as I'm using the hardware lock to protect
* the kernel data structures, and the current context to get the
* device fd.
*/
void *intelAllocateMemoryMESA(__DRInativeDisplay *dpy, int scrn,
GLsizei size, GLfloat readfreq,
GLfloat writefreq, GLfloat priority)
{
GET_CURRENT_CONTEXT(ctx);
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
writefreq, priority);
if (getenv("INTEL_NO_ALLOC"))
return NULL;
if (!ctx || INTEL_CONTEXT(ctx) == 0)
return NULL;
return intelAllocateAGP( INTEL_CONTEXT(ctx), size );
}
/* Called via glXFreeMemoryMESA() */
void intelFreeMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLvoid *pointer)
{
GET_CURRENT_CONTEXT(ctx);
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
if (!ctx || INTEL_CONTEXT(ctx) == 0) {
fprintf(stderr, "%s: no context\n", __FUNCTION__);
return;
}
intelFreeAGP( INTEL_CONTEXT(ctx), pointer );
}
/* Called via glXGetMemoryOffsetMESA()
*
* Returns offset of pointer from the start of agp aperture.
*/
GLuint intelGetMemoryOffsetMESA(__DRInativeDisplay *dpy, int scrn,
const GLvoid *pointer)
{
GET_CURRENT_CONTEXT(ctx);
intelContextPtr intel;
if (!ctx || !(intel = INTEL_CONTEXT(ctx)) ) {
fprintf(stderr, "%s: no context\n", __FUNCTION__);
return ~0;
}
if (!intelIsAgpMemory( intel, pointer, 0 ))
return ~0;
return intelAgpOffsetFromVirtual( intel, pointer );
}
GLboolean intelIsAgpMemory( intelContextPtr intel, const GLvoid *pointer,
GLint size )
{
int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
int valid = (size >= 0 &&
offset >= 0 &&
offset + size < intel->intelScreen->tex.size);
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "intelIsAgpMemory( %p ) : %d\n", pointer, valid );
return valid;
}
GLuint intelAgpOffsetFromVirtual( intelContextPtr intel, const GLvoid *pointer )
{
int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
if (offset < 0 || offset > intel->intelScreen->tex.size)
return ~0;
else
return intel->intelScreen->tex.offset + offset;
}
/* Flip the front & back buffes
*/
void intelPageFlip( const __DRIdrawablePrivate *dPriv )
{
#if 0
intelContextPtr intel;
int tmp, ret;
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
assert(dPriv);
assert(dPriv->driContextPriv);
assert(dPriv->driContextPriv->driverPrivate);
intel = (intelContextPtr) dPriv->driContextPriv->driverPrivate;
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
if (dPriv->pClipRects) {
*(drm_clip_rect_t *)intel->sarea->boxes = dPriv->pClipRects[0];
intel->sarea->nbox = 1;
}
ret = drmCommandNone(intel->driFd, DRM_I830_FLIP);
if (ret) {
fprintf(stderr, "%s: %d\n", __FUNCTION__, ret);
UNLOCK_HARDWARE( intel );
batch.start = start_offset;
batch.used = used;
batch.cliprects = intel->pClipRects;
batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
batch.DR1 = 0;
batch.DR4 = ((((GLuint)intel->drawX) & 0xffff) |
(((GLuint)intel->drawY) << 16));
if (INTEL_DEBUG & DEBUG_DMA)
fprintf(stderr, "%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
__FUNCTION__,
batch.start,
batch.start + batch.used * 4,
batch.DR4, batch.num_cliprects);
#if 1
if (drmCommandWrite (intel->driFd, DRM_I830_BATCHBUFFER, &batch,
sizeof(batch))) {
fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
UNLOCK_HARDWARE(intel);
exit(1);
}
tmp = intel->sarea->last_enqueue;
intelRefillBatchLocked( intel );
UNLOCK_HARDWARE( intel );
intelSetDrawBuffer( &intel->ctx, intel->ctx.Color.DriverDrawBuffer );
#endif
/* FIXME: use hardware contexts to avoid 'losing' hardware after
* each buffer flush.
*/
intel->vtbl.lost_hardware( intel );
}

View file

@ -30,44 +30,12 @@
#include "intel_context.h"
extern void intelWaitAgeLocked( intelContextPtr intel, int age, GLboolean unlock );
void intelWaitIrq( struct intel_context *intel, int seq );
int intelEmitIrqLocked( struct intel_context *intel );
extern void intelClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch);
extern void intelPageFlip( const __DRIdrawablePrivate *dpriv );
extern void intelRotateWindow(intelContextPtr intel,
__DRIdrawablePrivate *dPriv, GLuint srcBuffer);
extern void intelWaitForIdle( intelContextPtr intel );
extern void intelFlushBatch( intelContextPtr intel, GLboolean refill );
extern void intelFlushBatchLocked( intelContextPtr intel,
GLboolean ignore_cliprects,
GLboolean refill,
GLboolean allow_unlock);
extern void intelRefillBatchLocked( intelContextPtr intel, GLboolean allow_unlock );
extern void intelFinish( GLcontext *ctx );
extern void intelFlush( GLcontext *ctx );
extern void intelglFlush( GLcontext *ctx );
extern void *intelAllocateAGP( intelContextPtr intel, GLsizei size );
extern void intelFreeAGP( intelContextPtr intel, void *pointer );
extern void *intelAllocateMemoryMESA( __DRInativeDisplay *dpy, int scrn,
GLsizei size, GLfloat readfreq,
GLfloat writefreq, GLfloat priority );
extern void intelFreeMemoryMESA( __DRInativeDisplay *dpy, int scrn,
GLvoid *pointer );
extern GLuint intelGetMemoryOffsetMESA( __DRInativeDisplay *dpy, int scrn, const GLvoid *pointer );
extern GLboolean intelIsAgpMemory( intelContextPtr intel, const GLvoid *pointer,
GLint size );
extern GLuint intelAgpOffsetFromVirtual( intelContextPtr intel, const GLvoid *p );
extern void intelWaitIrq( intelContextPtr intel, int seq );
extern u_int32_t intelGetLastFrame (intelContextPtr intel);
extern int intelEmitIrqLocked( intelContextPtr intel );
void intel_batch_ioctl( struct intel_context *intel,
GLuint start_offset,
GLuint used,
GLboolean ignore_cliprects,
GLboolean allow_unlock);
#endif

View file

@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -12,7 +12,7 @@
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* next paragraph) shall be included in all copies or substantial portionsalloc
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
@ -25,487 +25,98 @@
*
**************************************************************************/
#include "glheader.h"
#include "enums.h"
#include "mtypes.h"
#include "macros.h"
#include "state.h"
#include "swrast/swrast.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
static GLboolean
check_color( const GLcontext *ctx, GLenum type, GLenum format,
const struct gl_pixelstore_attrib *packing,
const void *pixels, GLint sz, GLint pitch )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
GLuint cpp = intel->intelScreen->cpp;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if ( (pitch & 63) ||
ctx->_ImageTransferState ||
packing->SwapBytes ||
packing->LsbFirst) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: failed 1\n", __FUNCTION__);
return GL_FALSE;
}
if ( type == GL_UNSIGNED_INT_8_8_8_8_REV &&
cpp == 4 &&
format == GL_BGRA ) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: passed 2\n", __FUNCTION__);
return GL_TRUE;
}
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: failed\n", __FUNCTION__);
return GL_FALSE;
}
static GLboolean
check_color_per_fragment_ops( const GLcontext *ctx )
{
int result;
result = (!( ctx->Color.AlphaEnabled ||
ctx->Depth.Test ||
ctx->Fog.Enabled ||
ctx->Scissor.Enabled ||
ctx->Stencil.Enabled ||
!ctx->Color.ColorMask[0] ||
!ctx->Color.ColorMask[1] ||
!ctx->Color.ColorMask[2] ||
!ctx->Color.ColorMask[3] ||
ctx->Color.ColorLogicOpEnabled ||
ctx->Texture._EnabledUnits
) &&
ctx->Current.RasterPosValid);
return result;
}
#include "intel_pixel.h"
#include "intel_regions.h"
/**
* Clip the given rectangle against the buffer's bounds (including scissor).
* \param size returns the
* \return GL_TRUE if any pixels remain, GL_FALSE if totally clipped.
* Check if any fragment operations are in effect which might effect
* glDraw/CopyPixels.
*/
GLboolean intel_check_blit_fragment_ops( GLcontext *ctx )
{
if (ctx->NewState)
_mesa_update_state(ctx);
/* XXX Note: Scissor could be done with the blitter:
*/
return !(ctx->_ImageTransferState ||
ctx->Color.AlphaEnabled ||
ctx->Depth.Test ||
ctx->Fog.Enabled ||
ctx->Scissor.Enabled ||
ctx->Stencil.Enabled ||
!ctx->Color.ColorMask[0] ||
!ctx->Color.ColorMask[1] ||
!ctx->Color.ColorMask[2] ||
!ctx->Color.ColorMask[3] ||
ctx->Color.ColorLogicOpEnabled ||
ctx->Texture._EnabledUnits ||
ctx->FragmentProgram._Enabled);
}
GLboolean intel_check_meta_tex_fragment_ops( GLcontext *ctx )
{
if (ctx->NewState)
_mesa_update_state(ctx);
/* Some of _ImageTransferState (scale, bias) could be done with
* fragment programs on i915.
*/
return !(ctx->_ImageTransferState ||
ctx->Fog.Enabled || /* not done yet */
ctx->Texture._EnabledUnits ||
ctx->FragmentProgram._Enabled);
}
/* The intel_region struct doesn't really do enough to capture the
* format of the pixels in the region. For now this code assumes that
* the region is a display surface and hence is either ARGB8888 or
* RGB565.
* XXX FBO: If we'd pass in the intel_renderbuffer instead of region, we'd
* know the buffer's pixel format.
*
* XXX Replace this with _mesa_clip_drawpixels() and _mesa_clip_readpixels()
* from Mesa 6.4. We shouldn't apply scissor for ReadPixels.
* \param format as given to glDraw/ReadPixels
* \param type as given to glDraw/ReadPixels
*/
static GLboolean
clip_pixelrect( const GLcontext *ctx,
const GLframebuffer *buffer,
GLint *x, GLint *y,
GLsizei *width, GLsizei *height)
GLboolean intel_check_blit_format( struct intel_region *region,
GLenum format, GLenum type )
{
/* left clipping */
if (*x < buffer->_Xmin) {
*width -= (buffer->_Xmin - *x);
*x = buffer->_Xmin;
}
/* right clipping */
if (*x + *width > buffer->_Xmax)
*width -= (*x + *width - buffer->_Xmax - 1);
if (*width <= 0)
return GL_FALSE;
/* bottom clipping */
if (*y < buffer->_Ymin) {
*height -= (buffer->_Ymin - *y);
*y = buffer->_Ymin;
}
/* top clipping */
if (*y + *height > buffer->_Ymax)
*height -= (*y + *height - buffer->_Ymax - 1);
if (*height <= 0)
return GL_FALSE;
return GL_TRUE;
}
/**
* Compute intersection of a clipping rectangle and pixel rectangle,
* returning results in x/y/w/hOut vars.
* \return GL_TRUE if there's intersection, GL_FALSE if disjoint.
*/
static INLINE GLboolean
intersect_region(const drm_clip_rect_t *box,
GLint x, GLint y, GLsizei width, GLsizei height,
GLint *xOut, GLint *yOut, GLint *wOut, GLint *hOut)
{
GLint bx = box->x1;
GLint by = box->y1;
GLint bw = box->x2 - bx;
GLint bh = box->y2 - by;
if (bx < x) bw -= x - bx, bx = x;
if (by < y) bh -= y - by, by = y;
if (bx + bw > x + width) bw = x + width - bx;
if (by + bh > y + height) bh = y + height - by;
*xOut = bx;
*yOut = by;
*wOut = bw;
*hOut = bh;
if (bw <= 0) return GL_FALSE;
if (bh <= 0) return GL_FALSE;
return GL_TRUE;
}
static GLboolean
intelTryReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
GLint size = 0; /* not really used */
GLint pitch = pack->RowLength ? pack->RowLength : width;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
/* Only accelerate reading to agp buffers.
*/
if ( !intelIsAgpMemory(intel, pixels,
pitch * height * intel->intelScreen->cpp ) ) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: dest not agp\n", __FUNCTION__);
return GL_FALSE;
}
/* Need GL_PACK_INVERT_MESA to cope with upsidedown results from
* blitter:
*/
if (!pack->Invert) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: MESA_PACK_INVERT not set\n", __FUNCTION__);
return GL_FALSE;
}
if (!check_color(ctx, type, format, pack, pixels, size, pitch))
return GL_FALSE;
switch ( intel->intelScreen->cpp ) {
case 4:
break;
default:
return GL_FALSE;
}
/* Although the blits go on the command buffer, need to do this and
* fire with lock held to guarentee cliprects and drawing offset are
* correct.
*
* This is an unusual situation however, as the code which flushes
* a full command buffer expects to be called unlocked. As a
* workaround, immediately flush the buffer on aquiring the lock.
*/
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
{
__DRIdrawablePrivate *dPriv = intel->driDrawable;
int nbox = dPriv->numClipRects;
int src_offset = intel->readRegion->offset;
int src_pitch = intel->intelScreen->front.pitch;
int dst_offset = intelAgpOffsetFromVirtual( intel, pixels);
drm_clip_rect_t *box = dPriv->pClipRects;
int i;
assert(dst_offset != ~0); /* should have been caught above */
if (!clip_pixelrect(ctx, ctx->ReadBuffer, &x, &y, &width, &height)) {
UNLOCK_HARDWARE( intel );
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s totally clipped -- nothing to do\n",
__FUNCTION__);
return GL_TRUE;
}
/* convert to screen coords (y=0=top) */
y = dPriv->h - y - height;
x += dPriv->x;
y += dPriv->y;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "readpixel blit src_pitch %d dst_pitch %d\n",
src_pitch, pitch);
/* We don't really have to do window clipping for readpixels.
* The OpenGL spec says that pixels read from outside the
* visible window region (pixel ownership) have undefined value.
*/
for (i = 0 ; i < nbox ; i++)
{
GLint bx, by, bw, bh;
if (intersect_region(box+i, x, y, width, height,
&bx, &by, &bw, &bh)) {
intelEmitCopyBlitLocked( intel,
intel->intelScreen->cpp,
src_pitch, src_offset,
pitch, dst_offset,
bx, by,
bx - x, by - y,
bw, bh );
}
}
}
UNLOCK_HARDWARE( intel );
intelFinish( &intel->ctx );
return GL_TRUE;
}
static void
intelReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels )
{
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if (!intelTryReadPixels( ctx, x, y, width, height, format, type, pack,
pixels))
_swrast_ReadPixels( ctx, x, y, width, height, format, type, pack,
pixels);
}
static void do_draw_pix( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLint pitch,
const void *pixels,
GLuint dest )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = intel->driDrawable;
drm_clip_rect_t *box = dPriv->pClipRects;
int nbox = dPriv->numClipRects;
int i;
int src_offset = intelAgpOffsetFromVirtual( intel, pixels);
int src_pitch = pitch;
assert(src_offset != ~0); /* should be caught earlier */
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
if (ctx->DrawBuffer)
{
y -= height; /* cope with pixel zoom */
if (!clip_pixelrect(ctx, ctx->DrawBuffer,
&x, &y, &width, &height)) {
UNLOCK_HARDWARE( intel );
return;
}
y = dPriv->h - y - height; /* convert from gl to hardware coords */
x += dPriv->x;
y += dPriv->y;
for (i = 0 ; i < nbox ; i++ )
{
GLint bx, by, bw, bh;
if (intersect_region(box + i, x, y, width, height,
&bx, &by, &bw, &bh)) {
intelEmitCopyBlitLocked( intel,
intel->intelScreen->cpp,
src_pitch, src_offset,
intel->intelScreen->front.pitch,
intel->drawRegion->offset,
bx - x, by - y,
bx, by,
bw, bh );
}
}
}
UNLOCK_HARDWARE( intel );
intelFinish( &intel->ctx );
}
static GLboolean
intelTryDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
GLint pitch = unpack->RowLength ? unpack->RowLength : width;
GLuint dest;
GLuint cpp = intel->intelScreen->cpp;
GLint size = width * pitch * cpp;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
switch (format) {
case GL_RGB:
case GL_RGBA:
case GL_BGRA:
dest = intel->drawRegion->offset;
/* Planemask doesn't have full support in blits.
*/
if (!ctx->Color.ColorMask[RCOMP] ||
!ctx->Color.ColorMask[GCOMP] ||
!ctx->Color.ColorMask[BCOMP] ||
!ctx->Color.ColorMask[ACOMP]) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: planemask\n", __FUNCTION__);
return GL_FALSE;
}
/* Can't do conversions on agp reads/draws.
*/
if ( !intelIsAgpMemory( intel, pixels, size ) ) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: not agp memory\n", __FUNCTION__);
return GL_FALSE;
}
if (!check_color(ctx, type, format, unpack, pixels, size, pitch)) {
return GL_FALSE;
}
if (!check_color_per_fragment_ops(ctx)) {
return GL_FALSE;
}
if (ctx->Pixel.ZoomX != 1.0F ||
ctx->Pixel.ZoomY != -1.0F)
return GL_FALSE;
break;
default:
return GL_FALSE;
}
if ( intelIsAgpMemory(intel, pixels, size) )
{
do_draw_pix( ctx, x, y, width, height, pitch, pixels, dest );
if (region->cpp == 4 &&
(type == GL_UNSIGNED_INT_8_8_8_8_REV ||
type == GL_UNSIGNED_BYTE) &&
format == GL_BGRA ) {
return GL_TRUE;
}
else if (0)
{
/* Pixels is in regular memory -- get dma buffers and perform
* upload through them. No point doing this for regular uploads
* but once we remove some of the restrictions above (colormask,
* pixelformat conversion, zoom?, etc), this could be a win.
*/
if (region->cpp == 2 &&
type == GL_UNSIGNED_SHORT_5_6_5_REV &&
format == GL_BGR ) {
return GL_TRUE;
}
else
return GL_FALSE;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: bad format for blit (cpp %d, type %s format %s)\n",
__FUNCTION__, region->cpp,
_mesa_lookup_enum_by_nr(type),
_mesa_lookup_enum_by_nr(format));
return GL_FALSE;
}
static void
intelDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels )
{
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if (!intelTryDrawPixels( ctx, x, y, width, height, format, type,
unpack, pixels ))
_swrast_DrawPixels( ctx, x, y, width, height, format, type,
unpack, pixels );
}
/**
* Implement glCopyPixels for the front color buffer (or back buffer Pixmap)
* for the color buffer. Don't support zooming, pixel transfer, etc.
* We do support copying from one window to another, ala glXMakeCurrentRead.
*/
static void
intelCopyPixels( GLcontext *ctx,
GLint srcx, GLint srcy, GLsizei width, GLsizei height,
GLint destx, GLint desty, GLenum type )
{
#if 0
const XMesaContext xmesa = XMESA_CONTEXT(ctx);
const SWcontext *swrast = SWRAST_CONTEXT( ctx );
XMesaDisplay *dpy = xmesa->xm_visual->display;
const XMesaDrawable drawBuffer = xmesa->xm_draw_buffer->buffer;
const XMesaDrawable readBuffer = xmesa->xm_read_buffer->buffer;
const XMesaGC gc = xmesa->xm_draw_buffer->gc;
ASSERT(dpy);
ASSERT(gc);
if (drawBuffer && /* buffer != 0 means it's a Window or Pixmap */
readBuffer &&
type == GL_COLOR &&
(swrast->_RasterMask & ~CLIP_BIT) == 0 && /* no blend, z-test, etc */
ctx->_ImageTransferState == 0 && /* no color tables, scale/bias, etc */
ctx->Pixel.ZoomX == 1.0 && /* no zooming */
ctx->Pixel.ZoomY == 1.0) {
/* Note: we don't do any special clipping work here. We could,
* but X will do it for us.
*/
srcy = FLIP(xmesa->xm_read_buffer, srcy) - height + 1;
desty = FLIP(xmesa->xm_draw_buffer, desty) - height + 1;
XCopyArea(dpy, readBuffer, drawBuffer, gc,
srcx, srcy, width, height, destx, desty);
}
#else
_swrast_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type );
#endif
}
void intelInitPixelFuncs( struct dd_function_table *functions )
{
/* Pixel path fallbacks.
*/
functions->Accum = _swrast_Accum;
functions->Bitmap = _swrast_Bitmap;
functions->CopyPixels = intelCopyPixels;
if (!getenv("INTEL_NO_BLITS")) {
functions->ReadPixels = intelReadPixels;
functions->DrawPixels = intelDrawPixels;
}
else {
functions->ReadPixels = _swrast_ReadPixels;
functions->DrawPixels = _swrast_DrawPixels;
}
functions->ReadPixels = intelReadPixels;
functions->DrawPixels = intelDrawPixels;
}

View file

@ -106,24 +106,29 @@ static const int scale_prim[GL_POLYGON+1] = {
};
static void intelDmaPrimitive( intelContextPtr intel, GLenum prim )
static void intelDmaPrimitive( struct intel_context *intel, GLenum prim )
{
if (0) fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
INTEL_FIREVERTICES(intel);
intel->vtbl.reduced_primitive_state( intel, reduced_prim[prim] );
intelStartInlinePrimitive( intel, hw_prim[prim] );
intelStartInlinePrimitive( intel, hw_prim[prim], INTEL_BATCH_CLIPRECTS );
}
#define LOCAL_VARS intelContextPtr intel = INTEL_CONTEXT(ctx)
#define LOCAL_VARS struct intel_context *intel = intel_context(ctx)
#define INIT( prim ) \
do { \
intelDmaPrimitive( intel, prim ); \
} while (0)
#define FLUSH() INTEL_FIREVERTICES( intel )
#define FLUSH() \
do { \
if (intel->prim.flush) \
intel->prim.flush(intel); \
} while (0)
#define GET_SUBSEQUENT_VB_MAX_VERTS() \
(((intel->alloc.size / 2) - 1500) / (intel->vertex_size*4))
((BATCH_SZ - 1500) / (intel->vertex_size*4))
#define GET_CURRENT_VB_MAX_VERTS() GET_SUBSEQUENT_VB_MAX_VERTS()
#define ALLOC_VERTS( nr ) \
@ -142,7 +147,7 @@ do { \
/* Heuristic to choose between the two render paths:
*/
static GLboolean choose_render( intelContextPtr intel,
static GLboolean choose_render( struct intel_context *intel,
struct vertex_buffer *VB )
{
int vertsz = intel->vertex_size;
@ -194,7 +199,7 @@ static GLboolean choose_render( intelContextPtr intel,
static GLboolean intel_run_render( GLcontext *ctx,
struct tnl_pipeline_stage *stage )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
GLuint i;
@ -225,6 +230,9 @@ static GLboolean intel_run_render( GLcontext *ctx,
}
tnl->Driver.Render.Finish( ctx );
if (intel->prim.flush)
intel->prim.flush(intel);
return GL_FALSE; /* finished the pipe */
}

View file

@ -38,10 +38,12 @@
#include "intel_screen.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_span.h"
#include "intel_tris.h"
#include "intel_ioctl.h"
#include "intel_fbo.h"
#include "i830_dri.h"
@ -283,16 +285,13 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
intelScreen->cpp = gDRIPriv->cpp;
switch (gDRIPriv->bitsPerPixel) {
case 15: intelScreen->fbFormat = DV_PF_555; break;
case 16: intelScreen->fbFormat = DV_PF_565; break;
case 32: intelScreen->fbFormat = DV_PF_8888; break;
default: exit(1); break;
}
intelUpdateScreenFromSAREA(intelScreen, sarea);
if (0)
intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);
if (!intelMapScreenRegions(sPriv)) {
fprintf(stderr,"\nERROR! mapping regions\n");
_mesa_free(intelScreen);
@ -300,6 +299,28 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
return GL_FALSE;
}
#if 0
/*
* FIXME: Remove this code and its references.
*/
intelScreen->tex.offset = gDRIPriv->textureOffset;
intelScreen->logTextureGranularity = gDRIPriv->logTextureGranularity;
intelScreen->tex.handle = gDRIPriv->textures;
intelScreen->tex.size = gDRIPriv->textureSize;
#else
intelScreen->tex.offset = 0;
intelScreen->logTextureGranularity = 0;
intelScreen->tex.handle = 0;
intelScreen->tex.size = 0;
#endif
intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
if (1) intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);
intelScreen->drmMinor = sPriv->drmMinor;
/* Determine if IRQs are active? */
@ -340,14 +361,8 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
(*glx_enable_extension)( psc, "GLX_MESA_swap_control" );
(*glx_enable_extension)( psc, "GLX_MESA_swap_frame_usage" );
(*glx_enable_extension)( psc, "GLX_SGI_make_current_read" );
(*glx_enable_extension)( psc, "GLX_MESA_allocate_memory" );
(*glx_enable_extension)( psc, "GLX_MESA_copy_sub_buffer" );
}
sPriv->psc->allocateMemory = (void *) intelAllocateMemoryMESA;
sPriv->psc->freeMemory = (void *) intelFreeMemoryMESA;
sPriv->psc->memoryOffset = (void *) intelGetMemoryOffsetMESA;
return GL_TRUE;
}
@ -362,6 +377,9 @@ static void intelDestroyScreen(__DRIscreenPrivate *sPriv)
}
/**
* This is called when we need to set up GL rendering to a new X window.
*/
static GLboolean intelCreateBuffer( __DRIscreenPrivate *driScrnPriv,
__DRIdrawablePrivate *driDrawPriv,
const __GLcontextModes *mesaVis,
@ -374,70 +392,71 @@ static GLboolean intelCreateBuffer( __DRIscreenPrivate *driScrnPriv,
} else {
GLboolean swStencil = (mesaVis->stencilBits > 0 &&
mesaVis->depthBits != 24);
GLenum rgbFormat = (mesaVis->redBits == 5 ? GL_RGB5 : GL_RGBA8);
struct gl_framebuffer *fb = _mesa_create_framebuffer(mesaVis);
/* setup the hardware-based renderbuffers */
{
driRenderbuffer *frontRb
= driNewRenderbuffer(GL_RGBA,
screen->front.map,
screen->cpp,
screen->front.offset, screen->front.pitch,
driDrawPriv);
intelSetSpanFunctions(frontRb, mesaVis);
struct intel_renderbuffer *frontRb
= intel_create_renderbuffer(rgbFormat,
screen->width, screen->height,
screen->front.offset,
screen->front.pitch,
screen->cpp,
screen->front.map);
intel_set_span_functions(&frontRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &frontRb->Base);
}
if (mesaVis->doubleBufferMode) {
driRenderbuffer *backRb
= driNewRenderbuffer(GL_RGBA,
screen->back.map,
screen->cpp,
screen->back.offset, screen->back.pitch,
driDrawPriv);
intelSetSpanFunctions(backRb, mesaVis);
struct intel_renderbuffer *backRb
= intel_create_renderbuffer(rgbFormat,
screen->width, screen->height,
screen->back.offset,
screen->back.pitch,
screen->cpp,
screen->back.map);
intel_set_span_functions(&backRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &backRb->Base);
}
if (mesaVis->depthBits == 16) {
driRenderbuffer *depthRb
= driNewRenderbuffer(GL_DEPTH_COMPONENT16,
screen->depth.map,
screen->cpp,
screen->depth.offset, screen->depth.pitch,
driDrawPriv);
intelSetSpanFunctions(depthRb, mesaVis);
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthRb->Base);
if (mesaVis->depthBits == 24 && mesaVis->stencilBits == 8) {
/* combined depth/stencil buffer */
struct intel_renderbuffer *depthStencilRb
= intel_create_renderbuffer(
GL_DEPTH24_STENCIL8_EXT,
screen->width, screen->height,
screen->depth.offset,
screen->depth.pitch,
screen->cpp, /* 4! */
screen->depth.map);
intel_set_span_functions(&depthStencilRb->Base);
/* note: bind RB to two attachment points */
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthStencilRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_STENCIL, &depthStencilRb->Base);
}
else if (mesaVis->depthBits == 24) {
driRenderbuffer *depthRb
= driNewRenderbuffer(GL_DEPTH_COMPONENT24,
screen->depth.map,
screen->cpp,
screen->depth.offset, screen->depth.pitch,
driDrawPriv);
intelSetSpanFunctions(depthRb, mesaVis);
else if (mesaVis->depthBits == 16) {
/* just 16-bit depth buffer, no hw stencil */
struct intel_renderbuffer *depthRb
= intel_create_renderbuffer(GL_DEPTH_COMPONENT16,
screen->width, screen->height,
screen->depth.offset,
screen->depth.pitch,
screen->cpp, /* 2! */
screen->depth.map);
intel_set_span_functions(&depthRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthRb->Base);
}
if (mesaVis->stencilBits > 0 && !swStencil) {
driRenderbuffer *stencilRb
= driNewRenderbuffer(GL_STENCIL_INDEX8_EXT,
screen->depth.map,
screen->cpp,
screen->depth.offset, screen->depth.pitch,
driDrawPriv);
intelSetSpanFunctions(stencilRb, mesaVis);
_mesa_add_renderbuffer(fb, BUFFER_STENCIL, &stencilRb->Base);
}
/* now add any/all software-based renderbuffers we may need */
_mesa_add_soft_renderbuffers(fb,
GL_FALSE, /* color */
GL_FALSE, /* depth */
GL_FALSE, /* never sw color */
GL_FALSE, /* never sw depth */
swStencil,
mesaVis->accumRedBits > 0,
GL_FALSE, /* alpha */
GL_FALSE /* aux */);
GL_FALSE, /* never sw alpha */
GL_FALSE /* never sw aux */);
driDrawPriv->driverPrivate = (void *) fb;
return (driDrawPriv->driverPrivate != NULL);
@ -456,7 +475,7 @@ static void intelDestroyBuffer(__DRIdrawablePrivate *driDrawPriv)
static int
intelGetSwapInfo( __DRIdrawablePrivate *dPriv, __DRIswapInfo * sInfo )
{
intelContextPtr intel;
struct intel_context *intel;
if ( (dPriv == NULL) || (dPriv->driContextPriv == NULL)
|| (dPriv->driContextPriv->driverPrivate == NULL)
@ -500,6 +519,8 @@ static GLboolean intelCreateContext( const __GLcontextModes *mesaVis,
intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
switch (intelScreen->deviceID) {
/* Don't deal with i830 until texture work complete:
*/
case PCI_CHIP_845_G:
case PCI_CHIP_I830_M:
case PCI_CHIP_I855_GM:
@ -646,7 +667,7 @@ void * __driCreateNewScreen_20050727( __DRInativeDisplay *dpy, int scrn, __DRIsc
__DRIscreenPrivate *psp;
static const __DRIversion ddx_expected = { 1, 5, 0 };
static const __DRIversion dri_expected = { 4, 0, 0 };
static const __DRIversion drm_expected = { 1, 4, 0 };
static const __DRIversion drm_expected = { 1, 5, 1 };
dri_interface = interface;

View file

@ -29,13 +29,15 @@
#define _INTEL_INIT_H_
#include <sys/time.h>
#include "xmlconfig.h"
#include "dri_util.h"
#include "intel_rotate.h"
#include "i830_common.h"
#include "xmlconfig.h"
/* This roughly corresponds to a gl_renderbuffer (Mesa 6.4) */
/* XXX: change name or eliminate to avoid conflict with "struct
* intel_region"!!!
*/
typedef struct {
drm_handle_t handle;
drmSize size; /* region size in bytes */
@ -58,7 +60,8 @@ typedef struct
int mem; /* unused */
int cpp; /* for front and back buffers */
int fbFormat;
/* int bitsPerPixel; */
int fbFormat; /* XXX FBO: this is obsolete - remove after i830 updates */
int logTextureGranularity;

View file

@ -30,194 +30,340 @@
#include "mtypes.h"
#include "colormac.h"
#include "intel_fbo.h"
#include "intel_screen.h"
#include "intel_span.h"
#include "intel_regions.h"
#include "intel_ioctl.h"
#include "intel_tex.h"
#include "swrast/swrast.h"
/*
break intelWriteRGBASpan_ARGB8888
*/
#undef DBG
#define DBG 0
#define LOCAL_VARS \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
__DRIdrawablePrivate *dPriv = intel->driDrawable; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *) drb->Base.Data + \
dPriv->x * drb->cpp + \
dPriv->y * pitch; \
GLushort p; \
(void) buf; (void) p
#define LOCAL_VARS \
struct intel_context *intel = intel_context(ctx); \
struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
const GLint yScale = irb->RenderToTexture ? 1 : -1; \
const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
GLubyte *buf = (GLubyte *) irb->pfMap \
+ (intel->drawY * irb->pfPitch + intel->drawX) * irb->region->cpp;\
GLuint p; \
assert(irb->pfMap);\
(void) p;
#define LOCAL_DEPTH_VARS \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
__DRIdrawablePrivate *dPriv = intel->driDrawable; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *) drb->Base.Data + \
dPriv->x * drb->cpp + \
dPriv->y * pitch
/* XXX FBO: this is identical to the macro in spantmp2.h except we get
* the cliprect info from the context, not the driDrawable.
* Move this into spantmp2.h someday.
*/
#define HW_CLIPLOOP() \
do { \
int _nc = intel->numClipRects; \
while ( _nc-- ) { \
int minx = intel->pClipRects[_nc].x1 - intel->drawX; \
int miny = intel->pClipRects[_nc].y1 - intel->drawY; \
int maxx = intel->pClipRects[_nc].x2 - intel->drawX; \
int maxy = intel->pClipRects[_nc].y2 - intel->drawY;
#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
#define INIT_MONO_PIXEL(p,color)\
p = INTEL_PACKCOLOR565(color[0],color[1],color[2])
#define Y_FLIP(_y) (height - _y - 1)
#define Y_FLIP(_y) ((_y) * yScale + yBias)
#define HW_LOCK()
#define HW_UNLOCK()
/* 16 bit, 565 rgb color spanline and pixel functions
/* 16 bit, RGB565 color spanline and pixel functions
*/
#define WRITE_RGBA( _x, _y, r, g, b, a ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = ( (((int)r & 0xf8) << 8) | \
(((int)g & 0xfc) << 3) | \
(((int)b & 0xf8) >> 3))
#define WRITE_PIXEL( _x, _y, p ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = p
#define SPANTMP_PIXEL_FMT GL_RGB
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
#define READ_RGBA( rgba, _x, _y ) \
do { \
GLushort p = *(GLushort *)(buf + _x*2 + _y*pitch); \
rgba[0] = (((p >> 11) & 0x1f) * 255) / 31; \
rgba[1] = (((p >> 5) & 0x3f) * 255) / 63; \
rgba[2] = (((p >> 0) & 0x1f) * 255) / 31; \
rgba[3] = 255; \
} while(0)
#define TAG(x) intel##x##_RGB565
#define TAG2(x,y) intel##x##_RGB565##y
#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 2)
#include "spantmp2.h"
#define TAG(x) intel##x##_565
#include "spantmp.h"
/* 15 bit, 555 rgb color spanline and pixel functions
/* 32 bit, ARGB8888 color spanline and pixel functions
*/
#define WRITE_RGBA( _x, _y, r, g, b, a ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = (((r & 0xf8) << 7) | \
((g & 0xf8) << 3) | \
((b & 0xf8) >> 3))
#define SPANTMP_PIXEL_FMT GL_BGRA
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
#define WRITE_PIXEL( _x, _y, p ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = p
#define TAG(x) intel##x##_ARGB8888
#define TAG2(x,y) intel##x##_ARGB8888##y
#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 4)
#include "spantmp2.h"
#define READ_RGBA( rgba, _x, _y ) \
do { \
GLushort p = *(GLushort *)(buf + _x*2 + _y*pitch); \
rgba[0] = (p >> 7) & 0xf8; \
rgba[1] = (p >> 3) & 0xf8; \
rgba[2] = (p << 3) & 0xf8; \
rgba[3] = 255; \
} while(0)
#define TAG(x) intel##x##_555
#include "spantmp.h"
#define LOCAL_DEPTH_VARS \
struct intel_context *intel = intel_context(ctx); \
struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
const GLuint pitch = irb->pfPitch/***XXX region->pitch*/; /* in pixels */ \
const GLint yScale = irb->RenderToTexture ? 1 : -1; \
const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
char *buf = (char *) irb->pfMap/*XXX use region->map*/ + \
(intel->drawY * pitch + intel->drawX) * irb->region->cpp;
/* 16 bit depthbuffer functions.
*/
#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
/**
** 16-bit depthbuffer functions.
**/
#define WRITE_DEPTH( _x, _y, d ) \
*(GLushort *)(buf + (_x)*2 + (_y)*pitch) = d;
((GLushort *)buf)[(_x) + (_y) * pitch] = d;
#define READ_DEPTH( d, _x, _y ) \
d = *(GLushort *)(buf + (_x)*2 + (_y)*pitch);
d = ((GLushort *)buf)[(_x) + (_y) * pitch];
#define TAG(x) intel##x##_z16
#include "depthtmp.h"
#undef LOCAL_VARS
#define LOCAL_VARS \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
__DRIdrawablePrivate *dPriv = intel->driDrawable; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *)drb->Base.Data + \
dPriv->x * drb->cpp + \
dPriv->y * pitch; \
GLuint p; \
(void) buf; (void) p
#undef INIT_MONO_PIXEL
#define INIT_MONO_PIXEL(p,color)\
p = INTEL_PACKCOLOR8888(color[0],color[1],color[2],color[3])
/* 32 bit, 8888 argb color spanline and pixel functions
*/
#define WRITE_RGBA(_x, _y, r, g, b, a) \
*(GLuint *)(buf + _x*4 + _y*pitch) = ((r << 16) | \
(g << 8) | \
(b << 0) | \
(a << 24) )
#define WRITE_PIXEL(_x, _y, p) \
*(GLuint *)(buf + _x*4 + _y*pitch) = p
#define READ_RGBA(rgba, _x, _y) \
do { \
GLuint p = *(GLuint *)(buf + _x*4 + _y*pitch); \
rgba[0] = (p >> 16) & 0xff; \
rgba[1] = (p >> 8) & 0xff; \
rgba[2] = (p >> 0) & 0xff; \
rgba[3] = (p >> 24) & 0xff; \
} while (0)
#define TAG(x) intel##x##_8888
#include "spantmp.h"
/* 24/8 bit interleaved depth/stencil functions
*/
#define WRITE_DEPTH( _x, _y, d ) { \
GLuint tmp = *(GLuint *)(buf + (_x)*4 + (_y)*pitch); \
tmp &= 0xff000000; \
tmp |= (d) & 0xffffff; \
*(GLuint *)(buf + (_x)*4 + (_y)*pitch) = tmp; \
/**
** 24/8-bit interleaved depth/stencil functions
** Note: we're actually reading back combined depth+stencil values.
** The wrappers in main/depthstencil.c are used to extract the depth
** and stencil values.
**/
/* Change ZZZS -> SZZZ */
#define WRITE_DEPTH( _x, _y, d ) { \
GLuint tmp = ((d) >> 8) | ((d) << 24); \
((GLuint *)buf)[(_x) + (_y) * pitch] = tmp; \
}
#define READ_DEPTH( d, _x, _y ) \
d = *(GLuint *)(buf + (_x)*4 + (_y)*pitch) & 0xffffff;
/* Change SZZZ -> ZZZS */
#define READ_DEPTH( d, _x, _y ) { \
GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
d = (tmp << 8) | (tmp >> 24); \
}
#define TAG(x) intel##x##_z24_s8
#include "depthtmp.h"
#define WRITE_STENCIL( _x, _y, d ) { \
GLuint tmp = *(GLuint *)(buf + (_x)*4 + (_y)*pitch); \
tmp &= 0xffffff; \
tmp |= ((d)<<24); \
*(GLuint *)(buf + (_x)*4 + (_y)*pitch) = tmp; \
/**
** 8-bit stencil function (XXX FBO: This is obsolete)
**/
#define WRITE_STENCIL( _x, _y, d ) { \
GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
tmp &= 0xffffff; \
tmp |= ((d) << 24); \
((GLuint *) buf)[(_x) + (_y) * pitch] = tmp; \
}
#define READ_STENCIL( d, _x, _y ) \
d = *(GLuint *)(buf + (_x)*4 + (_y)*pitch) >> 24;
#define READ_STENCIL( d, _x, _y ) \
d = ((GLuint *)buf)[(_x) + (_y) * pitch] >> 24;
#define TAG(x) intel##x##_z24_s8
#include "stenciltmp.h"
/* Move locking out to get reasonable span performance.
/**
* Map or unmap all the renderbuffers which we may need during
* software rendering.
* XXX in the future, we could probably convey extra information to
* reduce the number of mappings needed. I.e. if doing a glReadPixels
* from the depth buffer, we really only need one mapping.
*
* XXX Rewrite this function someday.
* We can probably just loop over all the renderbuffer attachments,
* map/unmap all of them, and not worry about the _ColorDrawBuffers
* _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
*/
static void
intel_map_unmap_buffers(struct intel_context *intel, GLboolean map)
{
GLcontext *ctx = &intel->ctx;
GLuint i, j;
struct intel_renderbuffer *irb;
/* color draw buffers */
for (i = 0; i < ctx->Const.MaxDrawBuffers; i++) {
for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers[i]; j++) {
struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i][j];
irb = intel_renderbuffer(rb);
if (irb) {
/* this is a user-created intel_renderbuffer */
if (irb->region) {
if (map)
intel_region_map(intel, irb->region);
else
intel_region_unmap(intel, irb->region);
}
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
}
}
/* check for render to textures */
for (i = 0; i < BUFFER_COUNT; i++) {
struct gl_renderbuffer_attachment *att = ctx->DrawBuffer->Attachment + i;
struct gl_texture_object *tex = att->Texture;
if (tex) {
/* render to texture */
ASSERT(att->Renderbuffer);
if (map) {
struct gl_texture_image *texImg;
texImg = tex->Image[att->CubeMapFace][att->TextureLevel];
intel_tex_map_images(intel, intel_texture_object(tex));
}
else {
intel_tex_unmap_images(intel, intel_texture_object(tex));
}
}
}
/* color read buffers */
irb = intel_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
if (irb && irb->region) {
if (map)
intel_region_map(intel, irb->region);
else
intel_region_unmap(intel, irb->region);
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
/* Account for front/back color page flipping.
* The span routines use the pfMap and pfPitch fields which will
* swap the front/back region map/pitch if we're page flipped.
* Do this after mapping, above, so the map field is valid.
*/
#if 0
if (map && ctx->DrawBuffer->Name == 0) {
struct intel_renderbuffer *irbFront
= intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_FRONT_LEFT);
struct intel_renderbuffer *irbBack
= intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_BACK_LEFT);
if (irbBack) {
/* double buffered */
if (intel->sarea->pf_current_page == 0) {
irbFront->pfMap = irbFront->region->map;
irbFront->pfPitch = irbFront->region->pitch;
irbBack->pfMap = irbBack->region->map;
irbBack->pfPitch = irbBack->region->pitch;
}
else {
irbFront->pfMap = irbBack->region->map;
irbFront->pfPitch = irbBack->region->pitch;
irbBack->pfMap = irbFront->region->map;
irbBack->pfPitch = irbFront->region->pitch;
}
}
}
#endif
/* depth buffer (Note wrapper!) */
if (ctx->DrawBuffer->_DepthBuffer) {
irb = intel_renderbuffer(ctx->DrawBuffer->_DepthBuffer->Wrapped);
if (irb && irb->region && irb->Base.Name != 0) {
if (map) {
intel_region_map(intel, irb->region);
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
else {
intel_region_unmap(intel, irb->region);
irb->pfMap = NULL;
irb->pfPitch = 0;
}
}
}
/* stencil buffer (Note wrapper!) */
if (ctx->DrawBuffer->_StencilBuffer) {
irb = intel_renderbuffer(ctx->DrawBuffer->_StencilBuffer->Wrapped);
if (irb && irb->region && irb->Base.Name != 0) {
if (map) {
intel_region_map(intel, irb->region);
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
else {
intel_region_unmap(intel, irb->region);
irb->pfMap = NULL;
irb->pfPitch = 0;
}
}
}
}
/**
* Prepare for softare rendering. Map current read/draw framebuffers'
* renderbuffes and all currently bound texture objects.
*
* Old note: Moved locking out to get reasonable span performance.
*/
void intelSpanRenderStart( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
GLuint i;
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
intelWaitForIdle(intel);
#if 0
/* Just map the framebuffer and all textures. Bufmgr code will
* take care of waiting on the necessary fences:
*/
intel_region_map(intel, intel->front_region);
intel_region_map(intel, intel->back_region);
intel_region_map(intel, intel->depth_region);
#endif
for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled) {
struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
intel_tex_map_images(intel, intel_texture_object(texObj));
}
}
intel_map_unmap_buffers(intel, GL_TRUE);
}
/**
* Called when done softare rendering. Unmap the buffers we mapped in
* the above function.
*/
void intelSpanRenderFinish( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
struct intel_context *intel = intel_context( ctx );
GLuint i;
_swrast_flush( ctx );
/* Now unmap the framebuffer:
*/
#if 0
intel_region_unmap(intel, intel->front_region);
intel_region_unmap(intel, intel->back_region);
intel_region_unmap(intel, intel->depth_region);
#endif
for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled) {
struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
intel_tex_unmap_images(intel, intel_texture_object(texObj));
}
}
intel_map_unmap_buffers(intel, GL_FALSE);
UNLOCK_HARDWARE( intel );
}
void intelInitSpanFuncs( GLcontext *ctx )
{
struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
@ -227,32 +373,31 @@ void intelInitSpanFuncs( GLcontext *ctx )
/**
* Plug in the Get/Put routines for the given driRenderbuffer.
* Plug in appropriate span read/write functions for the given renderbuffer.
* These are used for the software fallbacks.
*/
void
intelSetSpanFunctions(driRenderbuffer *drb, const GLvisual *vis)
intel_set_span_functions(struct gl_renderbuffer *rb)
{
if (drb->Base.InternalFormat == GL_RGBA) {
if (vis->redBits == 5 && vis->greenBits == 5 && vis->blueBits == 5) {
intelInitPointers_555(&drb->Base);
}
else if (vis->redBits == 5 && vis->greenBits == 6 && vis->blueBits == 5) {
intelInitPointers_565(&drb->Base);
}
else {
assert(vis->redBits == 8);
assert(vis->greenBits == 8);
assert(vis->blueBits == 8);
intelInitPointers_8888(&drb->Base);
}
if (rb->_ActualFormat == GL_RGB5) {
/* 565 RGB */
intelInitPointers_RGB565(rb);
}
else if (drb->Base.InternalFormat == GL_DEPTH_COMPONENT16) {
intelInitDepthPointers_z16(&drb->Base);
else if (rb->_ActualFormat == GL_RGBA8) {
/* 8888 RGBA */
intelInitPointers_ARGB8888(rb);
}
else if (drb->Base.InternalFormat == GL_DEPTH_COMPONENT24) {
intelInitDepthPointers_z24_s8(&drb->Base);
else if (rb->_ActualFormat == GL_DEPTH_COMPONENT16) {
intelInitDepthPointers_z16(rb);
}
else if (drb->Base.InternalFormat == GL_STENCIL_INDEX8_EXT) {
intelInitStencilPointers_z24_s8(&drb->Base);
else if (rb->_ActualFormat == GL_DEPTH_COMPONENT24 || /* XXX FBO remove */
rb->_ActualFormat == GL_DEPTH24_STENCIL8_EXT) {
intelInitDepthPointers_z24_s8(rb);
}
else if (rb->_ActualFormat == GL_STENCIL_INDEX8_EXT) { /* XXX FBO remove */
intelInitStencilPointers_z24_s8(rb);
}
else {
_mesa_problem(NULL, "Unexpected _ActualFormat in intelSetSpanFunctions");
}
}

View file

@ -28,14 +28,12 @@
#ifndef _INTEL_SPAN_H
#define _INTEL_SPAN_H
#include "drirenderbuffer.h"
extern void intelInitSpanFuncs( GLcontext *ctx );
extern void intelSpanRenderFinish( GLcontext *ctx );
extern void intelSpanRenderStart( GLcontext *ctx );
extern void
intelSetSpanFunctions(driRenderbuffer *rb, const GLvisual *vis);
intel_set_span_functions(struct gl_renderbuffer *rb);
#endif

View file

@ -30,10 +30,13 @@
#include "context.h"
#include "macros.h"
#include "enums.h"
#include "colormac.h"
#include "dd.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_regions.h"
#include "swrast/swrast.h"
int intel_translate_compare_func( GLenum func )
@ -164,87 +167,67 @@ int intel_translate_logic_op( GLenum opcode )
}
}
static void intelDrawBuffer(GLcontext *ctx, GLenum mode )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
int front = 0;
if (!ctx->DrawBuffer)
return;
switch ( ctx->DrawBuffer->_ColorDrawBufferMask[0] ) {
case BUFFER_BIT_FRONT_LEFT:
front = 1;
FALLBACK( intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE );
break;
case BUFFER_BIT_BACK_LEFT:
front = 0;
FALLBACK( intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE );
break;
default:
FALLBACK( intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE );
return;
}
if ( intel->sarea->pf_current_page == 1 )
front ^= 1;
intelSetFrontClipRects( intel );
if (front) {
intel->drawRegion = &intel->intelScreen->front;
intel->readRegion = &intel->intelScreen->front;
} else {
intel->drawRegion = &intel->intelScreen->back;
intel->readRegion = &intel->intelScreen->back;
}
intel->vtbl.set_color_region( intel, intel->drawRegion );
}
static void intelReadBuffer( GLcontext *ctx, GLenum mode )
{
/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
}
static void intelClearColor(GLcontext *ctx, const GLfloat color[4])
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
intelScreenPrivate *screen = intel->intelScreen;
struct intel_context *intel = intel_context(ctx);
GLubyte clear[4];
CLAMPED_FLOAT_TO_UBYTE(intel->clear_red, color[0]);
CLAMPED_FLOAT_TO_UBYTE(intel->clear_green, color[1]);
CLAMPED_FLOAT_TO_UBYTE(intel->clear_blue, color[2]);
CLAMPED_FLOAT_TO_UBYTE(intel->clear_alpha, color[3]);
CLAMPED_FLOAT_TO_UBYTE(clear[0], color[0]);
CLAMPED_FLOAT_TO_UBYTE(clear[1], color[1]);
CLAMPED_FLOAT_TO_UBYTE(clear[2], color[2]);
CLAMPED_FLOAT_TO_UBYTE(clear[3], color[3]);
intel->ClearColor = INTEL_PACKCOLOR(screen->fbFormat,
intel->clear_red,
intel->clear_green,
intel->clear_blue,
intel->clear_alpha);
/* compute both 32 and 16-bit clear values */
intel->ClearColor8888 = INTEL_PACKCOLOR8888(clear[0], clear[1],
clear[2], clear[3]);
intel->ClearColor565 = INTEL_PACKCOLOR565(clear[0], clear[1], clear[2]);
}
/**
* Update the viewport transformation matrix. Depends on:
* - viewport pos/size
* - depthrange
* - window pos/size or FBO size
*/
static void intelCalcViewport( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
const GLfloat *v = ctx->Viewport._WindowMap.m;
const GLfloat depthScale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
GLfloat *m = intel->ViewportMatrix.m;
GLint h = 0;
GLfloat yScale, yBias;
if (intel->driDrawable)
h = intel->driDrawable->h + SUBPIXEL_Y;
if (ctx->DrawBuffer->Name) {
/* User created FBO */
struct intel_renderbuffer *irb
= intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[0][0]);
if (irb && !irb->RenderToTexture) {
/* y=0=top */
yScale = -1.0;
yBias = irb->Base.Height;
}
else {
/* y=0=bottom */
yScale = 1.0;
yBias = 0.0;
}
}
else {
/* window buffer, y=0=top */
yScale = -1.0;
yBias = (intel->driDrawable) ? intel->driDrawable->h : 0.0F;
}
/* See also intel_translate_vertex. SUBPIXEL adjustments can be done
* via state vars, too.
*/
m[MAT_SX] = v[MAT_SX];
m[MAT_TX] = v[MAT_TX] + SUBPIXEL_X;
m[MAT_SY] = - v[MAT_SY];
m[MAT_TY] = - v[MAT_TY] + h;
m[MAT_SZ] = v[MAT_SZ] * intel->depth_scale;
m[MAT_TZ] = v[MAT_TZ] * intel->depth_scale;
m[MAT_SX] = v[MAT_SX];
m[MAT_TX] = v[MAT_TX] + SUBPIXEL_X;
m[MAT_SY] = v[MAT_SY] * yScale;
m[MAT_TY] = v[MAT_TY] * yScale + yBias + SUBPIXEL_Y;
m[MAT_SZ] = v[MAT_SZ] * depthScale;
m[MAT_TZ] = v[MAT_TZ] * depthScale;
}
static void intelViewport( GLcontext *ctx,
@ -264,18 +247,112 @@ static void intelDepthRange( GLcontext *ctx,
*/
static void intelRenderMode( GLcontext *ctx, GLenum mode )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
FALLBACK( intel, INTEL_FALLBACK_RENDERMODE, (mode != GL_RENDER) );
}
void intelInitStateFuncs( struct dd_function_table *functions )
{
functions->DrawBuffer = intelDrawBuffer;
functions->ReadBuffer = intelReadBuffer;
functions->RenderMode = intelRenderMode;
functions->Viewport = intelViewport;
functions->DepthRange = intelDepthRange;
functions->ClearColor = intelClearColor;
}
/* Push the current Mesa state vector through the driver's state-change
 * callbacks so hardware state starts out consistent with ctx.  Called
 * during context setup, after the callbacks have been installed.
 */
void intelInitState( GLcontext *ctx )
{
   /* Mesa should do this for us:
    */
   ctx->Driver.AlphaFunc( ctx,
                          ctx->Color.AlphaFunc,
                          ctx->Color.AlphaRef);

   ctx->Driver.BlendColor( ctx,
                           ctx->Color.BlendColor );

   ctx->Driver.BlendEquationSeparate( ctx,
                                      ctx->Color.BlendEquationRGB,
                                      ctx->Color.BlendEquationA);

   ctx->Driver.BlendFuncSeparate( ctx,
                                  ctx->Color.BlendSrcRGB,
                                  ctx->Color.BlendDstRGB,
                                  ctx->Color.BlendSrcA,
                                  ctx->Color.BlendDstA);

   ctx->Driver.ColorMask( ctx,
                          ctx->Color.ColorMask[RCOMP],
                          ctx->Color.ColorMask[GCOMP],
                          ctx->Color.ColorMask[BCOMP],
                          ctx->Color.ColorMask[ACOMP]);

   ctx->Driver.CullFace( ctx, ctx->Polygon.CullFaceMode );
   ctx->Driver.DepthFunc( ctx, ctx->Depth.Func );
   ctx->Driver.DepthMask( ctx, ctx->Depth.Mask );

   /* Mirror every enable flag into the hardware; the texture targets
    * are forced off here because texture state is validated later.
    */
   ctx->Driver.Enable( ctx, GL_ALPHA_TEST, ctx->Color.AlphaEnabled );
   ctx->Driver.Enable( ctx, GL_BLEND, ctx->Color.BlendEnabled );
   ctx->Driver.Enable( ctx, GL_COLOR_LOGIC_OP, ctx->Color.ColorLogicOpEnabled );
   ctx->Driver.Enable( ctx, GL_COLOR_SUM, ctx->Fog.ColorSumEnabled );
   ctx->Driver.Enable( ctx, GL_CULL_FACE, ctx->Polygon.CullFlag );
   ctx->Driver.Enable( ctx, GL_DEPTH_TEST, ctx->Depth.Test );
   ctx->Driver.Enable( ctx, GL_DITHER, ctx->Color.DitherFlag );
   ctx->Driver.Enable( ctx, GL_FOG, ctx->Fog.Enabled );
   ctx->Driver.Enable( ctx, GL_LIGHTING, ctx->Light.Enabled );
   ctx->Driver.Enable( ctx, GL_LINE_SMOOTH, ctx->Line.SmoothFlag );
   ctx->Driver.Enable( ctx, GL_POLYGON_STIPPLE, ctx->Polygon.StippleFlag );
   ctx->Driver.Enable( ctx, GL_SCISSOR_TEST, ctx->Scissor.Enabled );
   ctx->Driver.Enable( ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled );
   ctx->Driver.Enable( ctx, GL_TEXTURE_1D, GL_FALSE );
   ctx->Driver.Enable( ctx, GL_TEXTURE_2D, GL_FALSE );
   ctx->Driver.Enable( ctx, GL_TEXTURE_RECTANGLE_NV, GL_FALSE );
   ctx->Driver.Enable( ctx, GL_TEXTURE_3D, GL_FALSE );
   ctx->Driver.Enable( ctx, GL_TEXTURE_CUBE_MAP, GL_FALSE );

   ctx->Driver.Fogfv( ctx, GL_FOG_COLOR, ctx->Fog.Color );
   /* GL_FOG_MODE takes no parameter array; the callback reads the
    * mode from ctx, hence the 0 argument.
    */
   ctx->Driver.Fogfv( ctx, GL_FOG_MODE, 0 );
   ctx->Driver.Fogfv( ctx, GL_FOG_DENSITY, &ctx->Fog.Density );
   ctx->Driver.Fogfv( ctx, GL_FOG_START, &ctx->Fog.Start );
   ctx->Driver.Fogfv( ctx, GL_FOG_END, &ctx->Fog.End );

   ctx->Driver.FrontFace( ctx, ctx->Polygon.FrontFace );

   {
      /* LightModelfv expects a float array, so widen the enum. */
      GLfloat f = (GLfloat)ctx->Light.Model.ColorControl;
      ctx->Driver.LightModelfv( ctx, GL_LIGHT_MODEL_COLOR_CONTROL, &f );
   }

   ctx->Driver.LineWidth( ctx, ctx->Line.Width );
   ctx->Driver.LogicOpcode( ctx, ctx->Color.LogicOp );
   ctx->Driver.PointSize( ctx, ctx->Point.Size );
   ctx->Driver.PolygonStipple( ctx, (const GLubyte *)ctx->PolygonStipple );
   ctx->Driver.Scissor( ctx, ctx->Scissor.X, ctx->Scissor.Y,
                        ctx->Scissor.Width, ctx->Scissor.Height );
   ctx->Driver.ShadeModel( ctx, ctx->Light.ShadeModel );

   /* Index 0 = front-face stencil state, index 1 = back-face. */
   ctx->Driver.StencilFuncSeparate( ctx, GL_FRONT,
                                    ctx->Stencil.Function[0],
                                    ctx->Stencil.Ref[0],
                                    ctx->Stencil.ValueMask[0] );
   ctx->Driver.StencilFuncSeparate( ctx, GL_BACK,
                                    ctx->Stencil.Function[1],
                                    ctx->Stencil.Ref[1],
                                    ctx->Stencil.ValueMask[1] );
   ctx->Driver.StencilMaskSeparate( ctx, GL_FRONT, ctx->Stencil.WriteMask[0] );
   ctx->Driver.StencilMaskSeparate( ctx, GL_BACK, ctx->Stencil.WriteMask[1] );
   ctx->Driver.StencilOpSeparate( ctx, GL_FRONT,
                                  ctx->Stencil.FailFunc[0],
                                  ctx->Stencil.ZFailFunc[0],
                                  ctx->Stencil.ZPassFunc[0]);
   ctx->Driver.StencilOpSeparate( ctx, GL_BACK,
                                  ctx->Stencil.FailFunc[1],
                                  ctx->Stencil.ZFailFunc[1],
                                  ctx->Stencil.ZPassFunc[1]);

   /* XXX this isn't really needed */
   ctx->Driver.DrawBuffer( ctx, ctx->Color.DrawBuffer[0] );
}

File diff suppressed because it is too large Load diff

View file

@ -35,11 +35,101 @@
void intelInitTextureFuncs( struct dd_function_table *functions );
void intelDestroyTexObj( intelContextPtr intel, intelTextureObjectPtr t );
int intelUploadTexImages( intelContextPtr intel, intelTextureObjectPtr t,
GLuint face );
const struct gl_texture_format *
intelChooseTextureFormat( GLcontext *ctx, GLint internalFormat,
GLenum format, GLenum type );
void intelTexImage3D(GLcontext *ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint height, GLint depth,
GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexSubImage3D(GLcontext *ctx,
GLenum target,
GLint level,
GLint xoffset, GLint yoffset, GLint zoffset,
GLsizei width, GLsizei height, GLsizei depth,
GLenum format, GLenum type,
const GLvoid *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexImage2D(GLcontext *ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint height, GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexSubImage2D(GLcontext *ctx,
GLenum target,
GLint level,
GLint xoffset, GLint yoffset,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
const GLvoid *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexImage1D(GLcontext *ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexSubImage1D(GLcontext *ctx,
GLenum target,
GLint level,
GLint xoffset,
GLsizei width,
GLenum format, GLenum type,
const GLvoid *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelCopyTexImage1D( GLcontext *ctx, GLenum target, GLint level,
GLenum internalFormat,
GLint x, GLint y, GLsizei width,
GLint border );
void intelCopyTexImage2D( GLcontext *ctx, GLenum target, GLint level,
GLenum internalFormat,
GLint x, GLint y, GLsizei width, GLsizei height,
GLint border );
void intelCopyTexSubImage1D( GLcontext *ctx, GLenum target, GLint level,
GLint xoffset,
GLint x, GLint y, GLsizei width );
void intelCopyTexSubImage2D( GLcontext *ctx, GLenum target, GLint level,
GLint xoffset, GLint yoffset,
GLint x, GLint y, GLsizei width, GLsizei height );
void intelGetTexImage( GLcontext *ctx, GLenum target, GLint level,
GLenum format, GLenum type, GLvoid *pixels,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage );
GLuint intel_finalize_mipmap_tree( struct intel_context *intel, GLuint unit );
void intel_tex_map_images( struct intel_context *intel,
struct intel_texture_object *intelObj );
void intel_tex_unmap_images( struct intel_context *intel,
struct intel_texture_object *intelObj );
GLboolean
intel_driReinitTextureHeap( driTexHeap *heap,
unsigned size );
#endif

View file

@ -38,14 +38,95 @@
#include "tnl/t_vertex.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_tris.h"
#include "intel_batchbuffer.h"
#include "intel_reg.h"
#include "intel_span.h"
#include "intel_tex.h"
static void intelRenderPrimitive( GLcontext *ctx, GLenum prim );
static void intelRasterPrimitive( GLcontext *ctx, GLenum rprim, GLuint hwprim );
/*
*/
/* Close the inline primitive that is currently open in the batchbuffer:
 * either patch the reserved slot with the real 3DPRIMITIVE header now
 * that the dword count is known, or discard the primitive entirely if
 * no vertex data was ever appended.
 */
static void intel_flush_inline_primitive( struct intel_context *intel )
{
   GLuint used = intel->batch->ptr - intel->prim.start_ptr;

   assert(intel->prim.primitive != ~0);

   if (used < 8) {
      /* Only the placeholder was emitted -- rewind the batch pointer
       * so the empty primitive never reaches the hardware.
       */
      intel->batch->ptr -= used;
   }
   else {
      /* Fill in the header: opcode, primitive type, and the length
       * field (dwords following the header, hence the -2).
       */
      *(int *)intel->prim.start_ptr = (_3DPRIMITIVE |
                                       intel->prim.primitive |
                                       (used / 4 - 2));
   }

   /* Mark "no primitive open". */
   intel->prim.primitive = ~0;
   intel->prim.start_ptr = 0;
   intel->prim.flush = 0;
}
/* Emit a primitive referencing vertices in a vertex buffer.
*/
/* Open a new inline (immediate-mode) primitive in the batchbuffer.
 *
 * Reserves batch space that intel_flush_inline_primitive() will later
 * patch with the real _3DPRIMITIVE header once the vertex count is
 * known, and records where the primitive starts.
 */
void intelStartInlinePrimitive( struct intel_context *intel,
                                GLuint prim,
                                GLuint batch_flags )
{
   BATCH_LOCALS;

   /* Emit a slot which will be filled with the inline primitive
    * command later.
    */
   BEGIN_BATCH(2, batch_flags);
   OUT_BATCH( 0 );

   /* start_ptr is captured between the two placeholder dwords; the
    * flush routine patches the header at this position (or rewinds
    * back to it on discard).
    */
   intel->prim.start_ptr = intel->batch->ptr;
   intel->prim.primitive = prim;
   intel->prim.flush = intel_flush_inline_primitive;

   OUT_BATCH( 0 );
   ADVANCE_BATCH();
}
/* The current inline primitive has outgrown the batchbuffer: close it,
 * submit the batch, re-emit the hardware state into the fresh batch,
 * and reopen an identical primitive so the caller can keep appending
 * vertices transparently.
 */
void intelWrapInlinePrimitive( struct intel_context *intel )
{
   GLuint saved_prim = intel->prim.primitive;
   GLuint saved_flags = intel->batch->flags;

   intel_flush_inline_primitive(intel);
   intel_batchbuffer_flush(intel->batch);
   intel->vtbl.emit_state( intel );
   intelStartInlinePrimitive( intel, saved_prim, saved_flags );
}
/* Reserve room for 'dwords' dwords of vertex data inside the currently
 * open inline primitive, wrapping to a new batchbuffer first if the
 * current one cannot hold them.  Returns a pointer the caller writes
 * the vertex data through directly.
 */
GLuint *intelExtendInlinePrimitive( struct intel_context *intel,
                                    GLuint dwords )
{
   const GLuint bytes = dwords * sizeof(GLuint);
   GLuint *space;

   if (intel_batchbuffer_space(intel->batch) < bytes)
      intelWrapInlinePrimitive( intel );

   space = (GLuint *) intel->batch->ptr;
   intel->batch->ptr += bytes;
   return space;
}
/***********************************************************************
* Emit primitives as inline vertices *
***********************************************************************/
@ -63,22 +144,18 @@ do { \
#else
#define COPY_DWORDS( j, vb, vertsize, v ) \
do { \
if (0) fprintf(stderr, "\n"); \
for ( j = 0 ; j < vertsize ; j++ ) { \
if (0) fprintf(stderr, " -- v(%d): %x/%f\n",j, \
((GLuint *)v)[j], \
((GLfloat *)v)[j]); \
vb[j] = ((GLuint *)v)[j]; \
} \
vb += vertsize; \
} while (0)
#endif
static void __inline__ intel_draw_quad( intelContextPtr intel,
intelVertexPtr v0,
intelVertexPtr v1,
intelVertexPtr v2,
intelVertexPtr v3 )
static void intel_draw_quad( struct intel_context *intel,
intelVertexPtr v0,
intelVertexPtr v1,
intelVertexPtr v2,
intelVertexPtr v3 )
{
GLuint vertsize = intel->vertex_size;
GLuint *vb = intelExtendInlinePrimitive( intel, 6 * vertsize );
@ -92,10 +169,10 @@ static void __inline__ intel_draw_quad( intelContextPtr intel,
COPY_DWORDS( j, vb, vertsize, v3 );
}
static void __inline__ intel_draw_triangle( intelContextPtr intel,
intelVertexPtr v0,
intelVertexPtr v1,
intelVertexPtr v2 )
static void intel_draw_triangle( struct intel_context *intel,
intelVertexPtr v0,
intelVertexPtr v1,
intelVertexPtr v2 )
{
GLuint vertsize = intel->vertex_size;
GLuint *vb = intelExtendInlinePrimitive( intel, 3 * vertsize );
@ -107,9 +184,9 @@ static void __inline__ intel_draw_triangle( intelContextPtr intel,
}
static __inline__ void intel_draw_line( intelContextPtr intel,
intelVertexPtr v0,
intelVertexPtr v1 )
static void intel_draw_line( struct intel_context *intel,
intelVertexPtr v0,
intelVertexPtr v1 )
{
GLuint vertsize = intel->vertex_size;
GLuint *vb = intelExtendInlinePrimitive( intel, 2 * vertsize );
@ -120,8 +197,8 @@ static __inline__ void intel_draw_line( intelContextPtr intel,
}
static __inline__ void intel_draw_point( intelContextPtr intel,
intelVertexPtr v0 )
static void intel_draw_point( struct intel_context *intel,
intelVertexPtr v0 )
{
GLuint vertsize = intel->vertex_size;
GLuint *vb = intelExtendInlinePrimitive( intel, vertsize );
@ -140,7 +217,7 @@ static __inline__ void intel_draw_point( intelContextPtr intel,
* Fixup for ARB_point_parameters *
***********************************************************************/
static void intel_atten_point( intelContextPtr intel, intelVertexPtr v0 )
static void intel_atten_point( struct intel_context *intel, intelVertexPtr v0 )
{
GLcontext *ctx = &intel->ctx;
GLfloat psz[4], col[4], restore_psz, restore_alpha;
@ -189,7 +266,7 @@ static void intel_atten_point( intelContextPtr intel, intelVertexPtr v0 )
static void intel_wpos_triangle( intelContextPtr intel,
static void intel_wpos_triangle( struct intel_context *intel,
intelVertexPtr v0,
intelVertexPtr v1,
intelVertexPtr v2 )
@ -205,7 +282,7 @@ static void intel_wpos_triangle( intelContextPtr intel,
}
static void intel_wpos_line( intelContextPtr intel,
static void intel_wpos_line( struct intel_context *intel,
intelVertexPtr v0,
intelVertexPtr v1 )
{
@ -219,7 +296,7 @@ static void intel_wpos_line( intelContextPtr intel,
}
static void intel_wpos_point( intelContextPtr intel,
static void intel_wpos_point( struct intel_context *intel,
intelVertexPtr v0 )
{
GLuint offset = intel->wpos_offset;
@ -349,7 +426,7 @@ do { \
#define VERT_RESTORE_SPEC( idx ) if (specoffset) v[idx]->ui[specoffset] = spec[idx]
#define LOCAL_VARS(n) \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
struct intel_context *intel = intel_context(ctx); \
GLuint color[n], spec[n]; \
GLuint coloroffset = intel->coloroffset; \
GLboolean specoffset = intel->specoffset; \
@ -481,7 +558,7 @@ static void init_rast_tab( void )
* primitives.
*/
static void
intel_fallback_tri( intelContextPtr intel,
intel_fallback_tri( struct intel_context *intel,
intelVertex *v0,
intelVertex *v1,
intelVertex *v2 )
@ -491,6 +568,9 @@ intel_fallback_tri( intelContextPtr intel,
if (0)
fprintf(stderr, "\n%s\n", __FUNCTION__);
if (intel->prim.flush)
intel->prim.flush(intel);
_swsetup_Translate( ctx, v0, &v[0] );
_swsetup_Translate( ctx, v1, &v[1] );
@ -502,7 +582,7 @@ intel_fallback_tri( intelContextPtr intel,
static void
intel_fallback_line( intelContextPtr intel,
intel_fallback_line( struct intel_context *intel,
intelVertex *v0,
intelVertex *v1 )
{
@ -512,6 +592,9 @@ intel_fallback_line( intelContextPtr intel,
if (0)
fprintf(stderr, "\n%s\n", __FUNCTION__);
if (intel->prim.flush)
intel->prim.flush(intel);
_swsetup_Translate( ctx, v0, &v[0] );
_swsetup_Translate( ctx, v1, &v[1] );
intelSpanRenderStart( ctx );
@ -520,24 +603,6 @@ intel_fallback_line( intelContextPtr intel,
}
/* Software-rasterizer fallback for a single point: translate the
 * hardware vertex into a SWvertex and hand it to swrast, bracketing
 * the draw with span render start/finish so the framebuffer is
 * accessible to the CPU.
 */
static void
intel_fallback_point( intelContextPtr intel,
                      intelVertex *v0 )
{
   GLcontext *ctx = &intel->ctx;
   SWvertex v[1];

   /* Debug tracing, normally compiled out. */
   if (0)
      fprintf(stderr, "\n%s\n", __FUNCTION__);

   _swsetup_Translate( ctx, v0, &v[0] );
   intelSpanRenderStart( ctx );
   _swrast_Point( ctx, &v[0] );
   intelSpanRenderFinish( ctx );
}
/**********************************************************************/
/* Render unclipped begin/end objects */
/**********************************************************************/
@ -552,7 +617,7 @@ intel_fallback_point( intelContextPtr intel,
#define INIT(x) intelRenderPrimitive( ctx, x )
#undef LOCAL_VARS
#define LOCAL_VARS \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
struct intel_context *intel = intel_context(ctx); \
GLubyte *vertptr = (GLubyte *)intel->verts; \
const GLuint vertsize = intel->vertex_size; \
const GLuint * const elt = TNL_CONTEXT(ctx)->vb.Elts; \
@ -578,7 +643,7 @@ intel_fallback_point( intelContextPtr intel,
static void intelRenderClippedPoly( GLcontext *ctx, const GLuint *elts,
GLuint n )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
GLuint prim = intel->render_primitive;
@ -609,7 +674,7 @@ static void intelRenderClippedLine( GLcontext *ctx, GLuint ii, GLuint jj )
static void intelFastRenderClippedPoly( GLcontext *ctx, const GLuint *elts,
GLuint n )
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
struct intel_context *intel = intel_context( ctx );
const GLuint vertsize = intel->vertex_size;
GLuint *vb = intelExtendInlinePrimitive( intel, (n-2) * 3 * vertsize );
GLubyte *vertptr = (GLubyte *)intel->verts;
@ -630,17 +695,13 @@ static void intelFastRenderClippedPoly( GLcontext *ctx, const GLuint *elts,
#define POINT_FALLBACK (0)
#define LINE_FALLBACK (DD_LINE_STIPPLE)
#define TRI_FALLBACK (0)
#define ANY_FALLBACK_FLAGS (POINT_FALLBACK|LINE_FALLBACK|TRI_FALLBACK|\
DD_TRI_STIPPLE|DD_POINT_ATTEN)
#define ANY_RASTER_FLAGS (DD_TRI_LIGHT_TWOSIDE|DD_TRI_OFFSET|DD_TRI_UNFILLED)
#define ANY_FALLBACK_FLAGS (DD_LINE_STIPPLE | DD_TRI_STIPPLE | DD_POINT_ATTEN)
#define ANY_RASTER_FLAGS (DD_TRI_LIGHT_TWOSIDE | DD_TRI_OFFSET | DD_TRI_UNFILLED)
void intelChooseRenderState(GLcontext *ctx)
{
TNLcontext *tnl = TNL_CONTEXT(ctx);
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
GLuint flags = ctx->_TriangleCaps;
const struct gl_fragment_program *fprog = ctx->FragmentProgram._Current;
GLboolean have_wpos = (fprog && (fprog->Base.InputsRead & FRAG_BIT_WPOS));
@ -676,15 +737,9 @@ void intelChooseRenderState(GLcontext *ctx)
*/
if (flags & ANY_FALLBACK_FLAGS)
{
if (flags & POINT_FALLBACK)
intel->draw_point = intel_fallback_point;
if (flags & LINE_FALLBACK)
if (flags & DD_LINE_STIPPLE)
intel->draw_line = intel_fallback_line;
if (flags & TRI_FALLBACK)
intel->draw_tri = intel_fallback_tri;
if ((flags & DD_TRI_STIPPLE) && !intel->hw_stipple)
intel->draw_tri = intel_fallback_tri;
@ -740,7 +795,7 @@ static const GLenum reduced_prim[GL_POLYGON+1] = {
static void intelRunPipeline( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
if (intel->NewGLState) {
if (intel->NewGLState & _NEW_TEXTURE) {
@ -760,13 +815,21 @@ static void intelRunPipeline( GLcontext *ctx )
static void intelRenderStart( GLcontext *ctx )
{
INTEL_CONTEXT(ctx)->vtbl.render_start( INTEL_CONTEXT(ctx) );
struct intel_context *intel = intel_context(ctx);
intel->vtbl.render_start( intel_context(ctx) );
intel->vtbl.emit_state( intel );
}
static void intelRenderFinish( GLcontext *ctx )
{
if (INTEL_CONTEXT(ctx)->RenderIndex & INTEL_FALLBACK_BIT)
struct intel_context *intel = intel_context(ctx);
if (intel->RenderIndex & INTEL_FALLBACK_BIT)
_swrast_flush( ctx );
if (intel->prim.flush)
intel->prim.flush(intel);
}
@ -777,7 +840,7 @@ static void intelRenderFinish( GLcontext *ctx )
*/
static void intelRasterPrimitive( GLcontext *ctx, GLenum rprim, GLuint hwprim )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
if (0)
fprintf(stderr, "%s %s %x\n", __FUNCTION__,
@ -787,110 +850,178 @@ static void intelRasterPrimitive( GLcontext *ctx, GLenum rprim, GLuint hwprim )
/* Start a new primitive. Arrange to have it flushed later on.
*/
if (hwprim != intel->prim.primitive)
intelStartInlinePrimitive( intel, hwprim );
if (hwprim != intel->prim.primitive) {
if (intel->prim.flush)
intel->prim.flush(intel);
intelStartInlinePrimitive( intel, hwprim, INTEL_BATCH_CLIPRECTS );
}
}
/*
*/
static void intelRenderPrimitive( GLcontext *ctx, GLenum prim )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
static void intelRenderPrimitive( GLcontext *ctx, GLenum prim )
{
struct intel_context *intel = intel_context(ctx);
if (0)
fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
if (0)
fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
/* Let some clipping routines know which primitive they're dealing
* with.
*/
intel->render_primitive = prim;
/* Let some clipping routines know which primitive they're dealing
* with.
*/
intel->render_primitive = prim;
/* Shortcircuit this when called from t_dd_rendertmp.h for unfilled
* triangles. The rasterized primitive will always be reset by
* lower level functions in that case, potentially pingponging the
* state:
*/
if (reduced_prim[prim] == GL_TRIANGLES &&
(ctx->_TriangleCaps & DD_TRI_UNFILLED))
return;
/* Shortcircuit this when called from t_dd_rendertmp.h for unfilled
* triangles. The rasterized primitive will always be reset by
* lower level functions in that case, potentially pingponging the
* state:
*/
if (reduced_prim[prim] == GL_TRIANGLES &&
(ctx->_TriangleCaps & DD_TRI_UNFILLED))
return;
/* Set some primitive-dependent state and Start? a new primitive.
*/
intelRasterPrimitive( ctx, reduced_prim[prim], hw_prim[prim] );
}
/* Set some primitive-dependent state and Start? a new primitive.
*/
intelRasterPrimitive( ctx, reduced_prim[prim], hw_prim[prim] );
}
/**********************************************************************/
/* Transition to/from hardware rasterization. */
/**********************************************************************/
static char *fallbackStrings[] = {
"Texture",
"Draw buffer",
"Read buffer",
"Color mask",
"Render mode",
"Stencil",
"Stipple",
"User disable"
};
static char *fallbackStrings[] = {
"Texture",
"Draw buffer",
"Read buffer",
"Color mask",
"Render mode",
"Stencil",
"Stipple",
"User disable"
};
static char *getFallbackString(GLuint bit)
{
int i = 0;
while (bit > 1) {
i++;
bit >>= 1;
}
return fallbackStrings[i];
}
static char *getFallbackString(GLuint bit)
{
int i = 0;
while (bit > 1) {
i++;
bit >>= 1;
}
return fallbackStrings[i];
}
void intelFallback( intelContextPtr intel, GLuint bit, GLboolean mode )
{
GLcontext *ctx = &intel->ctx;
TNLcontext *tnl = TNL_CONTEXT(ctx);
GLuint oldfallback = intel->Fallback;
void intelFallback( struct intel_context *intel, GLuint bit, GLboolean mode )
{
GLcontext *ctx = &intel->ctx;
TNLcontext *tnl = TNL_CONTEXT(ctx);
GLuint oldfallback = intel->Fallback;
if (mode) {
intel->Fallback |= bit;
if (oldfallback == 0) {
intelFlush(ctx);
if (INTEL_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "ENTER FALLBACK %x: %s\n",
bit, getFallbackString( bit ));
_swsetup_Wakeup( ctx );
intel->RenderIndex = ~0;
}
}
else {
intel->Fallback &= ~bit;
if (oldfallback == bit) {
_swrast_flush( ctx );
if (INTEL_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "LEAVE FALLBACK %s\n", getFallbackString( bit ));
tnl->Driver.Render.Start = intelRenderStart;
tnl->Driver.Render.PrimitiveNotify = intelRenderPrimitive;
tnl->Driver.Render.Finish = intelRenderFinish;
tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
tnl->Driver.Render.CopyPV = _tnl_copy_pv;
tnl->Driver.Render.Interp = _tnl_interp;
if (mode) {
intel->Fallback |= bit;
if (oldfallback == 0) {
intelFlush(ctx);
if (INTEL_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "ENTER FALLBACK %x: %s\n",
bit, getFallbackString( bit ));
_swsetup_Wakeup( ctx );
intel->RenderIndex = ~0;
}
}
else {
intel->Fallback &= ~bit;
if (oldfallback == bit) {
_swrast_flush( ctx );
if (INTEL_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "LEAVE FALLBACK %s\n", getFallbackString( bit ));
tnl->Driver.Render.Start = intelRenderStart;
tnl->Driver.Render.PrimitiveNotify = intelRenderPrimitive;
tnl->Driver.Render.Finish = intelRenderFinish;
tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
tnl->Driver.Render.CopyPV = _tnl_copy_pv;
tnl->Driver.Render.Interp = _tnl_interp;
_tnl_invalidate_vertex_state( ctx, ~0 );
_tnl_invalidate_vertices( ctx, ~0 );
_tnl_install_attrs( ctx,
intel->vertex_attrs,
intel->vertex_attr_count,
intel->ViewportMatrix.m, 0 );
_tnl_invalidate_vertex_state( ctx, ~0 );
_tnl_invalidate_vertices( ctx, ~0 );
_tnl_install_attrs( ctx,
intel->vertex_attrs,
intel->vertex_attr_count,
intel->ViewportMatrix.m, 0 );
intel->NewGLState |= _INTEL_NEW_RENDERSTATE;
}
}
}
intel->NewGLState |= _INTEL_NEW_RENDERSTATE;
}
}
}
/* View a single dword as either float or int while packing inline
 * vertex data.
 */
union fi {
   GLfloat f;
   GLint i;
};

/**********************************************************************/
/*           Used only with the metaops callbacks.                    */
/**********************************************************************/

/* Emit an n-vertex triangle fan using the current (metaops) hardware
 * state.  Each vertex is six dwords: x, y, z, packed color, s, t.
 */
void intel_meta_draw_poly(struct intel_context *intel,
                          GLuint n,
                          GLfloat xy[][2],
                          GLfloat z,
                          GLuint color,
                          GLfloat tex[][2])
{
   union fi *out;
   GLuint vtx;

   intel->vtbl.emit_state( intel );

   /* All 3d primitives should be emitted with INTEL_BATCH_CLIPRECTS,
    * otherwise the drawing origin (DR4) might not be set correctly.
    */
   intelStartInlinePrimitive( intel, PRIM3D_TRIFAN, INTEL_BATCH_CLIPRECTS );
   out = (union fi *) intelExtendInlinePrimitive( intel, n * 6 );

   for (vtx = 0; vtx < n; vtx++, out += 6) {
      out[0].f = xy[vtx][0];
      out[1].f = xy[vtx][1];
      out[2].f = z;
      out[3].i = color;
      out[4].f = tex[vtx][0];
      out[5].f = tex[vtx][1];
   }

   if (intel->prim.flush)
      intel->prim.flush(intel);
}
/* Convenience wrapper around intel_meta_draw_poly(): emit a screen
 * quad spanning (x0,y0)-(x1,y1) at depth z, with texcoords spanning
 * (s0,t0)-(s1,t1), as a four-vertex fan.
 */
void intel_meta_draw_quad(struct intel_context *intel,
                          GLfloat x0, GLfloat x1,
                          GLfloat y0, GLfloat y1,
                          GLfloat z,
                          GLuint color,
                          GLfloat s0, GLfloat s1,
                          GLfloat t0, GLfloat t1)
{
   /* Corners in fan order: bottom-left, bottom-right, top-right,
    * top-left (matching the original explicit assignments).
    */
   GLfloat xy[4][2] = {
      { x0, y0 },
      { x1, y0 },
      { x1, y1 },
      { x0, y1 }
   };
   GLfloat tex[4][2] = {
      { s0, t0 },
      { s1, t0 },
      { s1, t1 },
      { s0, t1 }
   };

   intel_meta_draw_poly(intel, 4, xy, z, color, tex);
}

View file

@ -30,6 +30,8 @@
#include "mtypes.h"
#define _INTEL_NEW_RENDERSTATE (_DD_NEW_LINE_STIPPLE | \
_DD_NEW_TRI_UNFILLED | \
_DD_NEW_TRI_LIGHT_TWOSIDE | \
@ -40,7 +42,30 @@
extern void intelInitTriFuncs( GLcontext *ctx );
extern void intelPrintRenderState( const char *msg, GLuint state );
extern void intelChooseRenderState( GLcontext *ctx );
extern void intelStartInlinePrimitive( struct intel_context *intel, GLuint prim, GLuint flags );
extern void intelWrapInlinePrimitive( struct intel_context *intel );
GLuint *intelExtendInlinePrimitive( struct intel_context *intel,
GLuint dwords );
void intel_meta_draw_quad(struct intel_context *intel,
GLfloat x0, GLfloat x1,
GLfloat y0, GLfloat y1,
GLfloat z,
GLuint color,
GLfloat s0, GLfloat s1,
GLfloat t0, GLfloat t1);
void intel_meta_draw_poly(struct intel_context *intel,
GLuint n,
GLfloat xy[][2],
GLfloat z,
GLuint color,
GLfloat tex[][2]);
#endif