Merge branch 'master' of git+ssh://joukj@git.freedesktop.org/git/mesa/mesa

This commit is contained in:
joukj 2007-11-30 11:12:41 +01:00
commit 86f3135fbd
208 changed files with 11492 additions and 12092 deletions

View file

@ -355,6 +355,7 @@ GLW_FILES = \
$(DIRECTORY)/src/glw/*.[ch] \
$(DIRECTORY)/src/glw/Makefile* \
$(DIRECTORY)/src/glw/README \
$(DIRECTORY)/src/glw/glw.pc.in \
$(DIRECTORY)/src/glw/depend
DEMO_FILES = \
@ -424,7 +425,6 @@ DEPEND_FILES = \
$(TOP)/src/mesa/depend \
$(TOP)/src/glx/x11/depend \
$(TOP)/src/glw/depend \
$(TOP)/src/glw/glw.pc.in \
$(TOP)/src/glut/glx/depend \
$(TOP)/src/glu/sgi/depend

48
bin/confdiff.sh Executable file
View file

@ -0,0 +1,48 @@
#!/bin/bash -e

# confdiff.sh - highlight the differences between two Mesa build configs.
#
# For each target we feed make a trivial generated makefile that includes
# configs/<target>, dump make's internal database (-p) without running
# anything (-n), strip the "Not a target" noise, and diff the two dumps.
# Comment lines are excluded from the diff (-I'^#') so only real
# variable/rule differences show up.

usage()
{
	echo "Usage: $0 <target1> <target2>"
	echo "Highlight differences between Mesa configs"
	echo "Example:"
	echo " $0 linux linux-x86"
}

die()
{
	# Report a fatal error on stderr and terminate the script.
	# "exit" (not "return") guarantees termination even when the
	# script is run without -e (e.g. invoked as "sh confdiff.sh",
	# which bypasses the shebang options).
	echo "$@" >&2
	exit 1
}

case "$1" in
-h|--help) usage; exit 0;;
esac

[ $# -lt 2 ] && die 2 targets needed. See $0 --help

target1=$1
target2=$2

topdir=$(cd "`dirname $0`"/..; pwd)
cd "$topdir"

[ -f "./configs/$target1" ] || die Missing configs/$target1
[ -f "./configs/$target2" ] || die Missing configs/$target2

# Remove the temp files on exit, however we get there.
trap 'rm -f "$t1" "$t2"' 0

t1=$(mktemp)
t2=$(mktemp)

make -f- -n -p <<EOF | sed '/^# Not a target/,/^$/d' > "$t1"
TOP = .
include \$(TOP)/configs/$target1
default:
EOF

make -f- -n -p <<EOF | sed '/^# Not a target/,/^$/d' > "$t2"
TOP = .
include \$(TOP)/configs/$target2
default:
EOF

diff -pu -I'^#' "$t1" "$t2"

View file

@ -11,6 +11,10 @@ CXX = g++
CFLAGS = -O2 -DAIXV3
CXXFLAGS = -O2 -DAIXV3
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
MKLIB_OPTIONS = -arch aix-gcc
GL_LIB_DEPS = -lX11 -lXext -lm
GLU_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GL_LIB) -lm

View file

@ -39,6 +39,10 @@ ifeq ($(CPU), x86)
CXXFLAGS = $(CFLAGS)
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
LDFLAGS += -Xlinker
ifdef DEBUG

View file

@ -13,6 +13,10 @@ CXX = /bgl/BlueLight/ppcfloor/blrts-gnu/bin/powerpc-bgl-blrts-gnu-g++
CFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE
CXXFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURC
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
MKLIB_OPTIONS = -static
OSMESA_LIB_NAME = libOSMesa.a

View file

@ -10,6 +10,10 @@ CXX = cc
CFLAGS = -I/usr/X11R6/include -O3 -fPIC -fno-common -ffast-math -funroll-loops -fexpensive-optimizations -no-cpp-precomp -dynamic -Ddarwin
CXXFLAGS = -I/usr/X11R6/include -O3 -fPIC -fno-common -ffast-math -funroll-loops -fexpensive-optimizations -no-cpp-precomp -dynamic -Ddarwin
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names (actual file names)
GL_LIB_NAME = libGL.dylib
GLU_LIB_NAME = libGLU.dylib

View file

@ -9,6 +9,10 @@ CFLAGS = -I/usr/X11R6/include -O3 -fno-common -ffast-math -funroll-loops -fexpen
CXXFLAGS = -I/usr/X11R6/include -O3 -fno-common -ffast-math -funroll-loops -fexpensive-optimizations -no-cpp-precomp -dynamic -Ddarwin
MKLIB_OPTIONS = -static
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names (actual file names)
GL_LIB_NAME = libGL.a
GLU_LIB_NAME = libGLU.a

View file

@ -11,6 +11,10 @@ CXXFLAGS = -arch ppc -arch i386 -isysroot /Developer/SDKs/MacOSX10.4u.sdk \
-I/usr/X11R6/include -O3 -fno-common -ffast-math -funroll-loops -fexpensive-optimizations -no-cpp-precomp -dynamic -Ddarwin
MKLIB_OPTIONS = -static -archopt "-isysroot /Developer/SDKs/MacOSX10.4u.sdk"
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names (actual file names)
GL_LIB_NAME = libGL.a
GLU_LIB_NAME = libGLU.a

View file

@ -14,6 +14,10 @@ CXXFLAGS = -arch ppc -arch i386 -isysroot /Developer/SDKs/MacOSX10.4u.sdk \
MKLIB_OPTIONS = -archopt "-isysroot /Developer/SDKs/MacOSX10.4u.sdk"
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names (actual file names)
GL_LIB_NAME = libGL.dylib
GLU_LIB_NAME = libGLU.dylib

View file

@ -23,5 +23,9 @@ CXXFLAGS += $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) $(DEFINES) $(X11_INCLUDES)
GLUT_CFLAGS = -fexceptions
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
EXTRA_LIB_PATH = -L/usr/local/lib
APP_LIB_DEPS = -L$(TOP)/$(LIB_DIR) $(EXTRA_LIB_PATH) -l$(GLUT_LIB) -l$(GLU_LIB) -l$(GL_LIB) -lXext -lXmu -lXi -lX11 -lm

View file

@ -22,6 +22,10 @@ CFLAGS = $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) -Wmissing-prototypes -std=c99 -
CXXFLAGS = $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) $(DEFINES) -Wall -ansi -pedantic $(ASM_FLAGS) $(X11_INCLUDES)
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
ASM_SOURCES =
# Library/program dependencies

View file

@ -12,5 +12,9 @@ CFLAGS = -ansi -O3 -D_HPUX_SOURCE -I/usr/include/X11R6 -I/usr/contrib/X11R6/incl
CXXFLAGS = -ansi -O3 -D_HPUX_SOURCE
GLUT_CFLAGS = -fexceptions
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
APP_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -L/usr/lib/X11R6 -L/usr/contrib/X11R6/lib -lXext -lXmu -lXi -lX11 -lm

View file

@ -28,6 +28,10 @@ CFLAGS = -Wall -Wmissing-prototypes $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) \
CXXFLAGS = -Wall $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES) \
$(X11_INCLUDES)
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
GLUT_CFLAGS = -fexceptions
EXTRA_LIB_PATH = -L/usr/X11R6/lib

View file

@ -11,6 +11,10 @@ CFLAGS = -O3 -mcpu=ev5 -ansi -mieee -pedantic -fPIC -D_XOPEN_SOURCE -DUSE_XSHM
CXXFLAGS = -O3 -mcpu=ev5 -ansi -mieee -pedantic -fPIC -D_XOPEN_SOURCE
GLUT_CFLAGS = -fexceptions
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
GL_LIB_DEPS = -L/usr/X11R6/lib -lX11 -lXext -lm -lpthread
GLUT_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GLU_LIB) -l$(GL_LIB) -L/usr/X11R6/lib -lX11 -lXmu -lXt -lXi -lm
GLW_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GL_LIB) -L/usr/X11R6/lib -lXt -lX11

View file

@ -11,7 +11,11 @@ CFLAGS = -O3 -mcpu=ev5 -ansi -mieee -pedantic -D_XOPEN_SOURCE -DUSE_XSHM
CXXFLAGS = -O3 -mcpu=ev5 -ansi -mieee -pedantic -D_XOPEN_SOURCE
GLUT_CFLAGS = -fexceptions
MKLIB_OPTIONS = -static
PIC_FLAGS =
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names (actual file names)
GL_LIB_NAME = libGL.a

View file

@ -13,6 +13,10 @@ CFLAGS = -Wall -O3 -ffast-math -fPIC -std=c99 -D_GNU_SOURCE -D_POSIX_SOURCE -D
CXXFLAGS = -Wall -O3 -fPIC -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
HAVE_X86 = $(shell uname -m | grep 'i[3-6]86' >/dev/null && echo yes)
ifeq ($(HAVE_X86), yes)
CFLAGS += -DUSE_X86_ASM -DUSE_MMX_ASM -DUSE_3DNOW_ASM -DUSE_SSE_ASM

View file

@ -32,6 +32,11 @@ CFLAGS = -Wall -Wmissing-prototypes -std=c99 -ffast-math \
CXXFLAGS = -Wall $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES)
GLUT_CFLAGS = -fexceptions
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
ASM_SOURCES =

View file

@ -5,9 +5,6 @@ include $(TOP)/configs/linux-dri
CONFIG_NAME = linux-dri-x86
# Unnecessary on x86, generally.
PIC_FLAGS =
# Add -m32 to CFLAGS:
ARCH_FLAGS = -m32

View file

@ -32,6 +32,9 @@ CFLAGS = -Wall -Wmissing-prototypes $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) \
CXXFLAGS = -Wall $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES)
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
ASM_SOURCES =

View file

@ -6,6 +6,9 @@ CONFIG_NAME = linux-fbdev
CFLAGS = -O3 -ffast-math -ansi -pedantic -fPIC -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE -DPTHREADS -DUSE_GLFBDEV_DRIVER
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
SRC_DIRS = mesa glu glut/fbdev
DRIVER_DIRS = fbdev osmesa
PROGRAM_DIRS = fbdev demos redbook samples

View file

@ -12,6 +12,9 @@ CFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE
CXXFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE
GLUT_CFLAGS = -fexceptions
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library/program dependencies
GL_LIB_DEPS = -L/usr/X11R6/lib -lX11 -lXext -L/usr/local/glide/lib -lglide3x -lm -lpthread

View file

@ -33,6 +33,9 @@ CFLAGS = $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES) \
CXXFLAGS = $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES)
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
ASM_SOURCES =

View file

@ -12,6 +12,9 @@ CXX = g++
CFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE -DPTHREADS
CXXFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Directories
SRC_DIRS = mesa glu

View file

@ -10,6 +10,9 @@ CXX = g++
CFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE -DUSE_XSHM -DPTHREADS -I/usr/X11R6/include -DCHAN_BITS=16 -DDEFAULT_SOFTWARE_DEPTH_BITS=31
CXXFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names
OSMESA_LIB = OSMesa16

View file

@ -10,7 +10,11 @@ CXX = g++
CFLAGS = -O3 -ansi -pedantic -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE -DUSE_XSHM -DPTHREADS -I/usr/X11R6/include -DCHAN_BITS=16 -DDEFAULT_SOFTWARE_DEPTH_BITS=31
CXXFLAGS = -O3 -ansi -pedantic -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE
MKLIB_OPTIONS = -static
PIC_FLAGS =
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names
OSMESA_LIB = OSMesa16

View file

@ -10,6 +10,9 @@ CXX = g++
CFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE -DUSE_XSHM -DPTHREADS -I/usr/X11R6/include -DCHAN_BITS=32 -DDEFAULT_SOFTWARE_DEPTH_BITS=31
CXXFLAGS = -O3 -ansi -pedantic -fPIC -ffast-math -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199309L -D_SVID_SOURCE -D_BSD_SOURCE
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library names
OSMESA_LIB = OSMesa32

View file

@ -5,6 +5,7 @@ include $(TOP)/configs/linux-ppc
CONFIG_NAME = linux-ppc-static
MKLIB_OPTIONS = -static
PIC_FLAGS =
# Library names (actual file names)
GL_LIB_NAME = libGL.a

View file

@ -32,6 +32,9 @@ CFLAGS = $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES) \
CXXFLAGS = $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES)
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
ASM_SOURCES =

View file

@ -5,9 +5,6 @@ include $(TOP)/configs/linux-solo
CONFIG_NAME = linux-solo-x86
# Unnecessary on x86, generally.
PIC_FLAGS =
ASM_FLAGS = -DUSE_X86_ASM -DUSE_MMX_ASM -DUSE_3DNOW_ASM -DUSE_SSE_ASM
ASM_SOURCES = $(X86_SOURCES)
ASM_API = $(X86_API)

View file

@ -5,6 +5,7 @@ include $(TOP)/configs/linux
CONFIG_NAME = linux-static
MKLIB_OPTIONS = -static
PIC_FLAGS =
# Library names (actual file names)
GL_LIB_NAME = libGL.a

View file

@ -5,6 +5,7 @@ include $(TOP)/configs/linux-x86-64
CONFIG_NAME = linux-x86-64-static
MKLIB_OPTIONS = -static
PIC_FLAGS =
# Library names (actual file names)
GL_LIB_NAME = libGL.a

View file

@ -14,6 +14,9 @@ CXXFLAGS = -Wall -O3 -ansi -pedantic -fPIC -D_POSIX_SOURCE -D_POSIX_C_SOURCE=199
GLUT_CFLAGS = -fexceptions
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
ASM_SOURCES = $(X86_SOURCES)
ASM_API = $(X86_API)

View file

@ -5,6 +5,7 @@ include $(TOP)/configs/linux-x86
CONFIG_NAME = linux-x86-static
MKLIB_OPTIONS = -static
PIC_FLAGS =
# Library names (actual file names)
GL_LIB_NAME = libGL.a

View file

@ -12,3 +12,6 @@ CXXFLAGS = -O2 -fPIC
GLUT_CFLAGS = -fexceptions
APP_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -L/usr/X11R6/lib -lXext -lXmu -lXi -lX11 -lm
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing

View file

@ -10,6 +10,10 @@ CXX = g++
CFLAGS = -O2 -fPIC -I/usr/X11R6/include -DUSE_XSHM -DHZ=100
CXXFLAGS = -O2 -fPIC -I/usr/X11R6/include -DHZ=100
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
GL_LIB_DEPS = -L/usr/X11R6/lib -lX11 -lXext -lm
OSMESA_LIB_DEPS = -lm
GLU_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GL_LIB)

View file

@ -11,6 +11,10 @@ CFLAGS = -O3 -march=i486 -fPIC -I/usr/openwin/include -DUSE_XSHM
CXXFLAGS = -O3 -march=i486 -fPIC
GLUT_CFLAGS = -fexceptions
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
GL_LIB_DEPS = -L/usr/openwin/lib -lX11 -lXext -lm -lpthread
GLU_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GL_LIB) -lm
GLUT_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GLU_LIB) -l$(GL_LIB) -L/usr/openwin/lib -lX11 -lXmu -lXt -lXi -lm

View file

@ -12,6 +12,10 @@ CXXFLAGS = -O3 -march=i486 -fPIC
GLUT_CFLAGS = -fexceptions
MKLIB_OPTIONS = -static
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
GL_LIB_DEPS = -L/usr/openwin/lib -lX11 -lXext -lm -lpthread
GLU_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GL_LIB) -lm
GLUT_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -l$(GLU_LIB) -l$(GL_LIB) -L/usr/openwin/lib -lX11 -lXmu -lXt -lXi -lm

View file

@ -11,5 +11,9 @@ CFLAGS = -fPIC -O3 -I/usr/openwin/include -I/usr/include/X11R5 -I/usr/include/X1
CXXFLAGS = -fPIC -O3 -I/usr/openwin/include -DSUNOS4
GLUT_CFLAGS = -fexceptions -DSOLARIS_2_4_BUG
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
APP_LIB_DEPS = -L$(TOP)/$(LIB_DIR) -L/usr/openwin/lib -lX11 -lXext -lXmu -lXi -lm

View file

@ -28,6 +28,10 @@ CXXFLAGS = $(WARN_FLAGS) $(OPT_FLAGS) $(PIC_FLAGS) $(ARCH_FLAGS) $(DEFINES) \
GLUT_CFLAGS = -fexceptions -DSOLARIS_2_4_BUG
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
# Library/program dependencies
EXTRA_LIB_PATH=-L/usr/openwin/lib

View file

@ -21,6 +21,9 @@ CXX_ARCH_FLAGS = -m64
CXXFLAGS = $(CXX_WARN_FLAGS) $(CXX_OPT_FLAGS) $(CXX_PIC_FLAGS) $(CXX_ARCH_FLAGS) $(DEFINES) \
-I/usr/openwin/include
# Work around aliasing bugs - developers should comment this out
CXXFLAGS += -fno-strict-aliasing
CFLAGS = -xarch=v9 -KPIC -O -I/usr/openwin/include -I/usr/dt/include -DUSE_XSHM -DPTHREADS
#CXXFLAGS = -xarch=v9 -KPIC -O -I/usr/openwin/include -I/usr/dt/include -DPTHREADS
GLUT_CFLAGS = -DSOLARIS_2_4_BUG

View file

@ -12,6 +12,10 @@ CXXFLAGS = -pedantic -O2
GLUT_CFLAGS = -fexceptions
MKLIB_OPTIONS = -static
# Work around aliasing bugs - developers should comment this out
CFLAGS += -fno-strict-aliasing
CXXFLAGS += -fno-strict-aliasing
GL_LIB_NAME = libGL.a
GLU_LIB_NAME = libGLU.a
GLUT_LIB_NAME = libglut.a

View file

@ -34,6 +34,11 @@ in the normal Mesa releases so you'll need to get the latest sources
sources from the <a href="repository.html">git repository</a>.
</p>
<p>
This fbdev/DRI environment isn't well supported.
Code and documentation updates/patches are welcomed.
</p>
<h1>2. Compilation</h1>
@ -66,6 +71,7 @@ You'll need fbdev header files. Check with:
</pre>
<p>
You'll need to get Mesa from git (see above).
Compile Mesa with the 'linux-solo' configuration:
</p>
<pre>

View file

@ -6,7 +6,7 @@
<BODY>
<H1>Help Wanted</H1>
<H1>Help Wanted / To-Do List</H1>
<p>
We can always use more help with the Mesa project.
@ -14,36 +14,13 @@ Here are some specific ideas and areas where help would be appreciated:
</p>
<ol>
<li><p>
Generate the src/mesa/main/enums.c file with a Python script which
uses the gl_API.xml file.
</p>
<li><p>
Try to auto-generate the display list "save" functions seen in dlist.c
using a Python script and the gl_API.xml file.
The gl_API.xml file will probably need a new tag to indicate whether or
not each function gets compiled into display lists.
</p>
<li><p>
Maintenance of assembly language files on Linux, Windows and SPARC systems.
</p>
<li><p>
Help to incorporate the 3Dlabs' shading language compiler for OpenGL 2.0.
</p>
<li><p>
Implement assembly language (SSE/MMX) code generation for
vertex/fragment programs.
</p>
<li><p>
Windows 98/NT driver building, maintenance and testing
(Karl Schultz has been doing a great job of this lately).
</p>
<li><p>
Maintenance and testing of various drivers, such as DOS/DJGPP, GGI, etc.
</p>
<li><p>
Write new tests for Glean.
</p>
<li>
Enable -Wstrict-aliasing=2 -fstrict-aliasing and track down aliasing
issues in the code.
<li>
Windows 98/NT driver building, maintenance and testing
<li>
Maintenance and testing of lesser-used drivers, such as DOS/DJGPP, GGI, etc.
</ol>

View file

@ -24,6 +24,7 @@ TBD
<h2>New features</h2>
<ul>
<li>GL_EXT_texture_from_pixmap extension for Xlib driver
<li>Support for the GL shading language with i965 driver (implemented by Intel)
</ul>

View file

@ -48,6 +48,7 @@ in Mesa:
<li>The inverse trig functions asin(), acos(), and atan() are not implemented
<li>The gl_Color and gl_SecondaryColor varying vars are interpolated
without perspective correction
<li>Floating point literal suffixes 'f' and 'F' aren't allowed.
</ul>
<p>

View file

@ -169,8 +169,13 @@ do { \
/* CreatePixmap returns a PixmapPtr; so, it cannot be inside braces */
#ifdef CREATE_PIXMAP_USAGE_SCRATCH
#define XMesaCreatePixmap(__d,__b,__w,__h,__depth) \
(*__d->CreatePixmap)(__d, __w, __h, __depth, 0)
#else
#define XMesaCreatePixmap(__d,__b,__w,__h,__depth) \
(*__d->CreatePixmap)(__d, __w, __h, __depth)
#endif
#define XMesaFreePixmap(__d,__b) \
(*__d->DestroyPixmap)(__b)

View file

@ -1,4 +1,3 @@
/*
* GL_ARB_multitexture demo
*
@ -32,7 +31,6 @@ static GLint NumUnits = 1;
static GLboolean TexEnabled[8];
static GLfloat Drift = 0.0;
static GLfloat drift_increment = 0.005;
static GLfloat Xrot = 20.0, Yrot = 30.0, Zrot = 0.0;
@ -41,9 +39,7 @@ static void Idle( void )
if (Animate) {
GLint i;
Drift += drift_increment;
if (Drift >= 1.0)
Drift = 0.0;
Drift = glutGet(GLUT_ELAPSED_TIME) * 0.001;
for (i = 0; i < NumUnits; i++) {
glActiveTextureARB(GL_TEXTURE0_ARB + i);
@ -57,10 +53,11 @@ static void Idle( void )
glTranslatef(0.0, Drift, 0.0);
}
else {
glTranslatef(0.5, 0.5, 0.0);
float tx = 0.5, ty = 0.5;
glTranslatef(tx, ty, 0.0);
glRotatef(180.0 * Drift, 0, 0, 1);
glScalef(1.0/i, 1.0/i, 1.0/i);
glTranslatef(-0.5, -0.5, 0.0);
glTranslatef(-tx, -ty + i * 0.1, 0.0);
}
}
glMatrixMode(GL_MODELVIEW);
@ -72,10 +69,9 @@ static void Idle( void )
static void DrawObject(void)
{
GLint i;
GLint j;
static const GLfloat tex_coords[] = { 0.0, 0.0, 1.0, 1.0, 0.0 };
static const GLfloat vtx_coords[] = { -1.0, -1.0, 1.0, 1.0, -1.0 };
static const GLfloat tex_coords[] = { 0.0, 0.0, 1.0, 1.0, 0.0 };
static const GLfloat vtx_coords[] = { -1.0, -1.0, 1.0, 1.0, -1.0 };
GLint i, j;
if (!TexEnabled[0] && !TexEnabled[1])
glColor3f(0.1, 0.1, 0.1); /* add onto this */
@ -83,37 +79,20 @@ static void DrawObject(void)
glColor3f(1, 1, 1); /* modulate this */
glBegin(GL_QUADS);
/* Toggle between the vector and scalar entry points. This is done purely
* to hit multiple paths in the driver.
*/
if ( Drift > 0.49 ) {
for (j = 0; j < 4; j++ ) {
for (i = 0; i < NumUnits; i++)
glMultiTexCoord2fARB(GL_TEXTURE0_ARB + i,
tex_coords[j], tex_coords[j+1]);
glVertex2f( vtx_coords[j], vtx_coords[j+1] );
for (j = 0; j < 4; j++ ) {
for (i = 0; i < NumUnits; i++) {
if (TexEnabled[i])
glMultiTexCoord2fARB(GL_TEXTURE0_ARB + i,
tex_coords[j], tex_coords[j+1]);
}
glVertex2f( vtx_coords[j], vtx_coords[j+1] );
}
else {
for (j = 0; j < 4; j++ ) {
for (i = 0; i < NumUnits; i++)
glMultiTexCoord2fvARB(GL_TEXTURE0_ARB + i, & tex_coords[j]);
glVertex2fv( & vtx_coords[j] );
}
}
glEnd();
}
static void Display( void )
{
static GLint T0 = 0;
static GLint Frames = 0;
GLint t;
glClear( GL_COLOR_BUFFER_BIT );
glPushMatrix();
@ -125,16 +104,6 @@ static void Display( void )
glPopMatrix();
glutSwapBuffers();
Frames++;
t = glutGet(GLUT_ELAPSED_TIME);
if (t - T0 >= 250) {
GLfloat seconds = (t - T0) / 1000.0;
drift_increment = 2.2 * seconds / Frames;
T0 = t;
Frames = 0;
}
}
@ -151,24 +120,34 @@ static void Reshape( int width, int height )
}
static void ToggleUnit(int unit)
{
TexEnabled[unit] = !TexEnabled[unit];
glActiveTextureARB(GL_TEXTURE0_ARB + unit);
if (TexEnabled[unit])
glEnable(GL_TEXTURE_2D);
else
glDisable(GL_TEXTURE_2D);
printf("Enabled: ");
for (unit = 0; unit < NumUnits; unit++)
printf("%d ", (int) TexEnabled[unit]);
printf("\n");
}
static void ModeMenu(int entry)
{
if (entry >= TEX0 && entry <= TEX7) {
/* toggle */
GLint i = entry - TEX0;
TexEnabled[i] = !TexEnabled[i];
glActiveTextureARB(GL_TEXTURE0_ARB + i);
if (TexEnabled[i])
glEnable(GL_TEXTURE_2D);
else
glDisable(GL_TEXTURE_2D);
printf("Enabled: ");
for (i = 0; i < NumUnits; i++)
printf("%d ", (int) TexEnabled[i]);
printf("\n");
ToggleUnit(i);
}
else if (entry==ANIMATE) {
Animate = !Animate;
if (Animate)
glutIdleFunc(Idle);
else
glutIdleFunc(NULL);
}
else if (entry==QUIT) {
exit(0);
@ -183,9 +162,36 @@ static void Key( unsigned char key, int x, int y )
(void) x;
(void) y;
switch (key) {
case 27:
exit(0);
break;
case 'a':
Animate = !Animate;
break;
case '0':
ToggleUnit(0);
break;
case '1':
ToggleUnit(1);
break;
case '2':
ToggleUnit(2);
break;
case '3':
ToggleUnit(3);
break;
case '4':
ToggleUnit(4);
break;
case '5':
ToggleUnit(5);
break;
case '6':
ToggleUnit(6);
break;
case '7':
ToggleUnit(7);
break;
case 27:
exit(0);
break;
}
glutPostRedisplay();
}
@ -327,7 +333,8 @@ int main( int argc, char *argv[] )
glutKeyboardFunc( Key );
glutSpecialFunc( SpecialKey );
glutDisplayFunc( Display );
glutIdleFunc( Idle );
if (Animate)
glutIdleFunc(Idle);
glutCreateMenu(ModeMenu);

View file

@ -106,8 +106,12 @@ Redisplay(void)
static void
Idle(void)
{
Zrot = glutGet(GLUT_ELAPSED_TIME) * 0.0005;
glutPostRedisplay();
if (anim) {
Zrot = glutGet(GLUT_ELAPSED_TIME) * 0.0005;
glutPostRedisplay();
}
else
abort();
}
@ -149,6 +153,12 @@ Key(unsigned char key, int x, int y)
else
glutIdleFunc(NULL);
break;
case 'z':
Zrot = 0;
break;
case 's':
Zrot += 0.05;
break;
case 27:
CleanUp();
exit(0);

View file

@ -25,6 +25,27 @@ static int doPrint = 1;
static int deltaY;
GLint windW, windH;
static const struct {
GLenum mode;
const char *name;
} LogicOpModes[] = {
{ GL_SET, "GL_SET" },
{ GL_COPY, "GL_COPY" },
{ GL_NOOP, "GL_NOOP" },
{ GL_AND, "GL_AND" },
{ GL_INVERT, "GL_INVERT" },
{ GL_OR, "GL_OR" },
{ GL_XOR, "GL_XOR" },
{ GL_NOR, "GL_NOR" },
{ GL_NAND, "GL_NAND" },
{ GL_OR_REVERSE, "GL_OR_REVERSE" },
{ GL_OR_INVERTED, "GL_OR_INVERTED" },
{ GL_AND_INVERTED, "GL_AND_INVERTED" },
{ 0, NULL }
};
static void DrawString(const char *string)
{
int i;
@ -47,7 +68,7 @@ static void Reshape(int width, int height)
windH = (GLint)height;
glViewport(0, 0, (GLint)width, (GLint)height);
deltaY = windH /16;
deltaY = windH /20;
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
@ -105,7 +126,7 @@ static void Draw(void)
{
int stringOffset = 5, stringx = 8;
int x1, x2, xleft, xright;
int i;
int i, k;
(dithering) ? glEnable(GL_DITHER) : glDisable(GL_DITHER);
glDisable(GL_BLEND);
@ -122,6 +143,7 @@ static void Draw(void)
/* Draw labels */
glColor3f(0.8, 0.8, 0.0);
i = windH - deltaY + stringOffset;
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("SOURCE");
glRasterPos2f(stringx, i); i -= deltaY;
@ -136,21 +158,12 @@ static void Draw(void)
DrawString("reverse_subtract");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("clear");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("set");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("copy");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("noop");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("and");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("invert");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("or");
glRasterPos2f(stringx, i); i -= deltaY;
DrawString("xor");
for (k = 0; LogicOpModes[k].name; k++) {
glRasterPos2f(stringx, i);
i -= deltaY;
DrawString(LogicOpModes[k].name);
}
i = windH - deltaY;
x1 = windW/4;
@ -193,43 +206,23 @@ static void Draw(void)
glLogicOp(GL_CLEAR);
glRectf(x1, i, x2, i+deltaY);
i -= deltaY;
glLogicOp(GL_SET);
glRectf(x1, i, x2, i+deltaY);
for (k = 0; LogicOpModes[k].name; k++) {
i -= deltaY;
glLogicOp(LogicOpModes[k].mode);
glRectf(x1, i, x2, i+deltaY);
if (LogicOpModes[k].mode == GL_XOR) {
glRectf(x1, i+10, x2, i+5);
}
}
i -= deltaY;
glLogicOp(GL_COPY);
glRectf(x1, i, x2, i+deltaY);
i -= deltaY;
glLogicOp(GL_NOOP);
glRectf(x1, i, x2, i+deltaY);
i -= deltaY;
glLogicOp(GL_AND);
glRectf(x1, i, x2, i+deltaY);
i -= deltaY;
glLogicOp(GL_INVERT);
glRectf(x1, i, x2, i+deltaY);
i -= deltaY;
glLogicOp(GL_OR);
glRectf(x1, i, x2, i+deltaY);
i -= deltaY;
glLogicOp(GL_XOR);
glRectf(x1, i, x2, i+deltaY);
glRectf(x1, i+10, x2, i+5);
if (doPrint) {
glDisable(GL_BLEND);
if (supportlogops & 2)
if (doPrint) {
glDisable(GL_BLEND);
if (supportlogops & 2)
glDisable(GL_COLOR_LOGIC_OP);
glColor3f(1.0, 1.0, 1.0);
PrintColorStrings();
}
glFlush();
glColor3f(1.0, 1.0, 1.0);
PrintColorStrings();
}
glFlush();
if (doubleBuffer) {
glutSwapBuffers();
@ -271,7 +264,7 @@ int main(int argc, char **argv)
exit(1);
}
glutInitWindowPosition(0, 0); glutInitWindowSize( 800, 400);
glutInitWindowPosition(0, 0); glutInitWindowSize( 800, 520);
type = GLUT_RGB;
type |= (doubleBuffer) ? GLUT_DOUBLE : GLUT_SINGLE;

View file

@ -35,6 +35,7 @@ SOURCES = \
cva.c \
dinoshade.c \
drawbuffers.c \
exactrast.c \
floattex.c \
fbotest1.c \
fbotest2.c \

200
progs/tests/exactrast.c Normal file
View file

@ -0,0 +1,200 @@
/**
* Test for exact point/line/polygon rasterization, or at least rasterization
* that fits the tolerance of the OpenGL spec.
*
* Brian Paul
* 9 Nov 2007
*/
/*
* Notes:
* - 'm' to cycle through point, hline, vline and quad drawing
* - Use cursor keys to translate coordinates (z to reset)
* - Resize window to check for proper rasterization
* - Make sure your LCD is running in its native resolution
*
* If translation is (0,0):
* a point will be drawn where x%2==0 and y%2==0,
* a horizontal line will be drawn where x%2==0,
* a vertical line will be drawn where y%2==0,
* for quads, pixels will be set where (x%4)!=3 and (y%4)!=3
*
* XXX todo: do glReadPixels and test that the results are what's expected.
* Upon failure, iterate over sub-pixel translations to find the ideal offset.
*/
#include <stdio.h>
#include <stdlib.h>
#include <GL/glut.h>
/* Current window dimensions; kept up to date by Reshape(). */
static int Width = 400, Height = 400;
/* GLUT window handle, needed so Key() can destroy the window on Esc. */
static int Win;
/* User-controlled translation, applied on top of the fixed 0.375 bias. */
static float Xtrans = 0, Ytrans = 0;
/* Amount the cursor keys move the translation per press. */
static float Step = 0.125;

/* Drawing modes, cycled by the 'm' key (see Key()). */
enum {
   POINTS,
   HLINES,
   VLINES,
   QUADS,
   NUM_MODES
};

/* Currently selected drawing mode. */
static int Mode = POINTS;
static void
Draw(void)
{
   /* Render the test pattern for the current mode.
    *
    * A fixed (0.375, 0.375) bias is added so integer window coordinates
    * land on pixel centers; see the OpenGL Programming Guide, Appendix H,
    * "OpenGL Correctness Tips", for the rationale behind 0.375.
    */
   const float bias = 0.375;
   int x, y;

   glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

   glPushMatrix();
   glTranslatef(bias + Xtrans, bias + Ytrans, 0);

   switch (Mode) {
   case POINTS:
      /* a point wherever x%2==0 and y%2==0 */
      glBegin(GL_POINTS);
      for (y = 0; y < Height; y += 2) {
         for (x = 0; x < Width; x += 2) {
            glVertex2f(x, y);
         }
      }
      glEnd();
      break;
   case HLINES:
      /* a horizontal line wherever y%2==0 */
      glBegin(GL_LINES);
      for (y = 0; y < Height; y += 2) {
         glVertex2f(0, y);
         glVertex2f(Width, y);
      }
      glEnd();
      break;
   case VLINES:
      /* a vertical line wherever x%2==0 */
      glBegin(GL_LINES);
      for (x = 0; x < Width; x += 2) {
         glVertex2f(x, 0 );
         glVertex2f(x, Height);
      }
      glEnd();
      break;
   case QUADS:
      /* 3x3-pixel quads on a 4-pixel grid */
      glBegin(GL_QUADS);
      for (y = 0; y < Height; y += 4) {
         for (x = 0; x < Width; x += 4) {
            glVertex2f(x, y );
            glVertex2f(x + 3, y );
            glVertex2f(x + 3, y + 3);
            glVertex2f(x, y + 3);
         }
      }
      glEnd();
      break;
   }

   glPopMatrix();

   glutSwapBuffers();
}
static void
Reshape(int width, int height)
{
   /* Track the new window size and set up a pixel-exact orthographic
    * projection: one GL unit per pixel, origin at the lower-left corner. */
   Width = width;
   Height = height;
   glViewport(0, 0, width, height);
   glMatrixMode(GL_PROJECTION);
   glLoadIdentity();
   glOrtho(0, width, 0, height, -1, 1);
   glMatrixMode(GL_MODELVIEW);
   glLoadIdentity();
}
static void
Key(unsigned char key, int x, int y)
{
   /* Keyboard handler: 'm' cycles the drawing mode, 'z' resets the
    * translation, Esc quits.  Any key triggers a redisplay. */
   (void) x;
   (void) y;

   if (key == 'm' || key == 'M') {
      Mode = (Mode + 1) % NUM_MODES;
   }
   else if (key == 'z' || key == 'Z') {
      Xtrans = Ytrans = 0;
      printf("Translation: %f, %f\n", Xtrans, Ytrans);
   }
   else if (key == 27) {
      /* Esc: tear down the window before exiting */
      glutDestroyWindow(Win);
      exit(0);
   }

   glutPostRedisplay();
}
static void
SpecialKey(int key, int x, int y)
{
   /* Arrow keys nudge the translation by Step; the new translation is
    * reported after every special key, matched or not. */
   float dx = 0, dy = 0;

   (void) x;
   (void) y;

   if (key == GLUT_KEY_UP)
      dy = Step;
   else if (key == GLUT_KEY_DOWN)
      dy = -Step;
   else if (key == GLUT_KEY_LEFT)
      dx = -Step;
   else if (key == GLUT_KEY_RIGHT)
      dx = Step;

   Xtrans += dx;
   Ytrans += dy;

   glutPostRedisplay();
   printf("Translation: %f, %f\n", Xtrans, Ytrans);
}
static void
Init(void)
{
   /* Intentionally empty: the test relies entirely on default GL state. */
}
static void
Usage(void)
{
   /* Print the key bindings to stdout at startup. */
   printf("Keys:\n");
   printf(" up/down/left/right - translate by %f\n", Step);
   printf(" z - reset translation to zero\n");
   printf(" m - change rendering mode (points, hlines, vlines, quads)\n");
   printf(" Esc - exit\n");
}
int
main(int argc, char *argv[])
{
   /* Standard GLUT setup: double-buffered RGB window, callbacks
    * registered, then hand control to the GLUT main loop (never returns
    * normally; the program exits via Key()). */
   glutInit(&argc, argv);
   glutInitWindowPosition(0, 0);
   glutInitWindowSize(Width, Height);
   glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
   Win = glutCreateWindow(argv[0]);
   glutReshapeFunc(Reshape);
   glutKeyboardFunc(Key);
   glutSpecialFunc(SpecialKey);
   glutDisplayFunc(Draw);
   Init();
   Usage();
   glutMainLoop();
   return 0;
}

View file

@ -7,7 +7,6 @@
* September 2000
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
@ -211,7 +210,9 @@ static void SpecialKey( int key, int x, int y )
static void Init( int argc, char *argv[] )
{
GLfloat r[2];
GLuint u;
for (u = 0; u < 2; u++) {
glActiveTextureARB(GL_TEXTURE0_ARB + u);
glBindTexture(GL_TEXTURE_2D, 10+u);
@ -242,6 +243,15 @@ static void Init( int argc, char *argv[] )
printf("GL_VENDOR = %s\n", (char *) glGetString(GL_VENDOR));
printf("GL_EXTENSIONS = %s\n", (char *) glGetString(GL_EXTENSIONS));
}
glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, r);
printf("Non-smooth point size range: %g .. %g\n", r[0], r[1]);
glGetFloatv(GL_POINT_SIZE_RANGE, r);
printf("Smoothed point size range: %g .. %g\n", r[0], r[1]);
glGetFloatv(GL_ALIASED_LINE_WIDTH_RANGE, r);
printf("Non-smooth line width range: %g .. %g\n", r[0], r[1]);
glGetFloatv(GL_LINE_WIDTH_RANGE, r);
printf("Smoothed line width range: %g .. %g\n", r[0], r[1]);
}

View file

@ -22,15 +22,14 @@
* OF THIS SOFTWARE.
*/
#define GL_GLEXT_PROTOTYPES
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <GL/glut.h>
#define CI_OFFSET_1 16
#define CI_OFFSET_2 32
GLenum doubleBuffer;
@ -40,53 +39,63 @@ static void Init(void)
fprintf(stderr, "GL_VERSION = %s\n", (char *) glGetString(GL_VERSION));
fprintf(stderr, "GL_VENDOR = %s\n", (char *) glGetString(GL_VENDOR));
glClearColor(0.0, 0.0, 1.0, 0.0);
glClearColor(0.0, 0.0, 1.0, 0.0);
}
/* GLUT reshape callback: update the viewport and set an orthographic
 * projection covering [-1,1]x[-1,1] with depth range [0,100].
 *
 * Fix: the original body contained TWO consecutive glOrtho() calls (a
 * merge/diff artifact).  glOrtho multiplies the current matrix rather
 * than replacing it, so the two calls composed into an unintended
 * projection; only the intended (newer) call is kept.
 */
static void Reshape(int width, int height)
{
   glViewport(0, 0, (GLint)width, (GLint)height);
   glMatrixMode(GL_PROJECTION);
   glLoadIdentity();
   glOrtho(-1.0, 1.0, -1.0, 1.0, 0, 100.0);
   glMatrixMode(GL_MODELVIEW);
}
/* GLUT keyboard callback: Esc (27) exits the program; every other key is
 * ignored.
 *
 * Fix: the original body had each of "case 27:" and "default:" duplicated
 * (a merge/diff artifact); duplicate case values and multiple default
 * labels in one switch are constraint violations (compile errors) in C.
 * The trailing glutPostRedisplay() was unreachable — both switch arms
 * leave the function — and has been dropped.
 */
static void Key(unsigned char key, int x, int y)
{
   (void) x;
   (void) y;

   switch (key) {
   case 27:   /* Esc */
      exit(1);
   default:
      return;
   }
}
static float
expected(float z, float size, const float atten[3])
{
float dist = fabs(z);
const GLfloat q = atten[0] + dist * (atten[1] + dist * atten[2]);
const GLfloat a = sqrt(1.0 / q);
return size * a;
}
static void Draw(void)
{
static GLfloat theQuad[3] = { 0.25, 0.0, 1/60.0 };
static GLfloat atten[3] = { 0.0, 0.1, .01 };
float size = 40.0;
int i;
glClear(GL_COLOR_BUFFER_BIT);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glPointSize(8.0);
glPointParameterfvARB(GL_POINT_DISTANCE_ATTENUATION_ARB, theQuad);
glPointSize(size);
glPointParameterfvARB(GL_POINT_DISTANCE_ATTENUATION_ARB, atten);
glBegin(GL_POINTS);
glColor3f(1,0,0);
glVertex3f( 0.9, -0.9, -30.0);
glColor3f(1,1,0);
glVertex3f( 0.9, 0.9, -30.0);
glColor3f(1,0,1);
glVertex3f(-0.9, 0.9, -30.0);
glColor3f(0,1,1);
glVertex3f(-0.9, -0.9, -30.0);
printf("Expected point sizes:\n");
glBegin(GL_POINTS);
for (i = 0; i < 5; i++) {
float x = -0.8 + i * 0.4;
float z = -i * 20 - 10;
glVertex3f( x, 0.0, z);
printf(" %f\n", expected(z, size, atten));
}
glEnd();
glFlush();
@ -96,6 +105,7 @@ static void Draw(void)
}
}
static GLenum Args(int argc, char **argv)
{
GLint i;
@ -115,6 +125,7 @@ static GLenum Args(int argc, char **argv)
return GL_TRUE;
}
int main(int argc, char **argv)
{
GLenum type;
@ -131,7 +142,7 @@ int main(int argc, char **argv)
type |= (doubleBuffer) ? GLUT_DOUBLE : GLUT_SINGLE;
glutInitDisplayMode(type);
if (glutCreateWindow("First Tri") == GL_FALSE) {
if (glutCreateWindow(argv[0]) == GL_FALSE) {
exit(1);
}
@ -141,5 +152,5 @@ int main(int argc, char **argv)
glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
glutMainLoop();
return 0;
return 0;
}

View file

@ -47,7 +47,6 @@ PERFORMANCE OF THIS SOFTWARE.
#include <GL/glx.h>
#include <GL/glu.h>
#include <X11/keysym.h>
#include <stdlib.h>
#include <stdio.h>
@ -135,7 +134,7 @@ int main(int argc, char** argv) {
/* set up viewing parameters */
glMatrixMode(GL_PROJECTION);
gluPerspective(20, 1, 10, 20);
glFrustum(-1, 1, -1, 1, 6, 20);
glMatrixMode(GL_MODELVIEW);
glTranslatef(0, 0, -15);

View file

@ -333,7 +333,8 @@ _gl_get_context_mode_data(const __GLcontextModes *mode, int attribute,
*value_return = mode->bindToTextureRgba;
return 0;
case GLX_BIND_TO_MIPMAP_TEXTURE_EXT:
*value_return = mode->bindToMipmapTexture;
*value_return = mode->bindToMipmapTexture == GL_TRUE ? GL_TRUE :
GL_FALSE;
return 0;
case GLX_BIND_TO_TEXTURE_TARGETS_EXT:
*value_return = mode->bindToTextureTargets;
@ -453,19 +454,28 @@ _gl_context_modes_destroy( __GLcontextModes * modes )
*/
__GLcontextModes *
_gl_context_modes_find_visual( __GLcontextModes * modes, int vid )
_gl_context_modes_find_visual(__GLcontextModes *modes, int vid)
{
while ( modes != NULL ) {
if ( modes->visualID == vid ) {
break;
}
__GLcontextModes *m;
modes = modes->next;
}
for (m = modes; m != NULL; m = m->next)
if (m->visualID == vid)
return m;
return modes;
return NULL;
}
/**
 * Walk a linked list of context modes looking for the entry whose
 * \c fbconfigID matches \c fbid.
 *
 * \param modes  Head of the mode list (may be \c NULL).
 * \param fbid   GLXFBConfig ID to search for.
 * \return       The matching mode, or \c NULL if none matches.
 */
__GLcontextModes *
_gl_context_modes_find_fbconfig(__GLcontextModes *modes, int fbid)
{
   __GLcontextModes *node = modes;

   while (node != NULL) {
      if (node->fbconfigID == fbid)
         break;
      node = node->next;
   }

   return node;
}
/**
* Determine if two context-modes are the same. This is intended to be used

View file

@ -44,8 +44,10 @@ extern int _gl_get_context_mode_data( const __GLcontextModes *mode,
extern __GLcontextModes * _gl_context_modes_create( unsigned count,
size_t minimum_size );
extern void _gl_context_modes_destroy( __GLcontextModes * modes );
extern __GLcontextModes * _gl_context_modes_find_visual(
__GLcontextModes * modes, int vid );
extern __GLcontextModes *
_gl_context_modes_find_visual(__GLcontextModes *modes, int vid);
extern __GLcontextModes *
_gl_context_modes_find_fbconfig(__GLcontextModes *modes, int fbid);
extern GLboolean _gl_context_modes_are_same( const __GLcontextModes * a,
const __GLcontextModes * b );

View file

@ -232,15 +232,6 @@ struct __GLXcontextRec {
*/
XID share_xid;
/**
* Visual id.
*
* \deprecated
* This filed has been largely been replaced by the \c mode field, but
* the work is not quite done.
*/
VisualID vid;
/**
* Screen number.
*/
@ -351,34 +342,22 @@ struct __GLXcontextRec {
*/
GLint majorOpcode;
/**
* Pointer to the mode used to create this context.
*/
const __GLcontextModes * mode;
#ifdef GLX_DIRECT_RENDERING
/**
* Per context direct rendering interface functions and data.
*/
__DRIcontext driContext;
/**
* Pointer to the mode used to create this context.
*/
const __GLcontextModes * mode;
/**
* XID for the server side drm_context_t
*/
XID hwContextID;
#endif
/**
* \c GLXFBConfigID used to create this context. May be \c None. This
* field has been replaced by the \c mode field.
*
* \since Internal API version 20030317.
*
* \deprecated
* This filed has been largely been replaced by the \c mode field, but
* the work is not quite done.
*/
GLXFBConfigID fbconfigID;
/**
* The current read-drawable for this context. Will be None if this
@ -740,7 +719,12 @@ extern int __glXGetInternalVersion(void);
/* Get the unadjusted system time */
extern int __glXGetUST( int64_t * ust );
extern GLboolean __glXGetMscRateOML(__DRIdrawable *draw,
extern GLboolean __glXGetMscRateOML(Display * dpy, GLXDrawable drawable,
int32_t * numerator, int32_t * denominator);
#ifdef GLX_DIRECT_RENDERING
GLboolean
__driGetMscRateOML(__DRIdrawable *draw, int32_t *numerator, int32_t *denominator);
#endif
#endif /* !__GLX_client_h__ */

View file

@ -101,6 +101,7 @@ static void GarbageCollectDRIDrawables(Display *dpy, __GLXscreenConfigs *sc)
longer exists in the Xserver */
(*pdraw->driDrawable.destroyDrawable)(&pdraw->driDrawable);
XF86DRIDestroyDrawable(dpy, sc->scr, draw);
__glxHashDelete(sc->drawHash, draw);
Xfree(pdraw);
}
} while (__glxHashNext(sc->drawHash, &draw, (void *)&pdraw) == 1);
@ -379,17 +380,21 @@ CreateContext(Display *dpy, XVisualInfo *vis,
const __GLcontextModes * mode;
drm_context_t hwContext;
/* The value of fbconfig cannot change because it is tested
* later in the function.
*/
if ( fbconfig == NULL ) {
/* FIXME: Is it possible for the __GLcontextModes structure
* FIXME: to not be found?
*/
mode = _gl_context_modes_find_visual( psc->configs,
vis->visualid );
assert( mode != NULL );
assert( mode->screen == screen );
if (fbconfig == NULL) {
mode = _gl_context_modes_find_visual(psc->visuals, vis->visualid);
if (mode == NULL) {
xError error;
error.errorCode = BadValue;
error.resourceID = vis->visualid;
error.sequenceNumber = dpy->request;
error.type = X_Error;
error.majorCode = gc->majorOpcode;
error.minorCode = X_GLXCreateContext;
_XError(dpy, &error);
return None;
}
}
else {
mode = fbconfig;
@ -401,7 +406,7 @@ CreateContext(Display *dpy, XVisualInfo *vis,
if (!XF86DRICreateContextWithConfig(dpy, psc->scr,
mode->fbconfigID,
mode->visualID,
&gc->hwContextID, &hwContext))
/* gah, handle this better */
return NULL;
@ -416,8 +421,6 @@ CreateContext(Display *dpy, XVisualInfo *vis,
gc->isDirect = GL_TRUE;
gc->screen = mode->screen;
gc->psc = psc;
gc->vid = mode->visualID;
gc->fbconfigID = mode->fbconfigID;
gc->mode = mode;
}
else {
@ -1511,13 +1514,15 @@ static int __glXQueryContextInfo(Display *dpy, GLXContext ctx)
ctx->share_xid = *pProp++;
break;
case GLX_VISUAL_ID_EXT:
ctx->vid = *pProp++;
ctx->mode =
_gl_context_modes_find_visual(ctx->psc->visuals, *pProp++);
break;
case GLX_SCREEN:
ctx->screen = *pProp++;
break;
case GLX_FBCONFIG_ID:
ctx->fbconfigID = *pProp++;
ctx->mode =
_gl_context_modes_find_fbconfig(ctx->psc->configs, *pProp++);
break;
case GLX_RENDER_TYPE:
ctx->renderType = *pProp++;
@ -1542,7 +1547,7 @@ glXQueryContext(Display *dpy, GLXContext ctx, int attribute, int *value)
int retVal;
/* get the information from the server if we don't have it already */
if (!ctx->isDirect && (ctx->vid == None)) {
if (!ctx->isDirect && (ctx->mode == NULL)) {
retVal = __glXQueryContextInfo(dpy, ctx);
if (Success != retVal) return retVal;
}
@ -1551,13 +1556,13 @@ glXQueryContext(Display *dpy, GLXContext ctx, int attribute, int *value)
*value = (int)(ctx->share_xid);
break;
case GLX_VISUAL_ID_EXT:
*value = (int)(ctx->vid);
*value = ctx->mode ? ctx->mode->visualID : None;
break;
case GLX_SCREEN:
*value = (int)(ctx->screen);
break;
case GLX_FBCONFIG_ID:
*value = (int)(ctx->fbconfigID);
*value = ctx->mode ? ctx->mode->fbconfigID : None;
break;
case GLX_RENDER_TYPE:
*value = (int)(ctx->renderType);
@ -2165,6 +2170,68 @@ static Bool __glXGetSyncValuesOML(Display *dpy, GLXDrawable drawable,
return False;
}
#ifdef GLX_DIRECT_RENDERING
/**
 * Determine the vertical refresh rate of the screen behind \c draw as an
 * exact fraction *numerator / *denominator, using the XFree86-VidMode
 * extension's mode line.
 *
 * \param draw         DRI drawable whose screen's mode line is queried.
 * \param numerator    Returns the refresh-rate numerator.
 * \param denominator  Returns the refresh-rate denominator.
 * \return \c True on success; \c False when XF86VIDMODE support is
 *         compiled out or the VidMode queries fail.
 */
GLboolean
__driGetMscRateOML(__DRIdrawable *draw, int32_t *numerator, int32_t *denominator)
{
#ifdef XF86VIDMODE
__GLXscreenConfigs *psc;
XF86VidModeModeLine mode_line;
int dot_clock;
int i;
__GLXdrawable *glxDraw;
/* Recover the enclosing __GLXdrawable; draw is its driDrawable member. */
glxDraw = containerOf(draw, __GLXdrawable, driDrawable);
psc = glxDraw->psc;
if (XF86VidModeQueryVersion(psc->dpy, &i, &i) &&
XF86VidModeGetModeLine(psc->dpy, psc->scr, &dot_clock, &mode_line) ) {
/* refresh = pixel clock / pixels per frame; dot_clock is presumably in
 * kHz, hence the *1000 scaling -- TODO confirm against XF86VidMode docs.
 */
unsigned n = dot_clock * 1000;
unsigned d = mode_line.vtotal * mode_line.htotal;
# define V_INTERLACE 0x010
# define V_DBLSCAN 0x020
/* Interlaced modes complete a full frame twice as often; doublescan
 * modes scan each line twice, halving the rate.
 */
if (mode_line.flags & V_INTERLACE)
n *= 2;
else if (mode_line.flags & V_DBLSCAN)
d *= 2;
/* The OML_sync_control spec requires that if the refresh rate is a
* whole number, that the returned numerator be equal to the refresh
* rate and the denominator be 1.
*/
if (n % d == 0) {
n /= d;
d = 1;
}
else {
static const unsigned f[] = { 13, 11, 7, 5, 3, 2, 0 };
/* This is a poor man's way to reduce a fraction. It's far from
* perfect, but it will work well enough for this situation.
*/
for (i = 0; f[i] != 0; i++) {
while (n % f[i] == 0 && d % f[i] == 0) {
d /= f[i];
n /= f[i];
}
}
}
*numerator = n;
*denominator = d;
return True;
}
else
return False;
#else
return False;
#endif
}
#endif
/**
* Determine the refresh rate of the specified drawable and display.
@ -2182,71 +2249,19 @@ static Bool __glXGetSyncValuesOML(Display *dpy, GLXDrawable drawable,
* when GLX_OML_sync_control appears in the client extension string.
*/
GLboolean __glXGetMscRateOML(__DRIdrawable *draw,
GLboolean __glXGetMscRateOML(Display * dpy, GLXDrawable drawable,
int32_t * numerator, int32_t * denominator)
{
#if defined( GLX_DIRECT_RENDERING ) && defined( XF86VIDMODE )
__GLXdrawable *glxDraw =
containerOf(draw, __GLXdrawable, driDrawable);
__GLXscreenConfigs *psc = glxDraw->psc;
Display *dpy = psc->dpy;
__GLXdisplayPrivate * const priv = __glXInitialize(dpy);
__DRIdrawable *driDraw = GetDRIDrawable(dpy, drawable, NULL);
if (driDraw == NULL)
return False;
if ( priv != NULL ) {
XF86VidModeModeLine mode_line;
int dot_clock;
int i;
if (XF86VidModeQueryVersion( dpy, & i, & i ) &&
XF86VidModeGetModeLine(dpy, psc->scr, &dot_clock, &mode_line) ) {
unsigned n = dot_clock * 1000;
unsigned d = mode_line.vtotal * mode_line.htotal;
# define V_INTERLACE 0x010
# define V_DBLSCAN 0x020
if ( (mode_line.flags & V_INTERLACE) ) {
n *= 2;
}
else if ( (mode_line.flags & V_DBLSCAN) ) {
d *= 2;
}
/* The OML_sync_control spec requires that if the refresh rate is a
* whole number, that the returned numerator be equal to the refresh
* rate and the denominator be 1.
*/
if ( (n % d) == 0 ) {
n /= d;
d = 1;
}
else {
static const unsigned f[] = { 13, 11, 7, 5, 3, 2, 0 };
/* This is a poor man's way to reduce a fraction. It's far from
* perfect, but it will work well enough for this situation.
*/
for ( i = 0 ; f[i] != 0 ; i++ ) {
while ( ((n % f[i]) == 0) && ((d % f[i]) == 0) ) {
d /= f[i];
n /= f[i];
}
}
}
*numerator = n;
*denominator = d;
return True;
}
}
return __driGetMscRateOML(driDraw, numerator, denominator);
#else
(void) draw;
(void) dpy;
(void) drawable;
(void) numerator;
(void) denominator;
#endif

View file

@ -635,18 +635,17 @@ __glXInitializeVisualConfigFromTags( __GLcontextModes *config, int count,
#ifdef GLX_DIRECT_RENDERING
static unsigned
static void
filter_modes( __GLcontextModes ** server_modes,
const __GLcontextModes * driver_modes )
{
__GLcontextModes * m;
__GLcontextModes ** prev_next;
const __GLcontextModes * check;
unsigned modes_count = 0;
if ( driver_modes == NULL ) {
if (driver_modes == NULL) {
fprintf(stderr, "libGL warning: 3D driver returned no fbconfigs.\n");
return 0;
return;
}
/* For each mode in server_modes, check to see if a matching mode exists
@ -684,12 +683,9 @@ filter_modes( __GLcontextModes ** server_modes,
_gl_context_modes_destroy( m );
}
else {
modes_count++;
prev_next = & m->next;
}
}
return modes_count;
}
#ifdef XDAMAGE_1_1_INTERFACE
@ -792,7 +788,7 @@ static const __DRIinterfaceMethods interface_methods = {
__glXDRIGetDrawableInfo,
__glXGetUST,
__glXGetMscRateOML,
__driGetMscRateOML,
__glXReportDamage,
};
@ -954,7 +950,8 @@ CallCreateNewScreen(Display *dpy, int scrn, __GLXscreenConfigs *psc,
& driver_modes );
filter_modes(&psc->configs, driver_modes);
_gl_context_modes_destroy( driver_modes );
filter_modes(&psc->visuals, driver_modes);
_gl_context_modes_destroy(driver_modes);
}
}
}

View file

@ -9203,80 +9203,6 @@ __indirect_glLoadProgramNV(GLenum target, GLuint id, GLsizei len,
}
}
#define X_GLrop_ProgramParameter4dvNV 4185
void
__indirect_glProgramParameter4dNV(GLenum target, GLuint index, GLdouble x,
GLdouble y, GLdouble z, GLdouble w)
{
__GLXcontext *const gc = __glXGetCurrentContext();
const GLuint cmdlen = 44;
emit_header(gc->pc, X_GLrop_ProgramParameter4dvNV, cmdlen);
(void) memcpy((void *) (gc->pc + 4), (void *) (&target), 4);
(void) memcpy((void *) (gc->pc + 8), (void *) (&index), 4);
(void) memcpy((void *) (gc->pc + 12), (void *) (&x), 8);
(void) memcpy((void *) (gc->pc + 20), (void *) (&y), 8);
(void) memcpy((void *) (gc->pc + 28), (void *) (&z), 8);
(void) memcpy((void *) (gc->pc + 36), (void *) (&w), 8);
gc->pc += cmdlen;
if (__builtin_expect(gc->pc > gc->limit, 0)) {
(void) __glXFlushRenderBuffer(gc, gc->pc);
}
}
#define X_GLrop_ProgramParameter4dvNV 4185
void
__indirect_glProgramParameter4dvNV(GLenum target, GLuint index,
const GLdouble * params)
{
__GLXcontext *const gc = __glXGetCurrentContext();
const GLuint cmdlen = 44;
emit_header(gc->pc, X_GLrop_ProgramParameter4dvNV, cmdlen);
(void) memcpy((void *) (gc->pc + 4), (void *) (&target), 4);
(void) memcpy((void *) (gc->pc + 8), (void *) (&index), 4);
(void) memcpy((void *) (gc->pc + 12), (void *) (params), 32);
gc->pc += cmdlen;
if (__builtin_expect(gc->pc > gc->limit, 0)) {
(void) __glXFlushRenderBuffer(gc, gc->pc);
}
}
#define X_GLrop_ProgramParameter4fvNV 4184
void
__indirect_glProgramParameter4fNV(GLenum target, GLuint index, GLfloat x,
GLfloat y, GLfloat z, GLfloat w)
{
__GLXcontext *const gc = __glXGetCurrentContext();
const GLuint cmdlen = 28;
emit_header(gc->pc, X_GLrop_ProgramParameter4fvNV, cmdlen);
(void) memcpy((void *) (gc->pc + 4), (void *) (&target), 4);
(void) memcpy((void *) (gc->pc + 8), (void *) (&index), 4);
(void) memcpy((void *) (gc->pc + 12), (void *) (&x), 4);
(void) memcpy((void *) (gc->pc + 16), (void *) (&y), 4);
(void) memcpy((void *) (gc->pc + 20), (void *) (&z), 4);
(void) memcpy((void *) (gc->pc + 24), (void *) (&w), 4);
gc->pc += cmdlen;
if (__builtin_expect(gc->pc > gc->limit, 0)) {
(void) __glXFlushRenderBuffer(gc, gc->pc);
}
}
#define X_GLrop_ProgramParameter4fvNV 4184
void
__indirect_glProgramParameter4fvNV(GLenum target, GLuint index,
const GLfloat * params)
{
__GLXcontext *const gc = __glXGetCurrentContext();
const GLuint cmdlen = 28;
emit_header(gc->pc, X_GLrop_ProgramParameter4fvNV, cmdlen);
(void) memcpy((void *) (gc->pc + 4), (void *) (&target), 4);
(void) memcpy((void *) (gc->pc + 8), (void *) (&index), 4);
(void) memcpy((void *) (gc->pc + 12), (void *) (params), 16);
gc->pc += cmdlen;
if (__builtin_expect(gc->pc > gc->limit, 0)) {
(void) __glXFlushRenderBuffer(gc, gc->pc);
}
}
#define X_GLrop_ProgramParameters4dvNV 4187
void
__indirect_glProgramParameters4dvNV(GLenum target, GLuint index, GLuint num,

View file

@ -639,10 +639,6 @@ extern HIDDEN void __indirect_glGetVertexAttribfvNV(GLuint index, GLenum pname,
extern HIDDEN void __indirect_glGetVertexAttribivNV(GLuint index, GLenum pname, GLint * params);
extern HIDDEN GLboolean __indirect_glIsProgramNV(GLuint program);
extern HIDDEN void __indirect_glLoadProgramNV(GLenum target, GLuint id, GLsizei len, const GLubyte * program);
extern HIDDEN void __indirect_glProgramParameter4dNV(GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
extern HIDDEN void __indirect_glProgramParameter4dvNV(GLenum target, GLuint index, const GLdouble * params);
extern HIDDEN void __indirect_glProgramParameter4fNV(GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
extern HIDDEN void __indirect_glProgramParameter4fvNV(GLenum target, GLuint index, const GLfloat * params);
extern HIDDEN void __indirect_glProgramParameters4dvNV(GLenum target, GLuint index, GLuint num, const GLdouble * params);
extern HIDDEN void __indirect_glProgramParameters4fvNV(GLenum target, GLuint index, GLuint num, const GLfloat * params);
extern HIDDEN void __indirect_glRequestResidentProgramsNV(GLsizei n, const GLuint * ids);

View file

@ -681,10 +681,6 @@ __GLapi * __glXNewIndirectAPI( void )
glAPI->GetVertexAttribivNV = __indirect_glGetVertexAttribivNV;
glAPI->IsProgramNV = __indirect_glIsProgramNV;
glAPI->LoadProgramNV = __indirect_glLoadProgramNV;
glAPI->ProgramParameter4dNV = __indirect_glProgramParameter4dNV;
glAPI->ProgramParameter4dvNV = __indirect_glProgramParameter4dvNV;
glAPI->ProgramParameter4fNV = __indirect_glProgramParameter4fNV;
glAPI->ProgramParameter4fvNV = __indirect_glProgramParameter4fvNV;
glAPI->ProgramParameters4dvNV = __indirect_glProgramParameters4dvNV;
glAPI->ProgramParameters4fvNV = __indirect_glProgramParameters4fvNV;
glAPI->RequestResidentProgramsNV = __indirect_glRequestResidentProgramsNV;

View file

@ -34,6 +34,7 @@
called by that routine when direct rendering is enabled.
*/
#ifdef GLX_DIRECT_RENDERING
#include "glxclient.h"
@ -209,7 +210,6 @@ static XCharStruct *isvalid(XFontStruct *fs, int which)
return(NULL);
}
void DRI_glXUseXFont( Font font, int first, int count, int listbase )
{
GLXContext CC;
@ -374,4 +374,4 @@ bm_height);
glPixelStorei(GL_UNPACK_ALIGNMENT, alignment);
}
/* The End. */
#endif

View file

@ -112,6 +112,8 @@ _mesa_init_driver_functions(struct dd_function_table *driver)
driver->DeleteTexture = _mesa_delete_texture_object;
driver->NewTextureImage = _mesa_new_texture_image;
driver->FreeTexImageData = _mesa_free_texture_image_data;
driver->MapTexture = NULL;
driver->UnmapTexture = NULL;
driver->TextureMemCpy = _mesa_memcpy;
driver->IsTextureResident = NULL;
driver->PrioritizeTexture = NULL;

View file

@ -40,9 +40,8 @@ dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
assert((location_mask & ~(DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_PRIV0 |
DRM_BO_FLAG_MEM_PRIV1 | DRM_BO_FLAG_MEM_PRIV2 |
DRM_BO_FLAG_MEM_PRIV3 |
DRM_BO_FLAG_MEM_PRIV4)) == 0);
DRM_BO_FLAG_MEM_PRIV3 | DRM_BO_FLAG_MEM_PRIV4 |
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED)) == 0);
return bufmgr->bo_alloc(bufmgr, name, size, alignment, location_mask);
}

View file

@ -134,13 +134,42 @@ struct _dri_bufmgr {
* Tears down the buffer manager instance.
*/
void (*destroy)(dri_bufmgr *bufmgr);
/**
* Add relocation
*/
void (*emit_reloc)(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset, dri_bo *relocatee);
void *(*process_relocs)(dri_bo *batch_buf, GLuint *count);
/**
* Add relocation entry in reloc_buf, to be set on command submission.
*
* \param reloc_buf Buffer to write the relocation into.
* \param flags BO flags to be used in validating the target buffer.
* Applicable flags include:
* - DRM_BO_FLAG_READ: The buffer will be read in the process of
* command execution.
* - DRM_BO_FLAG_WRITE: The buffer will be written in the process of
* command execution.
* - DRM_BO_FLAG_MEM_TT: The buffer should be validated in TT memory.
* - DRM_BO_FLAG_MEM_VRAM: The buffer should be validated in video
* memory.
* \param delta Constant value to be added to the relocation target's offset.
* \param offset Byte offset within batch_buf of the relocated pointer.
* \param target Buffer whose offset should be written into the relocation
* entry.
*/
void (*emit_reloc)(dri_bo *reloc_buf, GLuint flags, GLuint delta,
GLuint offset, dri_bo *target);
/**
* Processes the relocations, either in userland or by converting the list
* for use in batchbuffer submission.
*
* Kernel-based implementations will return a pointer to the arguments
* to be handed with batchbuffer submission to the kernel. The userland
* implementation performs the buffer validation and emits relocations
* into them the appopriate order.
*
* \param batch_buf buffer at the root of the tree of relocations
* \param count returns the number of buffers validated.
* \return relocation record for use in command submission.
* */
void *(*process_relocs)(dri_bo *batch_buf, GLuint *count);
void (*post_submit)(dri_bo *batch_buf, dri_fence **fence);
};
@ -173,6 +202,7 @@ dri_bufmgr *dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
int (*fence_wait)(void *private,
unsigned int cookie),
void *driver_priv);
void dri_bufmgr_fake_set_debug(dri_bufmgr *bufmgr, GLboolean enable_debug);
void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
dri_bo *dri_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
unsigned int handle);

View file

@ -41,11 +41,10 @@
#include "mm.h"
#include "imports.h"
#if 0
#define DBG(...) _mesa_printf(__VA_ARGS__)
#else
#define DBG(...)
#endif
#define DBG(...) do { \
if (bufmgr_fake->debug) \
_mesa_printf(__VA_ARGS__); \
} while (0)
/* Internal flags:
*/
@ -63,20 +62,31 @@
struct fake_buffer_reloc
{
dri_bo *buf;
dri_bo *reloc_buf;
dri_bo *target_buf;
GLuint offset;
GLuint delta; /* not needed? */
GLuint delta;
GLuint validate_flags;
GLboolean relocated;
};
struct block {
struct block *next, *prev;
struct mem_block *mem; /* BM_MEM_AGP */
/**
* Marks that the block is currently in the aperture and has yet to be
* fenced.
*/
unsigned on_hardware:1;
/**
* Marks that the block is currently fenced (being used by rendering) and
* can't be freed until @fence is passed.
*/
unsigned fenced:1;
unsigned fence; /* BM_MEM_AGP, Split to read_fence, write_fence */
/** Fence cookie for the block. */
unsigned fence; /* Split to read_fence, write_fence */
dri_bo *bo;
void *virtual;
@ -118,13 +128,27 @@ typedef struct _bufmgr_fake {
/** Driver-supplied argument to driver callbacks */
void *driver_priv;
GLboolean debug;
/** fake relocation list */
struct fake_buffer_reloc reloc[MAX_RELOCS];
GLuint nr_relocs;
GLboolean performed_rendering;
GLboolean in_relocation;
} dri_bufmgr_fake;
#define RELOC_CACHE_COUNT 10
/**
* Relocation cache entry.
*
* These are used in buffer relocation to avoid re-mapping (and therefore
* dirtying) a buffer to emit constant relocations.
*/
struct reloc_cache {
unsigned int offset;
uint32_t data;
};
typedef struct _dri_bo_fake {
dri_bo bo;
@ -139,7 +163,21 @@ typedef struct _dri_bo_fake {
*/
unsigned int flags;
unsigned int alignment;
GLboolean is_static;
GLboolean is_static, validated;
unsigned int map_count;
/**
* Relocation count with this as reloc_buffer, to assist in determining the
* order to perform relocations.
*/
unsigned int nr_relocs;
struct reloc_cache reloc_cache[RELOC_CACHE_COUNT];
/* Flags for the buffer to be validated with in command submission */
uint64_t validate_flags;
/* Number of entries in the relocation data cache */
unsigned int reloc_cache_count;
struct block *block;
void *backing_store;
@ -545,6 +583,8 @@ dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
assert(size != 0);
bo_fake = calloc(1, sizeof(*bo_fake));
if (!bo_fake)
return NULL;
@ -581,6 +621,8 @@ dri_fake_bo_alloc_static(dri_bufmgr *bufmgr, const char *name,
bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
assert(size != 0);
bo_fake = calloc(1, sizeof(*bo_fake));
if (!bo_fake)
return NULL;
@ -629,6 +671,7 @@ dri_fake_bo_unreference(dri_bo *bo)
free_backing_store(bo);
_glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
free(bo);
DBG("drm_bo_unreference: free %s\n", bo_fake->name);
return;
}
_glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
@ -649,6 +692,17 @@ dri_fake_bo_map(dri_bo *bo, GLboolean write_enable)
return 0;
_glthread_LOCK_MUTEX(bufmgr_fake->mutex);
/* Allow recursive mapping, which is used internally in relocation. */
if (bo_fake->map_count++ != 0) {
_glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
return 0;
}
/* Clear the relocation cache if unknown data is going to be written in. */
if (!bufmgr_fake->in_relocation && write_enable) {
bo_fake->reloc_cache_count = 0;
}
{
DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
bo_fake->bo.size / 1024);
@ -692,20 +746,26 @@ dri_fake_bo_map(dri_bo *bo, GLboolean write_enable)
static int
dri_fake_bo_unmap(dri_bo *bo)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
/* Static buffers are always mapped. */
if (bo_fake->is_static)
return 0;
if (bo == NULL)
_glthread_LOCK_MUTEX(bufmgr_fake->mutex);
if (--bo_fake->map_count != 0) {
_glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
return 0;
}
DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
bo_fake->bo.size / 1024);
bo->virtual = NULL;
_glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
return 0;
}
@ -718,10 +778,10 @@ dri_fake_bo_validate(dri_bo *bo, unsigned int flags)
/* XXX: Sanity-check whether we've already validated this one under
* different flags. See drmAddValidateItem().
*/
bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
bo_fake->bo.size / 1024);
bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
_glthread_LOCK_MUTEX(bufmgr_fake->mutex);
{
@ -854,17 +914,34 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
}
static void
dri_fake_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset,
dri_bo *relocatee)
dri_fake_emit_reloc(dri_bo *reloc_buf, GLuint flags, GLuint delta,
GLuint offset, dri_bo *target_buf)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr;
struct fake_buffer_reloc *r = &bufmgr_fake->reloc[bufmgr_fake->nr_relocs++];
dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
assert(bufmgr_fake->nr_relocs <= MAX_RELOCS);
dri_bo_reference(relocatee);
dri_bo_reference(target_buf);
r->buf = relocatee;
if (target_fake->flags == 0) {
target_fake->validate_flags = flags;
} else {
/* Mask the memory location to the intersection of all the memory
* locations the buffer is being validated to.
*/
target_fake->validate_flags =
(target_fake->validate_flags & ~DRM_BO_MASK_MEM) |
(flags & target_fake->validate_flags & DRM_BO_MASK_MEM);
/* All the other flags just accumulate. */
target_fake->validate_flags |= flags & ~DRM_BO_MASK_MEM;
}
reloc_fake->nr_relocs++;
r->reloc_buf = reloc_buf;
r->target_buf = target_buf;
r->offset = offset;
r->delta = delta;
r->validate_flags = flags;
@ -872,81 +949,114 @@ dri_fake_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset
return;
}
/* qsort() comparator ordering fake_buffer_reloc entries by the address of
 * the buffer they reference, so relocations against the same buffer land
 * adjacent in the array.
 *
 * Fix: the original returned only -1 or 1, so two entries referencing the
 * SAME buffer compared as "less than" in both directions -- an
 * inconsistent ordering, which qsort() is not required to tolerate.
 * Return 0 for equal keys.
 */
static int
relocation_sort(const void *a_in, const void *b_in) {
   const struct fake_buffer_reloc *a = a_in, *b = b_in;
   const uintptr_t pa = (uintptr_t)a->buf;
   const uintptr_t pb = (uintptr_t)b->buf;

   if (pa < pb)
      return -1;
   return (pa > pb) ? 1 : 0;
}
static void *
dri_fake_process_relocs(dri_bo *batch_buf, GLuint *count_p)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
GLuint i;
GLuint *ptr;
GLuint count = 0;
assert(batch_buf->virtual != NULL);
ptr = batch_buf->virtual;
GLboolean cont;
int ret;
bufmgr_fake->performed_rendering = GL_FALSE;
bufmgr_fake->in_relocation = GL_TRUE;
/* Sort our relocation list in terms of referenced buffer pointer.
* This lets us uniquely validate the buffers with the sum of all the flags,
* while avoiding O(n^2) on number of relocations.
/* Loop over the relocation list validating and writing the relocation
* entries for target buffers that don't contain any remaining relocations.
* In the current examples we have, the depth of the tree of relocations
* is small (up to 3), so this loop shouldn't hurt too bad.
*/
qsort(bufmgr_fake->reloc, bufmgr_fake->nr_relocs, sizeof(bufmgr_fake->reloc[0]),
relocation_sort);
do {
cont = GL_FALSE;
/* Perform the necessary validations of buffers, and enter the relocations
* in the batchbuffer.
*/
for (i = 0; i < bufmgr_fake->nr_relocs; i++) {
struct fake_buffer_reloc *r = &bufmgr_fake->reloc[i];
for (i = 0; i < bufmgr_fake->nr_relocs; i++) {
struct fake_buffer_reloc *r = &bufmgr_fake->reloc[i];
dri_bo_fake *reloc_fake = (dri_bo_fake *)r->reloc_buf;
dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
uint32_t reloc_data;
int c;
GLboolean cached = GL_FALSE;
if (r->validate_flags & DRM_BO_FLAG_WRITE)
bufmgr_fake->performed_rendering = GL_TRUE;
if (r->relocated)
continue;
/* If this is the first time we've seen this buffer in the relocation
* list, figure out our flags and validate it.
*/
if (i == 0 || bufmgr_fake->reloc[i - 1].buf != r->buf) {
uint32_t validate_flags;
int j, ret;
/* Accumulate the flags we need for validating this buffer. */
validate_flags = r->validate_flags;
for (j = i + 1; j < bufmgr_fake->nr_relocs; j++) {
if (bufmgr_fake->reloc[j].buf != r->buf)
break;
validate_flags |= bufmgr_fake->reloc[j].validate_flags;
}
/* Validate. If we fail, fence to clear the unfenced list and bail
* out.
/* If there are still relocations to be done in the buffer, don't
* validate it yet.
*/
ret = dri_fake_bo_validate(r->buf, validate_flags);
if (ret != 0) {
dri_fence *fo;
dri_bo_unmap(batch_buf);
fo = dri_fake_fence_validated(batch_buf->bufmgr,
"batchbuffer failure fence", GL_TRUE);
dri_fence_unreference(fo);
goto done;
}
count++;
}
ptr[r->offset / 4] = r->buf->offset + r->delta;
dri_bo_unreference(r->buf);
}
dri_bo_unmap(batch_buf);
if (target_fake->nr_relocs != 0)
continue;
dri_fake_bo_validate(batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
/* Validate the target buffer if that hasn't been done. */
if (!target_fake->validated) {
ret = dri_fake_bo_validate(r->target_buf,
target_fake->validate_flags);
if (ret != 0) {
dri_fence *fo;
dri_bo_unmap(r->reloc_buf);
fo = dri_fake_fence_validated(batch_buf->bufmgr,
"batchbuffer failure fence",
GL_TRUE);
dri_fence_unreference(fo);
goto done;
}
if (target_fake->validate_flags & DRM_BO_FLAG_WRITE)
bufmgr_fake->performed_rendering = GL_TRUE;
count++;
}
/* Calculate the value of the relocation entry. */
reloc_data = r->target_buf->offset + r->delta;
/* Check the relocation cache of the buffer to see if we don't need
* to bother writing this one.
*/
for (c = 0; c < reloc_fake->reloc_cache_count; c++) {
if (reloc_fake->reloc_cache[c].offset == r->offset &&
reloc_fake->reloc_cache[c].data == reloc_data) {
cached = GL_TRUE;
}
}
if (!cached) {
/* Map and write in the relocation to reloc_buf */
if (reloc_fake->map_count == 0)
dri_bo_map(r->reloc_buf, GL_TRUE);
*(uint32_t *)(r->reloc_buf->virtual + r->offset) = reloc_data;
/* Stick this new entry in the relocation cache if possible */
if (reloc_fake->reloc_cache_count < RELOC_CACHE_COUNT) {
struct reloc_cache *entry;
entry = &reloc_fake->reloc_cache[reloc_fake->reloc_cache_count];
entry->offset = r->offset;
entry->data = reloc_data;
reloc_fake->reloc_cache_count++;
}
}
/* Mark this relocation in reloc_buf as done. If it was the last
* reloc to be done to it, unmap the buffer so it can be validated
* next.
*/
reloc_fake->nr_relocs--;
if (reloc_fake->nr_relocs == 0 && reloc_fake->map_count != 0)
dri_bo_unmap(r->reloc_buf);
r->relocated = GL_TRUE;
cont = GL_TRUE;
}
} while (cont);
ret = dri_fake_bo_validate(batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE);
assert(ret == 0);
*count_p = count;
bufmgr_fake->nr_relocs = 0;
bufmgr_fake->in_relocation = GL_FALSE;
done:
return NULL;
}
@ -956,6 +1066,7 @@ dri_fake_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
dri_fence *fo;
int i;
fo = dri_fake_fence_validated(batch_buf->bufmgr, "Batch fence", GL_TRUE);
@ -965,6 +1076,33 @@ dri_fake_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
} else {
dri_fence_unreference(fo);
}
/* Clean up the validation list. */
for (i = 0; i < bufmgr_fake->nr_relocs; i++) {
struct fake_buffer_reloc *r = &bufmgr_fake->reloc[i];
dri_bo_fake *reloc_fake = (dri_bo_fake *)r->reloc_buf;
dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
assert(r->relocated);
assert(reloc_fake->map_count == 0);
DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
reloc_fake->name, (uint32_t)r->reloc_buf->offset, r->offset,
target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
reloc_fake->validate_flags = 0;
target_fake->validated = GL_FALSE;
r->relocated = GL_FALSE;
dri_bo_unreference(r->target_buf);
}
bufmgr_fake->nr_relocs = 0;
}
/** Enable or disable debug output for the fake buffer manager. */
void
dri_bufmgr_fake_set_debug(dri_bufmgr *bufmgr, GLboolean enable_debug)
{
   ((dri_bufmgr_fake *) bufmgr)->debug = enable_debug;
}
dri_bufmgr *

View file

@ -40,13 +40,6 @@ static const char UniformMatrix3fvARB_names[] =
"";
#endif
#if defined(need_GL_NV_vertex_program)
static const char ProgramParameter4fNV_names[] =
"iiffff\0" /* Parameter signature */
"glProgramParameter4fNV\0"
"";
#endif
#if defined(need_GL_VERSION_1_3) || defined(need_GL_ARB_multisample)
static const char SampleCoverageARB_names[] =
"fi\0" /* Parameter signature */
@ -572,13 +565,6 @@ static const char MatrixIndexusvARB_names[] =
"";
#endif
#if defined(need_GL_NV_vertex_program)
static const char ProgramParameter4dvNV_names[] =
"iip\0" /* Parameter signature */
"glProgramParameter4dvNV\0"
"";
#endif
#if defined(need_GL_VERSION_2_0) || defined(need_GL_ARB_vertex_program)
static const char DisableVertexAttribArrayARB_names[] =
"i\0" /* Parameter signature */
@ -967,13 +953,6 @@ static const char GenerateMipmapEXT_names[] =
"";
#endif
#if defined(need_GL_NV_vertex_program)
static const char ProgramParameter4dNV_names[] =
"iidddd\0" /* Parameter signature */
"glProgramParameter4dNV\0"
"";
#endif
#if defined(need_GL_ATI_fragment_shader)
static const char SetFragmentShaderConstantATI_names[] =
"ip\0" /* Parameter signature */
@ -1311,10 +1290,11 @@ static const char Color3fVertex3fSUN_names[] =
"";
#endif
#if defined(need_GL_ARB_vertex_program)
#if defined(need_GL_ARB_vertex_program) || defined(need_GL_NV_vertex_program)
static const char ProgramEnvParameter4fvARB_names[] =
"iip\0" /* Parameter signature */
"glProgramEnvParameter4fvARB\0"
"glProgramParameter4fvNV\0"
"";
#endif
@ -2035,13 +2015,6 @@ static const char WeightfvARB_names[] =
"";
#endif
#if defined(need_GL_NV_vertex_program)
static const char ProgramParameter4fvNV_names[] =
"iip\0" /* Parameter signature */
"glProgramParameter4fvNV\0"
"";
#endif
#if defined(need_GL_MESA_window_pos)
static const char WindowPos4fMESA_names[] =
"ffff\0" /* Parameter signature */
@ -2432,10 +2405,11 @@ static const char GetBufferPointervARB_names[] =
"";
#endif
#if defined(need_GL_ARB_vertex_program)
#if defined(need_GL_ARB_vertex_program) || defined(need_GL_NV_vertex_program)
static const char ProgramEnvParameter4fARB_names[] =
"iiffff\0" /* Parameter signature */
"glProgramEnvParameter4fARB\0"
"glProgramParameter4fNV\0"
"";
#endif
@ -2803,10 +2777,11 @@ static const char ReplacementCodePointerSUN_names[] =
"";
#endif
#if defined(need_GL_ARB_vertex_program)
#if defined(need_GL_ARB_vertex_program) || defined(need_GL_NV_vertex_program)
static const char ProgramEnvParameter4dARB_names[] =
"iidddd\0" /* Parameter signature */
"glProgramEnvParameter4dARB\0"
"glProgramParameter4dNV\0"
"";
#endif
@ -3660,10 +3635,11 @@ static const char GetColorTableParameteriv_names[] =
"";
#endif
#if defined(need_GL_ARB_vertex_program)
#if defined(need_GL_ARB_vertex_program) || defined(need_GL_NV_vertex_program)
static const char ProgramEnvParameter4dvARB_names[] =
"iip\0" /* Parameter signature */
"glProgramEnvParameter4dvARB\0"
"glProgramParameter4dvNV\0"
"";
#endif
@ -5748,12 +5724,10 @@ static const struct dri_extension_function GL_NV_vertex_array_range_functions[]
#if defined(need_GL_NV_vertex_program)
static const struct dri_extension_function GL_NV_vertex_program_functions[] = {
{ ProgramParameter4fNV_names, ProgramParameter4fNV_remap_index, -1 },
{ VertexAttrib4ubvNV_names, VertexAttrib4ubvNV_remap_index, -1 },
{ VertexAttrib4svNV_names, VertexAttrib4svNV_remap_index, -1 },
{ VertexAttribs1dvNV_names, VertexAttribs1dvNV_remap_index, -1 },
{ VertexAttrib1fvNV_names, VertexAttrib1fvNV_remap_index, -1 },
{ ProgramParameter4dvNV_names, ProgramParameter4dvNV_remap_index, -1 },
{ VertexAttrib4fNV_names, VertexAttrib4fNV_remap_index, -1 },
{ VertexAttrib2dNV_names, VertexAttrib2dNV_remap_index, -1 },
{ VertexAttrib4ubNV_names, VertexAttrib4ubNV_remap_index, -1 },
@ -5761,7 +5735,7 @@ static const struct dri_extension_function GL_NV_vertex_program_functions[] = {
{ VertexAttribs4fvNV_names, VertexAttribs4fvNV_remap_index, -1 },
{ VertexAttrib2sNV_names, VertexAttrib2sNV_remap_index, -1 },
{ VertexAttribs3fvNV_names, VertexAttribs3fvNV_remap_index, -1 },
{ ProgramParameter4dNV_names, ProgramParameter4dNV_remap_index, -1 },
{ ProgramEnvParameter4fvARB_names, ProgramEnvParameter4fvARB_remap_index, -1 },
{ LoadProgramNV_names, LoadProgramNV_remap_index, -1 },
{ VertexAttrib4fvNV_names, VertexAttrib4fvNV_remap_index, -1 },
{ VertexAttrib3fNV_names, VertexAttrib3fNV_remap_index, -1 },
@ -5771,14 +5745,15 @@ static const struct dri_extension_function GL_NV_vertex_program_functions[] = {
{ VertexAttrib2fvNV_names, VertexAttrib2fvNV_remap_index, -1 },
{ VertexAttrib2dvNV_names, VertexAttrib2dvNV_remap_index, -1 },
{ VertexAttrib1dvNV_names, VertexAttrib1dvNV_remap_index, -1 },
{ ProgramParameter4fvNV_names, ProgramParameter4fvNV_remap_index, -1 },
{ VertexAttrib1svNV_names, VertexAttrib1svNV_remap_index, -1 },
{ ProgramEnvParameter4fARB_names, ProgramEnvParameter4fARB_remap_index, -1 },
{ VertexAttribs2svNV_names, VertexAttribs2svNV_remap_index, -1 },
{ GetVertexAttribivNV_names, GetVertexAttribivNV_remap_index, -1 },
{ GetVertexAttribfvNV_names, GetVertexAttribfvNV_remap_index, -1 },
{ VertexAttrib2svNV_names, VertexAttrib2svNV_remap_index, -1 },
{ VertexAttribs1fvNV_names, VertexAttribs1fvNV_remap_index, -1 },
{ IsProgramNV_names, IsProgramNV_remap_index, -1 },
{ ProgramEnvParameter4dARB_names, ProgramEnvParameter4dARB_remap_index, -1 },
{ VertexAttrib2fNV_names, VertexAttrib2fNV_remap_index, -1 },
{ RequestResidentProgramsNV_names, RequestResidentProgramsNV_remap_index, -1 },
{ ExecuteProgramNV_names, ExecuteProgramNV_remap_index, -1 },
@ -5791,6 +5766,7 @@ static const struct dri_extension_function GL_NV_vertex_program_functions[] = {
{ GetProgramivNV_names, GetProgramivNV_remap_index, -1 },
{ GetVertexAttribdvNV_names, GetVertexAttribdvNV_remap_index, -1 },
{ VertexAttrib3fvNV_names, VertexAttrib3fvNV_remap_index, -1 },
{ ProgramEnvParameter4dvARB_names, ProgramEnvParameter4dvARB_remap_index, -1 },
{ VertexAttribs2fvNV_names, VertexAttribs2fvNV_remap_index, -1 },
{ DeleteProgramsNV_names, DeleteProgramsNV_remap_index, -1 },
{ GetVertexAttribPointervNV_names, GetVertexAttribPointervNV_remap_index, -1 },

View file

@ -46,7 +46,6 @@ DRIVER_SOURCES = \
intel_context.c \
intel_decode.c \
intel_ioctl.c \
intel_rotate.c \
intel_screen.c \
intel_span.c \
intel_state.c \
@ -62,7 +61,7 @@ C_SOURCES = \
ASM_SOURCES =
DRIVER_DEFINES = -I../intel -I../intel/server \
DRIVER_DEFINES = -I../intel -I../intel/server -DI915 \
$(shell pkg-config libdrm --atleast-version=2.3.1 \
&& echo "-DDRM_VBLANK_FLIP=DRM_VBLANK_FLIP")

View file

@ -196,10 +196,6 @@ extern void i830InitState(struct i830_context *i830);
*/
extern void i830InitMetaFuncs(struct i830_context *i830);
extern void
i830RotateWindow(struct intel_context *intel, __DRIdrawablePrivate * dPriv,
GLuint srcBuf);
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:

View file

@ -35,7 +35,6 @@
#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_regions.h"
#include "intel_rotate.h"
#include "i915_context.h"
#include "i915_reg.h"

View file

@ -290,7 +290,7 @@ get_state_size(struct i915_hw_state *state)
/* Push the state into the sarea and/or texture memory.
*/
static void
i915_emit_state(struct intel_context *intel)
i915_do_emit_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = i915->current;
@ -307,11 +307,32 @@ i915_emit_state(struct intel_context *intel)
*/
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
/* Workaround. There are cases I haven't been able to track down
* where we aren't emitting a full state at the start of a new
* batchbuffer. This code spots that we are on a new batchbuffer
* and forces a full state emit no matter what.
*
* In the normal case state->emitted is already zero, this code is
* another set of checks to make sure it really is.
*/
if (intel->batch->id != intel->last_state_batch_id ||
intel->batch->map == intel->batch->ptr)
{
state->emitted = 0;
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
}
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
*/
dirty = get_dirty(state);
state->emitted |= dirty;
assert(get_dirty(state) == 0);
if (intel->batch->id != intel->last_state_batch_id) {
assert(dirty & I915_UPLOAD_CTX);
intel->last_state_batch_id = intel->batch->id;
}
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
@ -431,9 +452,32 @@ i915_emit_state(struct intel_context *intel)
i915_disassemble_program(state->Program, state->ProgramSize);
}
intel->batch->dirty_state &= ~dirty;
assert(get_dirty(state) == 0);
}
/* Emit the dirty hardware state, with a second pass to recover from a
 * batchbuffer wrap occurring mid-emit.
 */
static void
i915_emit_state(struct intel_context *intel)
{
   struct i915_context *ctx = i915_context(&intel->ctx);

   i915_do_emit_state(intel);

   /* A batchbuffer wrap during the first pass can leave state dirty
    * again.  This shouldn't happen, but it has been observed in
    * testing; force a complete re-emit when it does.
    */
   if (get_dirty(ctx->current) != 0) {
      ctx->current->emitted = 0;
      i915_do_emit_state(intel);
   }

   assert(get_dirty(ctx->current) == 0);
   assert((intel->batch->dirty_state & (1 << 1)) == 0);
}
static void
i915_destroy_context(struct intel_context *intel)
{

View file

@ -1,250 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_decode.h"
#include "intel_reg.h"
/* Relocations in kernel space:
 * - pass dma buffer separately
* - memory manager knows how to patch
* - pass list of dependent buffers
* - pass relocation list
*
* Either:
* - get back an offset for buffer to fire
* - memory manager knows how to fire buffer
*
* Really want the buffer to be AGP and pinned.
*
*/
/* Cliprect fence: The highest fence protecting a dma buffer
* containing explicit cliprect information. Like the old drawable
* lock but irq-driven. X server must wait for this fence to expire
* before changing cliprects [and then doing sw rendering?]. For
* other dma buffers, the scheduler will grab current cliprect info
* and mix into buffer. X server must hold the lock while changing
* cliprects??? Make per-drawable. Need cliprects in shared memory
* -- beats storing them with every cmd buffer in the queue.
*
* ==> X server must wait for this fence to expire before touching the
* framebuffer with new cliprects.
*
* ==> Cliprect-dependent buffers associated with a
* cliprect-timestamp. All of the buffers associated with a timestamp
* must go to hardware before any buffer with a newer timestamp.
*
* ==> Dma should be queued per-drawable for correct X/GL
* synchronization. Or can fences be used for this?
*
* Applies to: Blit operations, metaops, X server operations -- X
* server automatically waits on its own dma to complete before
* modifying cliprects ???
*/
/* Discard the current batchbuffer object (if any) and start a fresh,
 * mapped one of maxBatchSize bytes, ready for command emission.
 */
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;

   /* Drop our reference to the previous buffer before allocating. */
   if (batch->buf != NULL) {
      dri_bo_unreference(batch->buf);
      batch->buf = NULL;
   }

   batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
                             intel->intelScreen->maxBatchSize, 4096,
                             DRM_BO_FLAG_MEM_TT);
   /* Keep the buffer mapped for its whole fill lifetime; map/ptr track
    * the write cursor.
    */
   dri_bo_map(batch->buf, GL_TRUE);
   batch->map = batch->buf->virtual;
   batch->size = intel->intelScreen->maxBatchSize;
   batch->ptr = batch->map;
}
/** Create a zero-initialized batchbuffer bound to @intel and prime it
 * with a fresh, mapped buffer object via intel_batchbuffer_reset().
 */
struct intel_batchbuffer *
intel_batchbuffer_alloc(struct intel_context *intel)
{
   struct intel_batchbuffer *result;

   result = calloc(sizeof(*result), 1);
   result->intel = intel;
   result->last_fence = NULL;

   intel_batchbuffer_reset(result);

   return result;
}
/* Tear down a batchbuffer: wait for any outstanding fence, unmap and
 * release the buffer object, then free the wrapper struct.
 */
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   /* Make sure the hardware is done with the buffer before release. */
   if (batch->last_fence) {
      dri_fence_wait(batch->last_fence);
      dri_fence_unreference(batch->last_fence);
      batch->last_fence = NULL;
   }
   /* Unmap before dropping the last reference. */
   if (batch->map) {
      dri_bo_unmap(batch->buf);
      batch->map = NULL;
   }
   dri_bo_unreference(batch->buf);
   batch->buf = NULL;
   free(batch);
}
/* TODO: Push this whole function into bufmgr.
*/
/* Submit the filled batchbuffer to the kernel: resolve relocations and
 * fire it with the appropriate execution ioctl.
 *
 * Called with the hardware lock held (see intel_batchbuffer_flush).
 *
 * \param used             number of bytes of commands in the buffer
 * \param ignore_cliprects execute even when no cliprects are current
 * \param allow_unlock     permit a temporary unlock to yield the cpu
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used,
                GLboolean ignore_cliprects, GLboolean allow_unlock)
{
   struct intel_context *intel = batch->intel;
   void *start;
   GLuint count;

   /* Resolve relocations; "count" receives the number of buffers on the
    * validate list.
    */
   start = dri_process_relocs(batch->buf, &count);

   /* Buffer is submitted; no further writes through map/ptr. */
   batch->map = NULL;
   batch->ptr = NULL;
   batch->flags = 0;

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts which would preserve statechanges beyond a
    * single buffer.
    */
   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
      if (intel->intelScreen->ttm == GL_TRUE) {
         intel_exec_ioctl(batch->intel,
                          used, ignore_cliprects, allow_unlock,
                          start, count, &batch->last_fence);
      } else {
         intel_batch_ioctl(batch->intel,
                           batch->buf->offset,
                           used, ignore_cliprects, allow_unlock);
      }
   }

   dri_post_submit(batch->buf, &batch->last_fence);

   if (intel->numClipRects == 0 && !ignore_cliprects) {
      if (allow_unlock) {
         /* If we are not doing any actual user-visible rendering,
          * do a sched_yield to keep the app from pegging the cpu while
          * achieving nothing.
          */
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
      /* State was discarded above; mark hardware state lost. */
      intel->vtbl.lost_hardware(intel);
   }

   if (INTEL_DEBUG & DEBUG_BATCH) {
      //   dri_bo_map(batch->buf, GL_FALSE);
      //   intel_decode(ptr, used / 4, batch->buf->offset,
      //                intel->intelScreen->deviceID);
      //   dri_bo_unmap(batch->buf);
   }
}
/* Close the batchbuffer with a flush and MI_BATCH_BUFFER_END, submit it
 * to the hardware, then reset it for new commands.  Takes the hardware
 * lock if the caller does not already hold it.  A no-op if nothing has
 * been emitted since the last flush.
 */
void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;
   GLboolean was_locked = intel->locked;

   if (used == 0)
      return;

   /* Add the MI_BATCH_BUFFER_END.  Always add an MI_FLUSH - this is a
    * performance drain that we would like to avoid.
    */
   if (used & 4) {
      /* Currently dword- but not qword-aligned: emit an extra padding
       * dword so the buffer ends on a qword boundary.
       */
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = 0;
      ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
      used += 12;
   }
   else {
      ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
      ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
      used += 8;
   }

   /* TODO: Just pass the relocation list and dma buffer up to the
    * kernel.
    */
   if (!was_locked)
      LOCK_HARDWARE(intel);

   do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
                   GL_FALSE);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);

   /* Reset the buffer:
    */
   intel_batchbuffer_reset(batch);
}
/* Submit any pending commands and block until the hardware has
 * completed them.
 */
void
intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
   intel_batchbuffer_flush(batch);

   if (batch->last_fence == NULL)
      return;

   dri_fence_wait(batch->last_fence);
}
/* This is the only way buffers get added to the validate list.
*/
/* Record a relocation for @buffer at the current write position and
 * advance the cursor past the 4-byte slot the kernel will patch.
 * This is the only way buffers get added to the validate list.
 */
GLboolean
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
                             dri_bo *buffer,
                             GLuint flags, GLuint delta)
{
   GLuint slot_offset = batch->ptr - batch->map;

   dri_emit_reloc(batch->buf, flags, delta, slot_offset, buffer);
   batch->ptr += 4;

   return GL_TRUE;
}
/* Copy a block of pre-built, dword-aligned commands into the
 * batchbuffer, flushing first if there is not enough space.
 */
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
                       const void *data, GLuint bytes, GLuint flags)
{
   /* Commands are dword-sized; reject unaligned payloads. */
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(batch, bytes, flags);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}

View file

@ -0,0 +1 @@
../intel/intel_batchbuffer.c

View file

@ -1,491 +0,0 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <stdio.h>
#include <errno.h>
#include "mtypes.h"
#include "context.h"
#include "enums.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_reg.h"
#include "intel_regions.h"
#define FILE_DEBUG_FLAG DEBUG_BLIT
/**
* Copy the back color buffer to the front color buffer.
* Used for SwapBuffers().
*/
/**
 * Copy the back color buffer to the front color buffer.
 * Used for SwapBuffers().
 *
 * \param dPriv  the drawable being swapped
 * \param rect   optional sub-rectangle to limit the copy (drawable
 *               coordinates); NULL swaps the full drawable.
 */
void
intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
                const drm_clip_rect_t * rect)
{
   struct intel_context *intel;
   const intelScreenPrivate *intelScreen;

   DBG("%s\n", __FUNCTION__);

   assert(dPriv);

   intel = intelScreenContext(dPriv->driScreenPriv->private);
   if (!intel)
      return;

   intelScreen = intel->intelScreen;

   /* Throttle: wait for the swap-before-last before queueing another. */
   if (intel->last_swap_fence) {
      dri_fence_wait(intel->last_swap_fence);
      dri_fence_unreference(intel->last_swap_fence);
      intel->last_swap_fence = NULL;
   }
   intel->last_swap_fence = intel->first_swap_fence;
   intel->first_swap_fence = NULL;

   /* The LOCK_HARDWARE is required for the cliprects.  Buffer offsets
    * should work regardless.
    */
   LOCK_HARDWARE(intel);

   if (dPriv && dPriv->numClipRects) {
      struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
      const struct intel_region *frontRegion
         = intel_get_rb_region(&intel_fb->Base, BUFFER_FRONT_LEFT);
      const struct intel_region *backRegion
         = intel_get_rb_region(&intel_fb->Base, BUFFER_BACK_LEFT);
      const int nbox = dPriv->numClipRects;
      const drm_clip_rect_t *pbox = dPriv->pClipRects;
      const int pitch = frontRegion->pitch;
      const int cpp = frontRegion->cpp;
      int BR13, CMD;
      int i;

      ASSERT(intel_fb);
      ASSERT(intel_fb->Base.Name == 0);    /* Not a user-created FBO */
      ASSERT(frontRegion);
      ASSERT(backRegion);
      ASSERT(frontRegion->pitch == backRegion->pitch);
      ASSERT(frontRegion->cpp == backRegion->cpp);

      /* BR13: pitch, ROP 0xCC (straight copy), color depth bits. */
      if (cpp == 2) {
         BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
         CMD = XY_SRC_COPY_BLT_CMD;
      }
      else {
         BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
         CMD = (XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
      }

      for (i = 0; i < nbox; i++, pbox++) {
         drm_clip_rect_t box;

         /* Skip degenerate or out-of-screen cliprects. */
         if (pbox->x1 > pbox->x2 ||
             pbox->y1 > pbox->y2 ||
             pbox->x2 > intelScreen->width || pbox->y2 > intelScreen->height)
            continue;

         box = *pbox;

         if (rect) {
            /* Intersect the cliprect with the caller's sub-rectangle. */
            if (rect->x1 > box.x1)
               box.x1 = rect->x1;
            if (rect->y1 > box.y1)
               box.y1 = rect->y1;
            if (rect->x2 < box.x2)
               box.x2 = rect->x2;
            if (rect->y2 < box.y2)
               box.y2 = rect->y2;

            if (box.x1 > box.x2 || box.y1 > box.y2)
               continue;
         }

         /* Emit the clipped box, not the raw cliprect: previously the
          * unclipped pbox coordinates were emitted here, so the rect
          * intersection computed above was silently ignored and partial
          * swaps copied the whole cliprect.
          */
         BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
         OUT_BATCH(CMD);
         OUT_BATCH(BR13);
         OUT_BATCH((box.y1 << 16) | box.x1);
         OUT_BATCH((box.y2 << 16) | box.x2);
         OUT_RELOC(frontRegion->buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
                   0);
         OUT_BATCH((box.y1 << 16) | box.x1);
         OUT_BATCH(BR13 & 0xffff);
         OUT_RELOC(backRegion->buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                   0);
         ADVANCE_BATCH();
      }

      if (intel->first_swap_fence)
         dri_fence_unreference(intel->first_swap_fence);
      intel_batchbuffer_flush(intel->batch);
      intel->first_swap_fence = intel->batch->last_fence;
      dri_fence_reference(intel->first_swap_fence);
   }

   UNLOCK_HARDWARE(intel);
}
/* Emit a solid-color fill blit (XY_COLOR_BLT) covering the rectangle
 * (x, y) .. (x + w, y + h) of the destination buffer.
 *
 * \param cpp        bytes per pixel (1-4); other values are ignored
 * \param dst_pitch  destination pitch in pixels (scaled to bytes below)
 * \param dst_offset byte offset of the destination within dst_buffer
 * \param color      raw fill value written by the blitter
 */
void
intelEmitFillBlit(struct intel_context *intel,
                  GLuint cpp,
                  GLshort dst_pitch,
                  dri_bo *dst_buffer,
                  GLuint dst_offset,
                  GLshort x, GLshort y, GLshort w, GLshort h, GLuint color)
{
   GLuint BR13, CMD;
   BATCH_LOCALS;

   dst_pitch *= cpp;

   /* BR13: pitch in bytes, ROP 0xF0 (pattern copy), depth bits. */
   switch (cpp) {
   case 1:
   case 2:
   case 3:
      BR13 = dst_pitch | (0xF0 << 16) | (1 << 24);
      CMD = XY_COLOR_BLT_CMD;
      break;
   case 4:
      BR13 = dst_pitch | (0xF0 << 16) | (1 << 24) | (1 << 25);
      CMD = (XY_COLOR_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
      break;
   default:
      /* Unsupported depth: emit nothing. */
      return;
   }

   DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
       __FUNCTION__, dst_buffer, dst_pitch, dst_offset, x, y, w, h);

   BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
   OUT_BATCH(CMD);
   OUT_BATCH(BR13);
   OUT_BATCH((y << 16) | x);
   OUT_BATCH(((y + h) << 16) | (x + w));
   OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, dst_offset);
   OUT_BATCH(color);
   ADVANCE_BATCH();
}
/* Translate a GL logic op enum into the 8-bit raster operation code the
 * blitter expects.  Unknown values map to 0 (same as GL_CLEAR).
 */
static GLuint
translate_raster_op(GLenum logicop)
{
   switch (logicop) {
   case GL_CLEAR:         return 0x00;
   case GL_NOR:           return 0x11;
   case GL_AND_INVERTED:  return 0x22;
   case GL_COPY_INVERTED: return 0x33;
   case GL_AND_REVERSE:   return 0x44;
   case GL_INVERT:        return 0x55;
   case GL_XOR:           return 0x66;
   case GL_NAND:          return 0x77;
   case GL_AND:           return 0x88;
   case GL_EQUIV:         return 0x99;
   case GL_NOOP:          return 0xAA;
   case GL_OR_INVERTED:   return 0xBB;
   case GL_COPY:          return 0xCC;
   case GL_OR_REVERSE:    return 0xDD;
   case GL_OR:            return 0xEE;
   case GL_SET:           return 0xFF;
   default:               return 0;
   }
}
/* Copy BitBlt
*/
/* Emit a rectangle copy blit (XY_SRC_COPY_BLT) from src_buffer to
 * dst_buffer with the given logic op.
 *
 * Pitches are in pixels on entry and scaled to bytes internally; a
 * negative pitch selects the offset-adjusted emission path below.
 * Unsupported cpp values and negative w/h overflow cause an early
 * return with nothing emitted.
 */
void
intelEmitCopyBlit(struct intel_context *intel,
                  GLuint cpp,
                  GLshort src_pitch,
                  dri_bo *src_buffer,
                  GLuint src_offset,
                  GLshort dst_pitch,
                  dri_bo *dst_buffer,
                  GLuint dst_offset,
                  GLshort src_x, GLshort src_y,
                  GLshort dst_x, GLshort dst_y,
                  GLshort w, GLshort h,
                  GLenum logic_op)
{
   GLuint CMD, BR13;
   int dst_y2 = dst_y + h;
   int dst_x2 = dst_x + w;
   BATCH_LOCALS;

   DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
       __FUNCTION__,
       src_buffer, src_pitch, src_offset, src_x, src_y,
       dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);

   src_pitch *= cpp;
   dst_pitch *= cpp;

   /* BR13: destination pitch, raster op, and color depth bits. */
   switch (cpp) {
   case 1:
   case 2:
   case 3:
      BR13 = (((GLint) dst_pitch) & 0xffff) |
         (translate_raster_op(logic_op) << 16) | (1 << 24);
      CMD = XY_SRC_COPY_BLT_CMD;
      break;
   case 4:
      BR13 =
         (((GLint) dst_pitch) & 0xffff) |
         (translate_raster_op(logic_op) << 16) | (1 << 24) | (1 << 25);
      CMD =
         (XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
      break;
   default:
      return;
   }

   /* Guard against coordinate overflow from large w/h. */
   if (dst_y2 < dst_y || dst_x2 < dst_x) {
      return;
   }

   /* Initial y values don't seem to work with negative pitches.  If
    * we adjust the offsets manually (below), it seems to work fine.
    *
    * On the other hand, if we always adjust, the hardware doesn't
    * know which blit directions to use, so overlapping copypixels get
    * the wrong result.
    */
   if (dst_pitch > 0 && src_pitch > 0) {
      BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
      OUT_BATCH(CMD);
      OUT_BATCH(BR13);
      OUT_BATCH((dst_y << 16) | dst_x);
      OUT_BATCH((dst_y2 << 16) | dst_x2);
      OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, dst_offset);
      OUT_BATCH((src_y << 16) | src_x);
      OUT_BATCH(((GLint) src_pitch & 0xffff));
      OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, src_offset);
      ADVANCE_BATCH();
   }
   else {
      /* Negative pitch: fold the row offset into the buffer offsets and
       * blit starting from y == 0.
       */
      BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
      OUT_BATCH(CMD);
      OUT_BATCH(BR13);
      OUT_BATCH((0 << 16) | dst_x);
      OUT_BATCH((h << 16) | dst_x2);
      OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
                dst_offset + dst_y * dst_pitch);
      OUT_BATCH((0 << 16) | src_x);
      OUT_BATCH(((GLint) src_pitch & 0xffff));
      OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                src_offset + src_y * src_pitch);
      ADVANCE_BATCH();
   }
}
/**
* Use blitting to clear the renderbuffers named by 'flags'.
* Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferMask field
* since that might include software renderbuffers or renderbuffers
* which we're clearing with triangles.
* \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
*/
/* Clear the renderbuffers selected by \p mask (BUFFER_BIT_* values)
 * using XY_COLOR_BLT fill blits, one per cliprect per buffer.
 */
void
intelClearWithBlit(GLcontext * ctx, GLbitfield mask)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   GLuint clear_depth;
   GLbitfield skipBuffers = 0;
   BATCH_LOCALS;

   DBG("%s %x\n", __FUNCTION__, mask);

   /*
    * Compute values for clearing the buffers.
    */
   /* Pack depth in the low 24 bits and stencil in the high byte. */
   clear_depth = 0;
   if (mask & BUFFER_BIT_DEPTH) {
      clear_depth = (GLuint) (fb->_DepthMax * ctx->Depth.Clear);
   }
   if (mask & BUFFER_BIT_STENCIL) {
      clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
   }

   /* If clearing both depth and stencil, skip BUFFER_BIT_STENCIL in
    * the loop below.
    */
   if ((mask & BUFFER_BIT_DEPTH) && (mask & BUFFER_BIT_STENCIL)) {
      skipBuffers = BUFFER_BIT_STENCIL;
   }

   /* XXX Move this flush/lock into the following conditional? */
   intelFlush(&intel->ctx);
   LOCK_HARDWARE(intel);

   if (intel->numClipRects) {
      GLint cx, cy, cw, ch;
      drm_clip_rect_t clear;
      int i;

      /* Get clear bounds after locking */
      cx = fb->_Xmin;
      cy = fb->_Ymin;
      cw = fb->_Xmax - cx;
      ch = fb->_Ymax - cy;

      if (fb->Name == 0) {
         /* clearing a window */

         /* flip top to bottom */
         clear.x1 = cx + intel->drawX;
         clear.y1 = intel->driDrawable->y + intel->driDrawable->h - cy - ch;
         clear.x2 = clear.x1 + cw;
         clear.y2 = clear.y1 + ch;
      }
      else {
         /* clearing FBO */
         assert(intel->numClipRects == 1);
         assert(intel->pClipRects == &intel->fboRect);
         clear.x1 = cx;
         clear.y1 = cy;
         clear.x2 = clear.x1 + cw;
         clear.y2 = clear.y1 + ch;
         /* no change to mask */
      }

      for (i = 0; i < intel->numClipRects; i++) {
         const drm_clip_rect_t *box = &intel->pClipRects[i];
         drm_clip_rect_t b;
         GLuint buf;
         GLuint clearMask = mask;      /* use copy, since we modify it below */
         GLboolean all = (cw == fb->Width && ch == fb->Height);

         /* Intersect the scissored clear bounds with this cliprect,
          * unless the clear covers the whole framebuffer.
          */
         if (!all) {
            intel_intersect_cliprects(&b, &clear, box);
         }
         else {
            b = *box;
         }

         if (b.x1 >= b.x2 || b.y1 >= b.y2)
            continue;

         if (0)
            _mesa_printf("clear %d,%d..%d,%d, mask %x\n",
                         b.x1, b.y1, b.x2, b.y2, mask);

         /* Loop over all renderbuffers */
         for (buf = 0; buf < BUFFER_COUNT && clearMask; buf++) {
            const GLbitfield bufBit = 1 << buf;
            if ((clearMask & bufBit) && !(bufBit & skipBuffers)) {
               /* OK, clear this renderbuffer */
               struct intel_region *irb_region =
                  intel_get_rb_region(fb, buf);
               dri_bo *write_buffer =
                  intel_region_buffer(intel->intelScreen, irb_region,
                                      all ? INTEL_WRITE_FULL :
                                      INTEL_WRITE_PART);
               GLuint clearVal;
               GLint pitch, cpp;
               GLuint BR13, CMD;

               ASSERT(irb_region);

               pitch = irb_region->pitch;
               cpp = irb_region->cpp;

               DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
                   __FUNCTION__,
                   irb_region->buffer, (pitch * cpp),
                   irb_region->draw_offset,
                   b.x1, b.y1, b.x2 - b.x1, b.y2 - b.y1);

               /* Setup the blit command */
               if (cpp == 4) {
                  BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
                  if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
                     /* Use the channel write-enables to clear only the
                      * depth (RGB) and/or stencil (alpha) bytes of the
                      * combined buffer.
                      */
                     CMD = XY_COLOR_BLT_CMD;
                     if (clearMask & BUFFER_BIT_DEPTH)
                        CMD |= XY_BLT_WRITE_RGB;
                     if (clearMask & BUFFER_BIT_STENCIL)
                        CMD |= XY_BLT_WRITE_ALPHA;
                  }
                  else {
                     /* clearing RGBA */
                     CMD = XY_COLOR_BLT_CMD |
                        XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
                  }
               }
               else {
                  ASSERT(cpp == 2 || cpp == 0);
                  BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
                  CMD = XY_COLOR_BLT_CMD;
               }

               if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
                  clearVal = clear_depth;
               }
               else {
                  clearVal = (cpp == 4)
                     ? intel->ClearColor8888 : intel->ClearColor565;
               }
               /*
                  _mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
                  buf, irb->Base.Name);
                */
               intel_wait_flips(intel, INTEL_BATCH_NO_CLIPRECTS);

               BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
               OUT_BATCH(CMD);
               OUT_BATCH(BR13);
               OUT_BATCH((b.y1 << 16) | b.x1);
               OUT_BATCH((b.y2 << 16) | b.x2);
               OUT_RELOC(write_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
                         irb_region->draw_offset);
               OUT_BATCH(clearVal);
               ADVANCE_BATCH();
               clearMask &= ~bufBit;    /* turn off bit, for faster loop exit */
            }
         }
      }
      intel_batchbuffer_flush(intel->batch);
   }

   UNLOCK_HARDWARE(intel);
}

View file

@ -0,0 +1 @@
../intel/intel_blit.c

View file

@ -1,268 +0,0 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "imports.h"
#include "mtypes.h"
#include "bufferobj.h"
#include "intel_context.h"
#include "intel_buffer_objects.h"
#include "intel_regions.h"
#include "dri_bufmgr.h"
/** Allocate a dri_bo sized for the buffer object's current data store. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = dri_bo_alloc(intel->intelScreen->bufmgr,
                                    "bufferobj",
                                    intel_obj->Base.Size,
                                    64,
                                    DRM_BO_FLAG_MEM_TT);
}
/**
* There is some duplication between mesa's bufferobjects and our
* bufmgr buffers. Both have an integer handle and a hashtable to
* lookup an opaque structure. It would be nice if the handles and
 * internal structure were somehow shared.
*/
/* Driver hook for glGenBuffers-style object creation: allocate the
 * wrapper struct; the dri_bo itself is allocated lazily (buffer stays
 * NULL until data is specified).
 */
static struct gl_buffer_object *
intel_bufferobj_alloc(GLcontext * ctx, GLuint name, GLenum target)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);

   _mesa_initialize_buffer_object(&obj->Base, name, target);

   obj->buffer = NULL;

   return &obj->Base;
}
/* Break the COW tie to the region. The region gets to keep the data.
*/
/* Break the COW tie to the region.  The region gets to keep the data:
 * the pbo drops both its region pointer and its shared dri_bo reference.
 */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   /* pbo and region must currently share the same dri_bo. */
   assert(intel_obj->region->buffer == intel_obj->buffer);

   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   dri_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}
/* Break the COW tie to the region.  Both the pbo and the region end
 * up with a copy of the data.
 */
void
intel_bufferobj_cow(struct intel_context *intel,
                    struct intel_buffer_object *intel_obj)
{
   assert(intel_obj->region);
   /* Duplicates the shared storage so each side owns a private copy. */
   intel_region_cow(intel->intelScreen, intel_obj->region);
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   if (intel_obj->region) {
      /* Detaching from the region also unreferences the shared buffer. */
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      dri_bo_unreference(intel_obj->buffer);
   }

   _mesa_free(intel_obj);
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via glBufferDataARB().
 *
 * \param size  new size of the buffer store, in bytes
 * \param data  initial contents, or NULL to leave the store undefined
 * \param usage expected usage hint, recorded on the gl_buffer_object
 */
static void
intel_bufferobj_data(GLcontext * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   /* Throw the old buffer away only if the size changed; a same-size
    * buffer can be reused for the new data.
    */
   if (intel_obj->buffer != NULL && intel_obj->buffer->size != size) {
      dri_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }

   /* BUGFIX: the allocation used to be unconditional, leaking the old
    * buffer whenever it was kept above because the size matched.
    */
   if (intel_obj->buffer == NULL)
      intel_bufferobj_alloc_buffer(intel, intel_obj);

   if (data != NULL)
      dri_bo_subdata(intel_obj->buffer, 0, size, data);
}
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(GLcontext * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Writing into shared COW storage: un-share it first. */
   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   dri_bo_subdata(intel_obj->buffer, offset, size, data);
}
/**
 * Called via glGetBufferSubDataARB().
 * Copies 'size' bytes starting at 'offset' out of the buffer into 'data'.
 */
static void
intel_bufferobj_get_subdata(GLcontext * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   /* Pure read: no need to break any COW region tie. */
   dri_bo_get_subdata(intel_obj->buffer, offset, size, data);
}
/**
 * Called via glMapBufferARB().
 * Returns a CPU pointer to the buffer contents (also stored in
 * obj->Pointer), or NULL if there is no backing buffer.
 */
static void *
intel_bufferobj_map(GLcontext * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* XXX: Translate access to flags arg below:
    */
   assert(intel_obj);

   /* Un-share COW data before handing out a pointer into it. */
   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* NOTE(review): mapped writable regardless of 'access' — see XXX above. */
   dri_bo_map(intel_obj->buffer, GL_TRUE);
   obj->Pointer = intel_obj->buffer->virtual;
   return obj->Pointer;
}
/**
 * Called via glUnmapBufferARB().
 * Releases the CPU mapping established by intel_bufferobj_map().
 */
static GLboolean
intel_bufferobj_unmap(GLcontext * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   if (intel_obj->buffer != NULL) {
      /* A buffer with no mapping outstanding indicates a state bug. */
      assert(obj->Pointer);
      dri_bo_unmap(intel_obj->buffer);
      obj->Pointer = NULL;
   }
   return GL_TRUE;
}
/* Return the dri_bo backing this buffer object, first breaking any COW
 * region tie according to the intended use:
 *   INTEL_WRITE_PART - keep existing data (copy-on-write),
 *   INTEL_WRITE_FULL - whole buffer will be overwritten, so drop the
 *                      region and allocate a fresh buffer instead.
 */
dri_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj, GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
         intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   return intel_obj->buffer;
}
void
intel_bufferobj_init(struct intel_context *intel)
{
GLcontext *ctx = &intel->ctx;
ctx->Driver.NewBufferObject = intel_bufferobj_alloc;
ctx->Driver.DeleteBuffer = intel_bufferobj_free;
ctx->Driver.BufferData = intel_bufferobj_data;
ctx->Driver.BufferSubData = intel_bufferobj_subdata;
ctx->Driver.GetBufferSubData = intel_bufferobj_get_subdata;
ctx->Driver.MapBuffer = intel_bufferobj_map;
ctx->Driver.UnmapBuffer = intel_bufferobj_unmap;
}

View file

@ -0,0 +1 @@
../intel/intel_buffer_objects.c

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1 @@
../intel/intel_buffers.c

View file

@ -1,833 +0,0 @@
/**************************************************************************
*
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <unistd.h>
#include "glthread.h"
#include "errno.h"
#include "mtypes.h"
#include "dri_bufmgr.h"
#include "string.h"
#include "imports.h"
#include "i915_drm.h"
#include "intel_bufmgr_ttm.h"
#define BUFMGR_DEBUG 0
/* One pending relocation, before being packed into a reloc buffer. */
struct intel_reloc_info
{
   GLuint type;          /* relocation type, e.g. I915_RELOC_TYPE_0 */
   GLuint reloc;         /* offset of the dword to patch */
   GLuint delta; /* not needed? */
   GLuint index;         /* target's position in the validate list */
   drm_handle_t handle;  /* handle of the buffer the reloc applies to */
};

/* Validate-list entry: a drmBO plus the placement flags/mask that will
 * be passed to the kernel at submit time.
 */
struct intel_bo_node
{
   drmMMListHead head;
   drmBO *buf;
   struct drm_i915_op_arg bo_arg;  /* ioctl op, chained at submit time */
   unsigned long arg0;             /* requested flags */
   unsigned long arg1;             /* mask of flags being requested */
   void (*destroy)(void *);        /* teardown callback for 'priv' */
   void *priv;                     /* the dri_bo wrapping 'buf' */
};

/* One kernel-visible relocation buffer (for a single reloc type). */
struct intel_bo_reloc_list
{
   drmMMListHead head;
   drmBO buf;          /* the reloc buffer object itself */
   uint32_t *relocs;   /* CPU mapping of 'buf' */
};

/* Per-target-buffer collection of reloc lists, one per reloc type. */
struct intel_bo_reloc_node
{
   drmMMListHead head;
   drm_handle_t handle;      /* handle of the buffer holding the relocs */
   uint32_t nr_reloc_types;
   struct intel_bo_reloc_list type_list;  /* first type; rest chained */
};

/* Simple intrusive list with a per-node destroy hook. */
struct intel_bo_list {
   unsigned numCurrent;
   drmMMListHead list;
   void (*destroy)(void *node);
};

/* TTM implementation of dri_bufmgr.  'bufmgr' must stay first so the
 * struct can be cast to/from dri_bufmgr.
 */
typedef struct _dri_bufmgr_ttm {
   dri_bufmgr bufmgr;
   int fd;                          /* DRM device fd */
   _glthread_Mutex mutex;
   unsigned int fence_type;         /* fence type for no-flush fences */
   unsigned int fence_type_flush;   /* fence type when flushing */
   uint32_t max_relocs;             /* capacity of each reloc buffer */
   /** ttm relocation list */
   struct intel_bo_list list;
   struct intel_bo_list reloc_list;
} dri_bufmgr_ttm;

/* dri_bo backed by a kernel drmBO; 'bo' must stay first for casting. */
typedef struct _dri_bo_ttm {
   dri_bo bo;
   int refcount; /* Protected by bufmgr->mutex */
   drmBO drm_bo;
   const char *name;  /* debug label; not owned by this struct */
} dri_bo_ttm;

/* dri_fence backed by a kernel drmFence; 'fence' must stay first. */
typedef struct _dri_fence_ttm
{
   dri_fence fence;
   int refcount; /* Protected by bufmgr->mutex */
   const char *name;  /* debug label; not owned by this struct */
   drmFence drm_fence;
} dri_fence_ttm;
/* Empty a list: unlink each node and hand it to the list's destroy
 * callback.  Re-reads list.next each iteration since DRMLISTDEL just
 * unlinked the node we were holding.
 */
static void intel_bo_free_list(struct intel_bo_list *list)
{
   struct intel_bo_node *node;
   drmMMListHead *l;

   l = list->list.next;
   while (l != &list->list) {
      DRMLISTDEL(l);
      node = DRMLISTENTRY(struct intel_bo_node, l, head);
      list->destroy(node);
      l = list->list.next;
      list->numCurrent--;
   }
}
/* Default node destructor: nodes own no resources beyond themselves. */
static void generic_destroy(void *nodep)
{
   free(nodep);
}
/* Initialize an empty bo list, installing 'destroy' as the node
 * destructor (or the generic free() wrapper when none is given).
 * Always succeeds; returns 0.
 */
static int intel_create_bo_list(int numTarget, struct intel_bo_list *list,
                                void (*destroy) (void *))
{
   DRMINITLISTHEAD(&list->list);
   list->numCurrent = 0;
   list->destroy = destroy ? destroy : generic_destroy;
   return 0;
}
/* Build the chained drm_i915_op_arg list consumed by the kernel:
 * one validate op per buffer on the validate list, each pointing at
 * that buffer's relocation buffer (if any).  Returns the head of the
 * chain, or 0 if the list is empty; *count_p receives the entry count
 * (only written when the list is non-empty).
 */
static struct drm_i915_op_arg *
intel_setup_validate_list(int fd, struct intel_bo_list *list,
                          struct intel_bo_list *reloc_list, GLuint *count_p)
{
   struct intel_bo_node *node;
   struct intel_bo_reloc_node *rl_node;
   drmMMListHead *l, *rl;
   struct drm_i915_op_arg *arg, *first;
   struct drm_bo_op_req *req;
   uint64_t *prevNext = NULL;
   GLuint count = 0;

   first = NULL;

   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(struct intel_bo_node, l, head);

      arg = &node->bo_arg;
      req = &arg->d.req;

      if (!first)
         first = arg;

      /* Chain this op onto the previous one. */
      if (prevNext)
         *prevNext = (unsigned long) arg;

      memset(arg, 0, sizeof(*arg));
      prevNext = &arg->next;
      req->bo_req.handle = node->buf->handle;
      req->op = drm_bo_validate;
      req->bo_req.flags = node->arg0;
      req->bo_req.hint = 0;
      req->bo_req.mask = node->arg1;
      req->bo_req.fence_class = 0; /* Backwards compat. */
      arg->reloc_handle = 0;

      /* Attach the reloc buffer built for this bo, if one exists. */
      for (rl = reloc_list->list.next; rl != &reloc_list->list;
           rl = rl->next) {
         rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
         if (rl_node->handle == node->buf->handle) {
            arg->reloc_handle = rl_node->type_list.buf.handle;
         }
      }
      count++;
   }

   if (!first)
      return 0;

   *count_p = count;
   return first;
}
/* Run each validate-list node's destroy callback (which drops the
 * dri_bo reference taken when the buffer was added).  The nodes
 * themselves are freed separately via intel_bo_free_list().
 */
static void intel_free_validate_list(int fd, struct intel_bo_list *list)
{
   struct intel_bo_node *node;
   drmMMListHead *l;

   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(struct intel_bo_node, l, head);

      if (node->destroy)
         (*node->destroy)(node->priv);
   }
}
/* Unmap, unreference and free every relocation node.  Uses a saved
 * 'tmp' pointer because nodes are unlinked and freed during traversal.
 */
static void intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
{
   struct intel_bo_reloc_node *reloc_node;
   drmMMListHead *rl, *tmp;

   for (rl = reloc_list->list.next, tmp = rl->next;
        rl != &reloc_list->list; rl = tmp, tmp = rl->next) {
      reloc_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);

      DRMLISTDEL(rl);

      if (reloc_node->nr_reloc_types > 1) {
         /* TODO */
      }

      drmBOUnmap(fd, &reloc_node->type_list.buf);
      drmBOUnreference(fd, &reloc_node->type_list.buf);
      free(reloc_node);
   }
}
/* Add 'buf' to the validate list, or merge its placement flags/mask
 * into the existing entry for the same drmBO handle.  *itemLoc receives
 * the buffer's index in the list.  Returns 1 if a new entry was added,
 * 0 if an existing entry was merged, or negative errno on failure.
 */
static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf,
                                     unsigned flags, unsigned mask,
                                     int *itemLoc,
                                     void (*destroy_cb) (void *))
{
   struct intel_bo_node *node, *cur;
   drmMMListHead *l;
   int count = 0;
   int ret = 0;
   drmBO *buf_bo = &((dri_bo_ttm *) buf)->drm_bo;
   cur = NULL;

   /* Linear search for an existing entry with the same handle. */
   for (l = list->list.next; l != &list->list; l = l->next) {
      node = DRMLISTENTRY(struct intel_bo_node, l, head);
      if (node->buf->handle == buf_bo->handle) {
         cur = node;
         break;
      }
      count++;
   }

   if (!cur) {
      cur = drmMalloc(sizeof(*cur));
      if (!cur) {
         return -ENOMEM;
      }
      cur->buf = buf_bo;
      cur->priv = buf;
      cur->arg0 = flags;
      cur->arg1 = mask;
      cur->destroy = destroy_cb;
      ret = 1;

      DRMLISTADDTAIL(&cur->head, &list->list);

   } else {
      /* Merge the new request into the existing entry; fail with
       * EINVAL if the placement or usage requirements conflict.
       */
      unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
      unsigned memFlags = cur->arg0 & flags & memMask;

      if (!memFlags) {
         return -EINVAL;
      }
      if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
         return -EINVAL;
      }
      cur->arg1 |= mask;
      cur->arg0 = memFlags | ((cur->arg0 | flags) &
                              cur->arg1 & ~DRM_BO_MASK_MEM);
   }
   *itemLoc = count;
   return ret;
}
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * sizeof(uint32_t))
/* Allocate a kernel-visible relocation buffer sized for 'max_relocs'
 * entries and map it into cur_type->relocs.  Returns 0 on success or
 * the drm error code on failure.
 */
static int intel_create_new_reloc_type_list(int fd,
                                            struct intel_bo_reloc_list *cur_type,
                                            int max_relocs)
{
   int ret;

   ret = drmBOCreate(fd, RELOC_BUF_SIZE(max_relocs), 0,
                     NULL,
                     DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ |
                     DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MAPPABLE |
                     DRM_BO_FLAG_CACHED,
                     0, &cur_type->buf);
   if (ret)
      return ret;

   ret = drmBOMap(fd, &cur_type->buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                  0, (void **) &cur_type->relocs);
   if (ret) {
      /* BUGFIX: don't leak the freshly created buffer on map failure. */
      drmBOUnreference(fd, &cur_type->buf);
      return ret;
   }
   return 0;
}
static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, struct intel_reloc_info *reloc_info, uint32_t max_relocs)
{
struct intel_bo_reloc_node *rl_node, *cur;
drmMMListHead *rl, *l;
int ret = 0;
uint32_t *reloc_start;
int num_relocs;
struct intel_bo_reloc_list *cur_type;
cur = NULL;
for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
if (rl_node->handle == reloc_info->handle) {
cur = rl_node;
break;
}
}
if (!cur) {
cur = malloc(sizeof(*cur));
if (!cur)
return -ENOMEM;
cur->nr_reloc_types = 1;
cur->handle = reloc_info->handle;
cur_type = &cur->type_list;
DRMINITLISTHEAD(&cur->type_list.head);
ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
if (ret) {
return -1;
}
DRMLISTADDTAIL(&cur->head, &reloc_list->list);
cur_type->relocs[0] = 0 | (reloc_info->type << 16);
cur_type->relocs[1] = 0; // next reloc buffer handle is 0
} else {
int found = 0;
if ((cur->type_list.relocs[0] >> 16) == reloc_info->type) {
cur_type = &cur->type_list;
found = 1;
} else {
for (l = cur->type_list.head.next; l != &cur->type_list.head; l = l->next) {
cur_type = DRMLISTENTRY(struct intel_bo_reloc_list, l, head);
if (((cur_type->relocs[0] >> 16) & 0xffff) == reloc_info->type)
found = 1;
break;
}
}
/* didn't find the relocation type */
if (!found) {
cur_type = malloc(sizeof(*cur_type));
if (!cur_type) {
return -ENOMEM;
}
ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
DRMLISTADDTAIL(&cur_type->head, &cur->type_list.head);
cur_type->relocs[0] = (reloc_info->type << 16);
cur_type->relocs[1] = 0;
cur->nr_reloc_types++;
}
}
reloc_start = cur_type->relocs;
num_relocs = (reloc_start[0] & 0xffff);
reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER] = reloc_info->reloc;
reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+1] = reloc_info->delta;
reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+2] = reloc_info->index;
reloc_start[0]++;
if (((reloc_start[0] & 0xffff)) > (max_relocs)) {
return -ENOMEM;
}
return 0;
}
#if 0
int
driFenceSignaled(DriFenceObject * fence, unsigned type)
{
int signaled;
int ret;
if (fence == NULL)
return GL_TRUE;
_glthread_LOCK_MUTEX(fence->mutex);
ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
_glthread_UNLOCK_MUTEX(fence->mutex);
BM_CKFATAL(ret);
return signaled;
}
#endif
/* Allocate a new drmBO of 'size' bytes and wrap it in a dri_bo.
 * Returns NULL if either the wrapper or the kernel object cannot be
 * allocated.
 */
static dri_bo *
dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
              unsigned long size, unsigned int alignment,
              unsigned int location_mask)
{
   dri_bufmgr_ttm *ttm_bufmgr;
   dri_bo_ttm *ttm_buf;
   unsigned int pageSize = getpagesize();
   int ret;
   unsigned int flags, hint;

   ttm_bufmgr = (dri_bufmgr_ttm *) bufmgr;

   ttm_buf = malloc(sizeof(*ttm_buf));
   if (!ttm_buf)
      return NULL;

   /* The mask argument doesn't do anything for us that we want other than
    * determine which pool (TTM or local) the buffer is allocated into, so
    * just pass all of the allocation class flags.
    */
   flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
      DRM_BO_FLAG_EXE;
   /* No hints we want to use. */
   hint = 0;

   /* drmBOCreate takes alignment in pages, not bytes. */
   ret = drmBOCreate(ttm_bufmgr->fd, size, alignment / pageSize,
                     NULL, flags, hint, &ttm_buf->drm_bo);
   if (ret != 0) {
      free(ttm_buf);
      return NULL;
   }
   ttm_buf->bo.size = ttm_buf->drm_bo.size;
   ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
   ttm_buf->bo.virtual = NULL;
   ttm_buf->bo.bufmgr = bufmgr;
   ttm_buf->name = name;
   ttm_buf->refcount = 1;

#if BUFMGR_DEBUG
   fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
#endif

   return &ttm_buf->bo;
}
/* Our TTM backend doesn't allow creation of static buffers, as that requires
 * privilege for the non-fake case, and the lock in the fake case where we
 * were working around the X Server not creating buffers and passing handles
 * to us.
 */
static dri_bo *
dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
                     unsigned long offset, unsigned long size, void *virtual,
                     unsigned int location_mask)
{
   /* Unsupported: always fails; callers must use dri_ttm_alloc(). */
   return NULL;
}
/** Returns a dri_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
dri_bo *
intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
                                unsigned int handle)
{
   dri_bufmgr_ttm *ttm_bufmgr;
   dri_bo_ttm *ttm_buf;
   int ret;

   ttm_bufmgr = (dri_bufmgr_ttm *) bufmgr;

   ttm_buf = malloc(sizeof(*ttm_buf));
   if (!ttm_buf)
      return NULL;

   /* Take a kernel-side reference on the existing object. */
   ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
   if (ret != 0) {
      free(ttm_buf);
      return NULL;
   }
   ttm_buf->bo.size = ttm_buf->drm_bo.size;
   ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
   ttm_buf->bo.virtual = NULL;
   ttm_buf->bo.bufmgr = bufmgr;
   ttm_buf->name = name;
   ttm_buf->refcount = 1;

#if BUFMGR_DEBUG
   fprintf(stderr, "bo_create_from_handle: %p %08x (%s)\n", &ttm_buf->bo,
           handle, ttm_buf->name);
#endif

   return &ttm_buf->bo;
}
/* Take an additional reference on a buffer (refcount is protected by
 * the bufmgr mutex).
 */
static void
dri_ttm_bo_reference(dri_bo *buf)
{
   dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *) buf->bufmgr;
   dri_bo_ttm *ttm_buf = (dri_bo_ttm *) buf;

   _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
   ttm_buf->refcount++;
   _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
}
/* Drop a reference on a buffer; frees the wrapper and releases the
 * kernel object when the count reaches zero.  NULL is a no-op.
 */
static void
dri_ttm_bo_unreference(dri_bo *buf)
{
   dri_bufmgr_ttm *bufmgr_ttm;
   dri_bo_ttm *ttm_buf;

   /* BUGFIX: the NULL check used to come after buf->bufmgr was read,
    * making it ineffective (NULL dereference).
    */
   if (!buf)
      return;

   bufmgr_ttm = (dri_bufmgr_ttm *) buf->bufmgr;
   ttm_buf = (dri_bo_ttm *) buf;

   _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
   if (--ttm_buf->refcount == 0) {
      int ret;

      ret = drmBOUnreference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
      if (ret != 0) {
         fprintf(stderr, "drmBOUnreference failed (%s): %s\n", ttm_buf->name,
                 strerror(-ret));
      }
#if BUFMGR_DEBUG
      fprintf(stderr, "bo_unreference final: %p (%s)\n",
              &ttm_buf->bo, ttm_buf->name);
#endif
      _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
      free(buf);
      return;
   }
   _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
}
/* Map the buffer into CPU address space (read-only, or read/write when
 * write_enable is set).  The pointer is stored in buf->virtual.
 * Returns 0 on success.
 */
static int
dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
{
   dri_bufmgr_ttm *bufmgr_ttm;
   dri_bo_ttm *ttm_buf = (dri_bo_ttm *) buf;
   unsigned int flags;

   bufmgr_ttm = (dri_bufmgr_ttm *) buf->bufmgr;

   flags = DRM_BO_FLAG_READ;
   if (write_enable)
      flags |= DRM_BO_FLAG_WRITE;

   /* Double-mapping would indicate a caller bug. */
   assert(buf->virtual == NULL);

#if BUFMGR_DEBUG
   fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
#endif

   return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
}
/* Release the CPU mapping of a buffer.  NULL is a no-op.  Returns 0 on
 * success.
 */
static int
dri_ttm_bo_unmap(dri_bo *buf)
{
   dri_bufmgr_ttm *bufmgr_ttm;
   dri_bo_ttm *ttm_buf = (dri_bo_ttm *) buf;

   if (buf == NULL)
      return 0;

   bufmgr_ttm = (dri_bufmgr_ttm *) buf->bufmgr;

   /* Unmapping an unmapped buffer indicates a caller bug. */
   assert(buf->virtual != NULL);

   buf->virtual = NULL;

#if BUFMGR_DEBUG
   fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
#endif

   return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
}
/* Returns a dri_fence wrapping the kernel fence described by 'arg'.
 * (The drm_fence fields are copied; 'signaled' starts at 0.)
 * Returns NULL on allocation failure.
 */
dri_fence *
intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
                                drm_fence_arg_t *arg)
{
   dri_bufmgr_ttm *ttm_bufmgr;
   dri_fence_ttm *ttm_fence;

   ttm_bufmgr = (dri_bufmgr_ttm *) bufmgr;

   ttm_fence = malloc(sizeof(*ttm_fence));
   if (!ttm_fence)
      return NULL;

   ttm_fence->drm_fence.handle = arg->handle;
   ttm_fence->drm_fence.fence_class = arg->fence_class;
   ttm_fence->drm_fence.type = arg->type;
   ttm_fence->drm_fence.flags = arg->flags;
   ttm_fence->drm_fence.signaled = 0;
   ttm_fence->drm_fence.sequence = arg->sequence;

   ttm_fence->fence.bufmgr = bufmgr;
   ttm_fence->name = name;
   ttm_fence->refcount = 1;

#if BUFMGR_DEBUG
   fprintf(stderr, "fence_create_from_handle: %p (%s)\n", &ttm_fence->fence,
           ttm_fence->name);
#endif

   return &ttm_fence->fence;
}
/* Take an additional reference on a fence (refcount is protected by
 * the bufmgr mutex).
 */
static void
dri_ttm_fence_reference(dri_fence *fence)
{
   dri_fence_ttm *fence_ttm = (dri_fence_ttm *) fence;
   dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *) fence->bufmgr;

   _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
   ++fence_ttm->refcount;
   _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
#if BUFMGR_DEBUG
   fprintf(stderr, "fence_reference: %p (%s)\n", &fence_ttm->fence,
           fence_ttm->name);
#endif
}
/* Drop a reference on a fence; frees the wrapper and releases the
 * kernel fence when the count reaches zero.  NULL is a no-op.
 */
static void
dri_ttm_fence_unreference(dri_fence *fence)
{
   dri_fence_ttm *fence_ttm;
   dri_bufmgr_ttm *bufmgr_ttm;

   /* BUGFIX: the NULL check used to come after fence->bufmgr was read,
    * making it ineffective (NULL dereference).
    */
   if (!fence)
      return;

   fence_ttm = (dri_fence_ttm *) fence;
   bufmgr_ttm = (dri_bufmgr_ttm *) fence->bufmgr;

#if BUFMGR_DEBUG
   fprintf(stderr, "fence_unreference: %p (%s)\n", &fence_ttm->fence,
           fence_ttm->name);
#endif

   _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
   if (--fence_ttm->refcount == 0) {
      int ret;

      ret = drmFenceUnreference(bufmgr_ttm->fd, &fence_ttm->drm_fence);
      if (ret != 0) {
         fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
                 fence_ttm->name, strerror(-ret));
      }

      _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
      free(fence);
      return;
   }
   _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
}
/* Block until the fence signals.  A failed wait is fatal (abort),
 * since continuing would leave CPU and GPU out of sync.
 */
static void
dri_ttm_fence_wait(dri_fence *fence)
{
   dri_fence_ttm *fence_ttm = (dri_fence_ttm *) fence;
   dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *) fence->bufmgr;
   int ret;

   _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
   ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
   _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
   if (ret != 0) {
      _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
                   __FILE__, __LINE__, ret, fence_ttm->name);
      abort();
   }

#if BUFMGR_DEBUG
   fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
           fence_ttm->name);
#endif
}
/* Tear down the buffer manager: empty both bookkeeping lists, destroy
 * the mutex, and free the manager itself.
 */
static void
dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
{
   dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *) bufmgr;

   intel_bo_free_list(&bufmgr_ttm->list);
   intel_bo_free_list(&bufmgr_ttm->reloc_list);

   _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
   free(bufmgr);
}
static void intel_dribo_destroy_callback(void *priv)
{
dri_bo *dribo = priv;
if (dribo) {
dri_bo_unreference(dribo);
}
}
/* Record a relocation: ensure 'relocatee' is on the validate list
 * (taking a dri_bo reference when it is newly added), then append a
 * type-0 reloc entry for dword 'offset' of the batch buffer.
 */
static void
dri_ttm_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta,
                   GLuint offset, dri_bo *relocatee)
{
   dri_bo_ttm *ttm_buf = (dri_bo_ttm *) batch_buf;
   dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *) batch_buf->bufmgr;
   int newItem;
   struct intel_reloc_info reloc;
   int mask;
   int ret;

   /* Only constrain memory placement plus the R/W/X usage bits. */
   mask = DRM_BO_MASK_MEM;
   mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);

   ret = intel_add_validate_buffer(&bufmgr_ttm->list, relocatee, flags, mask,
                                   &newItem, intel_dribo_destroy_callback);
   if (ret < 0)
      return;

   /* ret == 1 means a new list entry: pin it with a reference, released
    * later by intel_dribo_destroy_callback().
    */
   if (ret == 1) {
      dri_bo_reference(relocatee);
   }

   reloc.type = I915_RELOC_TYPE_0;
   reloc.reloc = offset;
   reloc.delta = delta;
   reloc.index = newItem;
   reloc.handle = ttm_buf->drm_bo.handle;

   intel_add_validate_reloc(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list, &reloc,
                            bufmgr_ttm->max_relocs);
   return;
}
/* Prepare for submission: unmap the batch buffer, add it to the
 * validate list (TT placement, executable), and build the chained
 * ioctl argument list.  *count receives the number of buffers.
 */
static void *
dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
{
   dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *) batch_buf->bufmgr;
   void *ptr;
   int itemLoc;

   dri_bo_unmap(batch_buf);

   intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf,
                             DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
                             DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE,
                             &itemLoc, NULL);

   ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list,
                                   &bufmgr_ttm->reloc_list, count);

   return ptr;
}
/* Post-submission cleanup: release the buffer references and reloc
 * buffers accumulated for this batch, then empty the validate list.
 */
static void
dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
{
   dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *) batch_buf->bufmgr;

   intel_free_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list);
   intel_free_reloc_list(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list);

   intel_bo_free_list(&bufmgr_ttm->list);
}
/**
 * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
 * and manage map buffer objections.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param fence_type Driver-specific fence type used for fences with no flush.
 * \param fence_type_flush Driver-specific fence type used for fences with a
 *        flush.
 * \param batch_size Batchbuffer size; bounds the per-buffer reloc count.
 * \return the new buffer manager, or NULL on allocation failure.
 */
dri_bufmgr *
intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
                      unsigned int fence_type_flush, int batch_size)
{
   dri_bufmgr_ttm *bufmgr_ttm;

   bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
   /* BUGFIX: the allocation result was used without a NULL check. */
   if (!bufmgr_ttm)
      return NULL;

   bufmgr_ttm->fd = fd;
   bufmgr_ttm->fence_type = fence_type;
   bufmgr_ttm->fence_type_flush = fence_type_flush;
   _glthread_INIT_MUTEX(bufmgr_ttm->mutex);

   /* lets go with one relocation per every four dwords - purely heuristic */
   bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 4;

   intel_create_bo_list(10, &bufmgr_ttm->list, NULL);
   intel_create_bo_list(1, &bufmgr_ttm->reloc_list, NULL);

   bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
   bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
   bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
   bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
   bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
   bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
   bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
   bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
   bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
   bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
   bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
   bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
   bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;

   return &bufmgr_ttm->bufmgr;
}

View file

@ -0,0 +1 @@
../intel/intel_bufmgr_ttm.c

View file

@ -245,6 +245,7 @@ static const struct dri_debug_control debug_control[] = {
{"reg", DEBUG_REGION},
{"fbo", DEBUG_FBO},
{"lock", DEBUG_LOCK},
{"sync", DEBUG_SYNC},
{NULL, 0}
};
@ -278,37 +279,6 @@ intelFlush(GLcontext * ctx)
*/
}
/**
* Check if we need to rotate/warp the front color buffer to the
* rotated screen. We generally need to do this when we get a glFlush
* or glFinish after drawing to the front color buffer.
*/
static void
intelCheckFrontRotate(GLcontext * ctx)
{
struct intel_context *intel = intel_context(ctx);
if (intel->ctx.DrawBuffer->_ColorDrawBufferMask[0] ==
BUFFER_BIT_FRONT_LEFT) {
intelScreenPrivate *screen = intel->intelScreen;
if (screen->current_rotation != 0) {
__DRIdrawablePrivate *dPriv = intel->driDrawable;
intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
}
}
}
/**
* Called via glFlush.
*/
static void
intelglFlush(GLcontext * ctx)
{
intelFlush(ctx);
intelCheckFrontRotate(ctx);
}
void
intelFinish(GLcontext * ctx)
{
@ -319,7 +289,6 @@ intelFinish(GLcontext * ctx)
dri_fence_unreference(intel->batch->last_fence);
intel->batch->last_fence = NULL;
}
intelCheckFrontRotate(ctx);
}
@ -328,7 +297,7 @@ intelInitDriverFunctions(struct dd_function_table *functions)
{
_mesa_init_driver_functions(functions);
functions->Flush = intelglFlush;
functions->Flush = intelFlush;
functions->Finish = intelFinish;
functions->GetString = intelGetString;
functions->UpdateState = intelInvalidateState;
@ -371,7 +340,6 @@ intelInitContext(struct intel_context *intel,
intel->width = intelScreen->width;
intel->height = intelScreen->height;
intel->current_rotation = intelScreen->current_rotation;
if (!lockMutexInit) {
lockMutexInit = GL_TRUE;
@ -492,6 +460,8 @@ intelInitContext(struct intel_context *intel,
#if DO_DEBUG
INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
if (!intel->intelScreen->ttm && (INTEL_DEBUG & DEBUG_BUFMGR))
dri_bufmgr_fake_set_debug(intel->intelScreen->bufmgr, GL_TRUE);
#endif
if (getenv("INTEL_NO_RAST")) {
@ -676,16 +646,8 @@ intelContendedLock(struct intel_context *intel, GLuint flags)
intel_decode_context_reset();
}
if (sarea->width != intelScreen->width ||
sarea->height != intelScreen->height ||
sarea->rotation != intelScreen->current_rotation) {
intelUpdateScreenRotation(sPriv, sarea);
}
if (sarea->width != intel->width ||
sarea->height != intel->height ||
sarea->rotation != intel->current_rotation) {
sarea->height != intel->height) {
int numClipRects = intel->numClipRects;
/*
@ -713,7 +675,6 @@ intelContendedLock(struct intel_context *intel, GLuint flags)
intel->width = sarea->width;
intel->height = sarea->height;
intel->current_rotation = sarea->rotation;
}
/* Drawable changed?

View file

@ -36,6 +36,7 @@
#include "texmem.h"
#include "intel_screen.h"
#include "intel_tex_obj.h"
#include "i915_drm.h"
#include "i830_common.h"
#include "tnl/t_vertex.h"
@ -73,49 +74,6 @@ extern void intelFallback(struct intel_context *intel, GLuint bit,
#define INTEL_WRITE_FULL 0x2
#define INTEL_READ 0x4
struct intel_texture_object
{
struct gl_texture_object base; /* The "parent" object */
/* The mipmap tree must include at least these levels once
* validated:
*/
GLuint firstLevel;
GLuint lastLevel;
/* Offset for firstLevel image:
*/
GLuint textureOffset;
/* On validation any active images held in main memory or in other
* regions will be copied to this region and the old storage freed.
*/
struct intel_mipmap_tree *mt;
GLboolean imageOverride;
GLint depthOverride;
GLuint pitchOverride;
};
struct intel_texture_image
{
struct gl_texture_image base;
/* These aren't stored in gl_texture_image
*/
GLuint level;
GLuint face;
/* If intelImage->mt != NULL, image data is stored here.
* Else if intelImage->base.Data != NULL, image is stored there.
* Else there is no image data.
*/
struct intel_mipmap_tree *mt;
};
#define INTEL_MAX_FIXUP 64
struct intel_context
@ -174,8 +132,6 @@ struct intel_context
GLuint pitch,
GLuint height,
GLenum format, GLenum type);
void (*rotate_window) (struct intel_context * intel,
__DRIdrawablePrivate * dPriv, GLuint srcBuf);
void (*assert_not_dirty) (struct intel_context *intel);
@ -189,6 +145,7 @@ struct intel_context
dri_fence *first_swap_fence;
struct intel_batchbuffer *batch;
GLuint last_state_batch_id;
struct
{
@ -229,14 +186,6 @@ struct intel_context
GLuint vertex_size;
GLubyte *verts; /* points to tnl->clipspace.vertex_buf */
#if 0
struct intel_region *front_region; /* XXX FBO: obsolete */
struct intel_region *rotated_region; /* XXX FBO: obsolete */
struct intel_region *back_region; /* XXX FBO: obsolete */
struct intel_region *draw_region; /* XXX FBO: rename to color_region */
struct intel_region *depth_region; /**< currently bound depth/Z region */
#endif
/* Fallback rasterization functions
*/
intel_point_func draw_point;
@ -272,13 +221,9 @@ struct intel_context
*/
driOptionCache optionCache;
/* Rotation. Need to match that of the
* current screen.
*/
int width;
int height;
int current_rotation;
/* Last seen width/height of the screen */
int width;
int height;
};
/* These are functions now:
@ -369,6 +314,7 @@ extern int INTEL_DEBUG;
#define DEBUG_REGION 0x400
#define DEBUG_FBO 0x800
#define DEBUG_LOCK 0x1000
#define DEBUG_SYNC 0x2000
#define DBG(...) do { if (INTEL_DEBUG & FILE_DEBUG_FLAG) _mesa_printf(__VA_ARGS__); } while(0)
@ -478,20 +424,4 @@ intel_context(GLcontext * ctx)
return (struct intel_context *) ctx;
}
static INLINE struct intel_texture_object *
intel_texture_object(struct gl_texture_object *obj)
{
return (struct intel_texture_object *) obj;
}
static INLINE struct intel_texture_image *
intel_texture_image(struct gl_texture_image *img)
{
return (struct intel_texture_image *) img;
}
extern struct intel_renderbuffer *intel_renderbuffer(struct gl_renderbuffer
*rb);
#endif

View file

@ -1,282 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "glheader.h"
#include "imports.h"
#include "context.h"
#include "depthstencil.h"
#include "fbobject.h"
#include "framebuffer.h"
#include "hash.h"
#include "mtypes.h"
#include "renderbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_depthstencil.h"
#include "intel_regions.h"
/**
* The GL_EXT_framebuffer_object allows the user to create their own
* framebuffer objects consisting of color renderbuffers (0 or more),
* depth renderbuffers (0 or 1) and stencil renderbuffers (0 or 1).
*
* The spec considers depth and stencil renderbuffers to be totally independent
* buffers. In reality, most graphics hardware today uses a combined
* depth+stencil buffer (one 32-bit pixel = 24 bits of Z + 8 bits of stencil).
*
* This causes difficulty because the user may create some number of depth
* renderbuffers and some number of stencil renderbuffers and bind them
* together in framebuffers in any combination.
*
* This code manages all that.
*
* 1. Depth renderbuffers are always allocated in hardware as 32bpp
* GL_DEPTH24_STENCIL8 buffers.
*
* 2. Stencil renderbuffers are initially allocated in software as 8bpp
* GL_STENCIL_INDEX8 buffers.
*
* 3. Depth and Stencil renderbuffers use the PairedStencil and PairedDepth
* fields (respectively) to indicate if the buffer's currently paired
* with another stencil or depth buffer (respectively).
*
* 4. When a depth and stencil buffer are initially both attached to the
* current framebuffer, we merge the stencil buffer values into the
* depth buffer (really a depth+stencil buffer). The then hardware uses
* the combined buffer.
*
* 5. Whenever a depth or stencil buffer is reallocated (with
* glRenderbufferStorage) we undo the pairing and copy the stencil values
* from the combined depth/stencil buffer back to the stencil-only buffer.
*
* 6. We also undo the pairing when we find a change in buffer bindings.
*
* 7. If a framebuffer is only using a depth renderbuffer (no stencil), we
* just use the combined depth/stencil buffer and ignore the stencil values.
*
* 8. If a framebuffer is only using a stencil renderbuffer (no depth) we have
* to promote the 8bpp software stencil buffer to a 32bpp hardware
* depth+stencil buffer.
*
*/
/**
 * Map the hardware regions of the given depth and/or stencil renderbuffers
 * and record the resulting pointer/pitch in each renderbuffer's pfMap and
 * pfPitch fields.  Either renderbuffer pointer may be NULL.
 */
static void
map_regions(GLcontext * ctx,
            struct intel_renderbuffer *depthRb,
            struct intel_renderbuffer *stencilRb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *rbs[2];
   int i;

   rbs[0] = depthRb;
   rbs[1] = stencilRb;

   for (i = 0; i < 2; i++) {
      struct intel_renderbuffer *rb = rbs[i];
      if (rb && rb->region) {
         intel_region_map(intel->intelScreen, rb->region);
         rb->pfMap = rb->region->map;
         rb->pfPitch = rb->region->pitch;
      }
   }
}
/**
 * Unmap the hardware regions of the given depth and/or stencil
 * renderbuffers and clear their pfMap/pfPitch fields.
 * Either renderbuffer pointer may be NULL.  Counterpart to map_regions().
 */
static void
unmap_regions(GLcontext * ctx,
              struct intel_renderbuffer *depthRb,
              struct intel_renderbuffer *stencilRb)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *rbs[2];
   int i;

   rbs[0] = depthRb;
   rbs[1] = stencilRb;

   for (i = 0; i < 2; i++) {
      struct intel_renderbuffer *rb = rbs[i];
      if (rb && rb->region) {
         intel_region_unmap(intel->intelScreen, rb->region);
         rb->pfMap = NULL;
         rb->pfPitch = 0;
      }
   }
}
/**
 * Undo the pairing/interleaving between depth and stencil buffers.
 * irb should be a depth/stencil or stencil renderbuffer.
 *
 * The PairedStencil/PairedDepth fields hold the GL name of the partner
 * renderbuffer (or 0 when unpaired).  When a pairing exists, the stencil
 * values currently live interleaved inside the depth/stencil buffer, so
 * they must be copied back into the separate stencil buffer before the
 * pairing is dissolved.  On return, both buffers are unpaired.
 */
void
intel_unpair_depth_stencil(GLcontext * ctx, struct intel_renderbuffer *irb)
{
   if (irb->PairedStencil) {
      /* irb is a depth/stencil buffer */
      struct gl_renderbuffer *stencilRb;
      struct intel_renderbuffer *stencilIrb;

      ASSERT(irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);

      /* look up the partner stencil buffer by its GL name */
      stencilRb = _mesa_lookup_renderbuffer(ctx, irb->PairedStencil);
      stencilIrb = intel_renderbuffer(stencilRb);
      if (stencilIrb) {
         /* need to extract stencil values from the depth buffer */
         ASSERT(stencilIrb->PairedDepth == irb->Base.Name);
         map_regions(ctx, irb, stencilIrb);
         _mesa_extract_stencil(ctx, &irb->Base, &stencilIrb->Base);
         unmap_regions(ctx, irb, stencilIrb);
         stencilIrb->PairedDepth = 0;
      }
      /* clear the pairing even if the partner was not found (it may have
       * been deleted) so the stale name is not used again */
      irb->PairedStencil = 0;
   }
   else if (irb->PairedDepth) {
      /* irb is a stencil buffer */
      struct gl_renderbuffer *depthRb;
      struct intel_renderbuffer *depthIrb;

      ASSERT(irb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
             irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);

      /* look up the partner depth buffer by its GL name */
      depthRb = _mesa_lookup_renderbuffer(ctx, irb->PairedDepth);
      depthIrb = intel_renderbuffer(depthRb);
      if (depthIrb) {
         /* need to extract stencil values from the depth buffer */
         ASSERT(depthIrb->PairedStencil == irb->Base.Name);
         map_regions(ctx, depthIrb, irb);
         _mesa_extract_stencil(ctx, &depthIrb->Base, &irb->Base);
         unmap_regions(ctx, depthIrb, irb);
         depthIrb->PairedStencil = 0;
      }
      irb->PairedDepth = 0;
   }
   else {
      /* caller should only invoke this on a buffer that is actually paired */
      _mesa_problem(ctx, "Problem in undo_depth_stencil_pairing");
   }

   ASSERT(irb->PairedStencil == 0);
   ASSERT(irb->PairedDepth == 0);
}
/**
 * Examine the depth and stencil renderbuffers which are attached to the
 * framebuffer.  If both depth and stencil are attached, make sure that the
 * renderbuffers are 'paired' (combined).  If only depth or only stencil is
 * attached, undo any previous pairing.
 *
 * Must be called if NewState & _NEW_BUFFER (when renderbuffer attachments
 * change, for example).
 *
 * See the file-header comment for the full description of the
 * depth/stencil pairing scheme this implements.
 */
void
intel_validate_paired_depth_stencil(GLcontext * ctx,
                                    struct gl_framebuffer *fb)
{
   struct intel_renderbuffer *depthRb, *stencilRb;

   depthRb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   stencilRb = intel_get_renderbuffer(fb, BUFFER_STENCIL);

   if (depthRb && stencilRb) {
      if (depthRb == stencilRb) {
         /* Using a user-created combined depth/stencil buffer.
          * Nothing to do.
          */
         ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_STENCIL_EXT);
         ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
      }
      else {
         /* Separate depth/stencil buffers, need to interleave now */
         ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_COMPONENT);
         ASSERT(stencilRb->Base._BaseFormat == GL_STENCIL_INDEX);
         /* may need to interleave depth/stencil now */
         if (depthRb->PairedStencil == stencilRb->Base.Name) {
            /* OK, the depth and stencil buffers are already interleaved */
            ASSERT(stencilRb->PairedDepth == depthRb->Base.Name);
         }
         else {
            /* need to setup new pairing/interleaving */
            /* break any pairings either buffer had with other buffers */
            if (depthRb->PairedStencil) {
               intel_unpair_depth_stencil(ctx, depthRb);
            }
            if (stencilRb->PairedDepth) {
               intel_unpair_depth_stencil(ctx, stencilRb);
            }
            ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
            ASSERT(stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
                   stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
            /* establish new pairing: interleave stencil into depth buffer */
            map_regions(ctx, depthRb, stencilRb);
            _mesa_insert_stencil(ctx, &depthRb->Base, &stencilRb->Base);
            unmap_regions(ctx, depthRb, stencilRb);
            /* record the pairing in both directions via GL names */
            depthRb->PairedStencil = stencilRb->Base.Name;
            stencilRb->PairedDepth = depthRb->Base.Name;
         }
      }
   }
   else if (depthRb) {
      /* Depth buffer but no stencil buffer.
       * We'll use a GL_DEPTH24_STENCIL8 buffer and ignore the stencil bits.
       */
      /* can't assert this until storage is allocated:
         ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
      */
      /* intel_undo any previous pairing */
      if (depthRb->PairedStencil) {
         intel_unpair_depth_stencil(ctx, depthRb);
      }
   }
   else if (stencilRb) {
      /* Stencil buffer but no depth buffer.
       * Since h/w doesn't typically support just 8bpp stencil w/out Z,
       * we'll use a GL_DEPTH24_STENCIL8 buffer and ignore the depth bits.
       */
      /* undo any previous pairing */
      if (stencilRb->PairedDepth) {
         intel_unpair_depth_stencil(ctx, stencilRb);
      }
      if (stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT) {
         /* promote buffer to GL_DEPTH24_STENCIL8 for hw rendering */
         _mesa_promote_stencil(ctx, &stencilRb->Base);
         ASSERT(stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
      }
   }

   /* Finally, update the fb->_DepthBuffer and fb->_StencilBuffer fields */
   _mesa_update_depth_buffer(ctx, fb, BUFFER_DEPTH);
   /* if paired, the stencil values live in the depth attachment's buffer */
   if (depthRb && depthRb->PairedStencil)
      _mesa_update_stencil_buffer(ctx, fb, BUFFER_DEPTH);
   else
      _mesa_update_stencil_buffer(ctx, fb, BUFFER_STENCIL);

   /* The hardware should use fb->Attachment[BUFFER_DEPTH].Renderbuffer
    * first, if present, then fb->Attachment[BUFFER_STENCIL].Renderbuffer
    * if present.
    */
}

View file

@ -0,0 +1 @@
../intel/intel_depthstencil.c

View file

@ -1,687 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "imports.h"
#include "mtypes.h"
#include "fbobject.h"
#include "framebuffer.h"
#include "renderbuffer.h"
#include "context.h"
#include "texformat.h"
#include "texrender.h"
#include "intel_context.h"
#include "intel_buffers.h"
#include "intel_depthstencil.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_span.h"
#define FILE_DEBUG_FLAG DEBUG_FBO
#define INTEL_RB_CLASS 0x12345678
/* XXX FBO: move this to intel_context.h (inlined) */
/**
 * Return a gl_renderbuffer ptr casted to intel_renderbuffer.
 * NULL will be returned if the rb isn't really an intel_renderbuffer,
 * which is determined by checking the renderbuffer's ClassID against
 * INTEL_RB_CLASS.
 */
struct intel_renderbuffer *
intel_renderbuffer(struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;

   if (irb == NULL || irb->Base.ClassID != INTEL_RB_CLASS)
      return NULL;

   return irb;
}
/**
 * Get the intel_renderbuffer attached to the framebuffer at the given
 * attachment index, or NULL if none (or it is not an intel renderbuffer).
 */
struct intel_renderbuffer *
intel_get_renderbuffer(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct gl_renderbuffer *rb = fb->Attachment[attIndex].Renderbuffer;
   return intel_renderbuffer(rb);
}
/**
 * Point the framebuffer's front-left and back-left attachments at the
 * color renderbuffers that correspond to the current and next page of a
 * page-flip cycle (pf_current_page, modulo pf_num_pages).
 */
void
intel_flip_renderbuffers(struct intel_framebuffer *intel_fb)
{
   int current_page = intel_fb->pf_current_page;
   int next_page = (current_page + 1) % intel_fb->pf_num_pages;
   struct gl_renderbuffer *tmp_rb;

   /* Exchange renderbuffers if necessary but make sure their reference counts
    * are preserved.
    */
   if (intel_fb->color_rb[current_page] &&
       intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
       &intel_fb->color_rb[current_page]->Base) {
      /* hold a reference to the outgoing front-left renderbuffer */
      tmp_rb = NULL;
      _mesa_reference_renderbuffer(&tmp_rb,
         intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
      /* NOTE(review): this plain assignment overwrites tmp_rb without
       * going through _mesa_reference_renderbuffer, discarding the
       * reference taken just above.  The increments/decrements across the
       * whole sequence appear to balance, but confirm against
       * _mesa_reference_renderbuffer's refcounting semantics.
       */
      tmp_rb = &intel_fb->color_rb[current_page]->Base;
      _mesa_reference_renderbuffer(
         &intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
      _mesa_reference_renderbuffer(&tmp_rb, NULL);
   }

   if (intel_fb->color_rb[next_page] &&
       intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
       &intel_fb->color_rb[next_page]->Base) {
      /* same exchange for the back-left attachment */
      tmp_rb = NULL;
      _mesa_reference_renderbuffer(&tmp_rb,
         intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
      tmp_rb = &intel_fb->color_rb[next_page]->Base;
      _mesa_reference_renderbuffer(
         &intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
      _mesa_reference_renderbuffer(&tmp_rb, NULL);
   }
}
/**
 * Return the hardware region backing the renderbuffer at the given
 * attachment index, or NULL when no intel renderbuffer is attached there.
 */
struct intel_region *
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
   return irb ? irb->region : NULL;
}
/**
 * Create a new framebuffer object.
 *
 * There is no intel-specific drawable state kept in intel_framebuffer for
 * user FBOs at this time, so Mesa's default framebuffer class is used
 * directly.
 */
static struct gl_framebuffer *
intel_new_framebuffer(GLcontext * ctx, GLuint name)
{
   struct gl_framebuffer *fb = _mesa_new_framebuffer(ctx, name);
   return fb;
}
/**
 * Destroy an intel_renderbuffer: dissolve any depth/stencil pairing,
 * release the hardware region, then free the wrapper struct itself.
 */
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
   GET_CURRENT_CONTEXT(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   ASSERT(irb);

   /* copy stencil values back out / break pairing before deletion */
   if (irb->PairedStencil || irb->PairedDepth) {
      intel_unpair_depth_stencil(ctx, irb);
   }

   /* drop our reference to the backing region, if any */
   if (intel && irb->region) {
      intel_region_release(&irb->region);
   }

   _mesa_free(irb);
}
/**
 * Renderbuffer pixel-address hook.
 *
 * Always returns NULL so that all software rendering is forced through
 * the span routines instead of direct memory access.
 */
static void *
intel_get_pointer(GLcontext * ctx, struct gl_renderbuffer *rb,
                  GLint x, GLint y)
{
   (void) ctx;
   (void) rb;
   (void) x;
   (void) y;
   return NULL;
}
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 *
 * Maps the requested internalFormat onto one of the formats the hardware
 * supports (GL_RGB5, GL_RGBA8, GL_DEPTH_COMPONENT16, GL_DEPTH24_STENCIL8),
 * then allocates a hardware region of the matching bytes-per-pixel.
 * Stencil-only formats are backed by a combined depth+stencil buffer (see
 * file-header comment in intel_depthstencil.c).
 *
 * \return GL_TRUE on success, GL_FALSE on bad format or out of memory.
 */
static GLboolean
intel_alloc_renderbuffer_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   GLboolean softwareBuffer = GL_FALSE;  /* currently never set; hw path only */
   int cpp;  /* bytes per pixel of the chosen hardware format */

   ASSERT(rb->Name != 0);  /* user-created renderbuffers only */

   /* choose hardware format; also records bit counts and DataType on rb */
   switch (internalFormat) {
   case GL_R3_G3_B2:
   case GL_RGB4:
   case GL_RGB5:
      rb->_ActualFormat = GL_RGB5;
      rb->DataType = GL_UNSIGNED_BYTE;
      rb->RedBits = 5;
      rb->GreenBits = 6;
      rb->BlueBits = 5;
      cpp = 2;
      break;
   case GL_RGB:
   case GL_RGB8:
   case GL_RGB10:
   case GL_RGB12:
   case GL_RGB16:
   case GL_RGBA:
   case GL_RGBA2:
   case GL_RGBA4:
   case GL_RGB5_A1:
   case GL_RGBA8:
   case GL_RGB10_A2:
   case GL_RGBA12:
   case GL_RGBA16:
      /* all other color formats fall back to 32-bit RGBA */
      rb->_ActualFormat = GL_RGBA8;
      rb->DataType = GL_UNSIGNED_BYTE;
      rb->RedBits = 8;
      rb->GreenBits = 8;
      rb->BlueBits = 8;
      rb->AlphaBits = 8;
      cpp = 4;
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* alloc a depth+stencil buffer */
      rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
      rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
      rb->StencilBits = 8;
      cpp = 4;
      break;
   case GL_DEPTH_COMPONENT16:
      rb->_ActualFormat = GL_DEPTH_COMPONENT16;
      rb->DataType = GL_UNSIGNED_SHORT;
      rb->DepthBits = 16;
      cpp = 2;
      break;
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_COMPONENT24:
   case GL_DEPTH_COMPONENT32:
      /* deeper depth formats use the combined depth+stencil layout */
      rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
      rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
      rb->DepthBits = 24;
      cpp = 4;
      break;
   case GL_DEPTH_STENCIL_EXT:
   case GL_DEPTH24_STENCIL8_EXT:
      rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
      rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
      rb->DepthBits = 24;
      rb->StencilBits = 8;
      cpp = 4;
      break;
   default:
      _mesa_problem(ctx,
                    "Unexpected format in intel_alloc_renderbuffer_storage");
      return GL_FALSE;
   }

   intelFlush(ctx);

   /* free old region */
   if (irb->region) {
      intel_region_release(&irb->region);
   }

   /* allocate new memory region/renderbuffer */
   if (softwareBuffer) {
      return _mesa_soft_renderbuffer_storage(ctx, rb, internalFormat,
                                             width, height);
   }
   else {
      /* Choose a pitch to match hardware requirements:
       */
      GLuint pitch = ((cpp * width + 63) & ~63) / cpp;  /* 64-byte aligned */

      /* alloc hardware renderbuffer */
      DBG("Allocating %d x %d Intel RBO (pitch %d)\n", width,
          height, pitch);

      irb->region = intel_region_alloc(intel->intelScreen, cpp, pitch, height);
      if (!irb->region)
         return GL_FALSE;       /* out of memory? */

      ASSERT(irb->region->buffer);

      rb->Width = width;
      rb->Height = height;

      /* This sets the Get/PutRow/Value functions */
      intel_set_span_functions(&irb->Base);

      return GL_TRUE;
   }
}
/**
 * Called for each hardware renderbuffer when a _window_ is resized.
 * Simply records the new size and format; the underlying storage is the
 * window-system framebuffer.  Not used for user-created renderbuffers!
 */
static GLboolean
intel_alloc_window_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   (void) ctx;

   /* window-system buffers only (user FBO renderbuffers have Name != 0) */
   ASSERT(rb->Name == 0);

   rb->_ActualFormat = internalFormat;
   rb->Width = width;
   rb->Height = height;

   return GL_TRUE;
}
/**
 * Driver ResizeBuffers hook: resize the framebuffer and, for window-system
 * framebuffers, reallocate storage for each color renderbuffer whose size
 * actually changed.
 */
static void
intel_resize_buffers(GLcontext *ctx, struct gl_framebuffer *fb,
                     GLuint width, GLuint height)
{
   struct intel_framebuffer *intel_fb = (struct intel_framebuffer*)fb;
   int i;

   _mesa_resize_framebuffer(ctx, fb, width, height);

   fb->Initialized = GL_TRUE; /* XXX remove someday */

   if (fb->Name != 0) {
      /* user-created FBO: renderbuffers are resized via RenderbufferStorage */
      return;
   }

   /* Make sure all window system renderbuffers are up to date */
   for (i = 0; i < 3; i++) {
      /* Bug fix: test color_rb[i] itself before forming &...->Base.
       * The old code computed the member address through a possibly-NULL
       * pointer first (undefined behavior) and only then checked the
       * resulting pointer.
       */
      if (intel_fb->color_rb[i]) {
         struct gl_renderbuffer *rb = &intel_fb->color_rb[i]->Base;
         /* only resize if size is changing */
         if (rb->Width != width || rb->Height != height) {
            rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
         }
      }
   }
}
/**
 * AllocStorage hook for renderbuffers that wrap texture images; their
 * storage comes from the texture's mipmap tree, so this must never run.
 */
static GLboolean
intel_nop_alloc_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   (void) rb;
   (void) internalFormat;
   (void) width;
   (void) height;
   /* Bug fix: the diagnostic previously named a non-existent function
    * ("intel_op_alloc_storage"), which made the message hard to trace.
    */
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return GL_FALSE;
}
/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 *
 * \param intFormat  hardware format (GL_RGB5, GL_RGBA8, depth/stencil, ...)
 * \param width the screen width
 * \param height the screen height
 * \param offset byte offset of the buffer within the aperture (currently
 *               only used by the #if 0'd static-region code below)
 * \param pitch  buffer pitch in bytes
 * \param cpp    bytes per pixel (overwritten per-format below)
 * \param map    CPU mapping of the buffer, stored in pfMap
 * \return the new renderbuffer, or NULL on error / unexpected format.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(GLenum intFormat, GLsizei width, GLsizei height,
                          int offset, int pitch, int cpp, void *map)
{
   GET_CURRENT_CONTEXT(ctx);

   struct intel_renderbuffer *irb;
   const GLuint name = 0;  /* 0 == window-system renderbuffer */

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;  /* marks rb as an intel_renderbuffer */

   /* fill in format-dependent fields; cpp is recomputed per format */
   switch (intFormat) {
   case GL_RGB5:
      irb->Base._ActualFormat = GL_RGB5;
      irb->Base._BaseFormat = GL_RGBA;
      irb->Base.RedBits = 5;
      irb->Base.GreenBits = 6;
      irb->Base.BlueBits = 5;
      irb->Base.DataType = GL_UNSIGNED_BYTE;
      cpp = 2;
      break;
   case GL_RGBA8:
      irb->Base._ActualFormat = GL_RGBA8;
      irb->Base._BaseFormat = GL_RGBA;
      irb->Base.RedBits = 8;
      irb->Base.GreenBits = 8;
      irb->Base.BlueBits = 8;
      irb->Base.AlphaBits = 8;
      irb->Base.DataType = GL_UNSIGNED_BYTE;
      cpp = 4;
      break;
   case GL_STENCIL_INDEX8_EXT:
      irb->Base._ActualFormat = GL_STENCIL_INDEX8_EXT;
      irb->Base._BaseFormat = GL_STENCIL_INDEX;
      irb->Base.StencilBits = 8;
      irb->Base.DataType = GL_UNSIGNED_BYTE;
      cpp = 1;
      break;
   case GL_DEPTH_COMPONENT16:
      irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
      irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
      irb->Base.DepthBits = 16;
      irb->Base.DataType = GL_UNSIGNED_SHORT;
      cpp = 2;
      break;
   case GL_DEPTH_COMPONENT24:
      /* 24-bit depth is stored in a combined depth+stencil buffer */
      irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
      irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
      irb->Base.DepthBits = 24;
      irb->Base.DataType = GL_UNSIGNED_INT;
      cpp = 4;
      break;
   case GL_DEPTH24_STENCIL8_EXT:
      irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
      irb->Base._BaseFormat = GL_DEPTH_STENCIL_EXT;
      irb->Base.DepthBits = 24;
      irb->Base.StencilBits = 8;
      irb->Base.DataType = GL_UNSIGNED_INT_24_8_EXT;
      cpp = 4;
      break;
   default:
      _mesa_problem(NULL,
                    "Unexpected intFormat in intel_create_renderbuffer");
      return NULL;
   }

   irb->Base.InternalFormat = intFormat;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_window_storage;
   irb->Base.GetPointer = intel_get_pointer;

   /* This sets the Get/PutRow/Value functions */
   intel_set_span_functions(&irb->Base);

   irb->pfMap = map;
   irb->pfPitch = pitch / cpp;  /* in pixels */

#if 00
   irb->region = intel_region_create_static(intel,
                                            DRM_MM_TT,
                                            offset, map, cpp, width, height);
#endif

   return irb;
}
/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(GLcontext * ctx, GLuint name)
{
   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);

   if (irb == NULL) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
   irb->Base.GetPointer = intel_get_pointer;
   /* the span routines are installed later, by the AllocStorage callback */

   return &irb->Base;
}
/**
 * Called via glBindFramebufferEXT().
 * Updates the drawing region when the draw framebuffer binding changes;
 * a pure read-framebuffer rebind requires no work.
 */
static void
intel_bind_framebuffer(GLcontext * ctx, GLenum target,
                       struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
{
   (void) fbread;

   if (target != GL_FRAMEBUFFER_EXT && target != GL_DRAW_FRAMEBUFFER_EXT) {
      /* nothing to do when only GL_READ_FRAMEBUFFER_EXT changes */
      return;
   }

   intel_draw_buffer(ctx, fb);
   /* Integer depth range depends on depth buffer bits */
   ctx->Driver.DepthRange(ctx, ctx->Viewport.Near, ctx->Viewport.Far);
}
/**
 * Called via glFramebufferRenderbufferEXT().
 * Flushes pending rendering, attaches the renderbuffer with Mesa's helper,
 * then revalidates the drawing region for the framebuffer.
 */
static void
intel_framebuffer_renderbuffer(GLcontext * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   const GLuint rbName = rb ? rb->Name : 0;

   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rbName);

   intelFlush(ctx);
   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx, fb);
}
/**
 * When glFramebufferTexture[123]D is called this function sets up the
 * gl_renderbuffer wrapper around the texture image.
 * This will have the region info needed for hardware rendering.
 *
 * Only ARGB8888, RGB565 and Z16 texture formats are renderable here;
 * any other format returns NULL so the caller can fall back to software.
 */
static struct intel_renderbuffer *
intel_wrap_texture(GLcontext * ctx, struct gl_texture_image *texImage)
{
   const GLuint name = ~0;      /* not significant, but distinct for debugging */
   struct intel_renderbuffer *irb;

   /* make an intel_renderbuffer to wrap the texture image */
   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
      return NULL;
   }

   _mesa_init_renderbuffer(&irb->Base, name);
   irb->Base.ClassID = INTEL_RB_CLASS;

   /* map the texture's storage format to a renderbuffer format */
   if (texImage->TexFormat == &_mesa_texformat_argb8888) {
      irb->Base._ActualFormat = GL_RGBA8;
      irb->Base._BaseFormat = GL_RGBA;
      DBG("Render to RGBA8 texture OK\n");
   }
   else if (texImage->TexFormat == &_mesa_texformat_rgb565) {
      irb->Base._ActualFormat = GL_RGB5;
      irb->Base._BaseFormat = GL_RGB;
      DBG("Render to RGB5 texture OK\n");
   }
   else if (texImage->TexFormat == &_mesa_texformat_z16) {
      irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
      irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
      DBG("Render to DEPTH16 texture OK\n");
   }
   else {
      /* unsupported format: clean up and let the caller use swrast */
      DBG("Render to texture BAD FORMAT %d\n",
          texImage->TexFormat->MesaFormat);
      _mesa_free(irb);
      return NULL;
   }

   irb->Base.InternalFormat = irb->Base._ActualFormat;
   irb->Base.Width = texImage->Width;
   irb->Base.Height = texImage->Height;
   irb->Base.DataType = GL_UNSIGNED_BYTE;       /* FBO XXX fix */
   /* copy the channel sizes from the texture's format */
   irb->Base.RedBits = texImage->TexFormat->RedBits;
   irb->Base.GreenBits = texImage->TexFormat->GreenBits;
   irb->Base.BlueBits = texImage->TexFormat->BlueBits;
   irb->Base.AlphaBits = texImage->TexFormat->AlphaBits;
   irb->Base.DepthBits = texImage->TexFormat->DepthBits;

   irb->Base.Delete = intel_delete_renderbuffer;
   irb->Base.AllocStorage = intel_nop_alloc_storage;  /* storage comes from the miptree */
   intel_set_span_functions(&irb->Base);

   irb->RenderToTexture = GL_TRUE;

   return irb;
}
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 *
 * Points the attachment's renderbuffer wrapper at the texture image's
 * hardware region and records the byte offset of the selected
 * level/face/slice in the region's draw_offset.
 */
static void
intel_render_texture(GLcontext * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *newImage
      = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   struct intel_texture_image *intel_image;
   GLuint imageOffset;

   (void) fb;

   ASSERT(newImage);

   if (!irb) {
      /* no intel wrapper on the attachment yet: create one */
      irb = intel_wrap_texture(ctx, newImage);
      if (irb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
      }
      else {
         /* fallback to software rendering */
         _mesa_render_texture(ctx, fb, att);
         return;
      }
   }

   DBG("Begin render texture tid %x tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, newImage->Width, newImage->Height,
       irb->Base.RefCount);

   /* point the renderbufer's region to the texture image region */
   intel_image = intel_texture_image(newImage);
   if (irb->region != intel_image->mt->region) {
      if (irb->region)
         intel_region_release(&irb->region);
      intel_region_reference(&irb->region, intel_image->mt->region);
   }

   /* compute offset of the particular 2D image within the texture region */
   imageOffset = intel_miptree_image_offset(intel_image->mt,
                                            att->CubeMapFace,
                                            att->TextureLevel);

   if (att->Texture->Target == GL_TEXTURE_3D) {
      /* add the offset of the selected slice within the 3D level */
      const GLuint *offsets = intel_miptree_depth_offsets(intel_image->mt,
                                                          att->TextureLevel);
      imageOffset += offsets[att->Zoffset];
   }

   /* store that offset in the region */
   intel_image->mt->region->draw_offset = imageOffset;

   /* update drawing region, etc */
   intel_draw_buffer(ctx, fb);
}
/**
 * Called by Mesa when rendering to a texture is done.
 * Drops the region reference taken by intel_render_texture(), or lets the
 * software fallback clean up if we never wrapped the attachment.
 */
static void
intel_finish_render_texture(GLcontext * ctx,
                            struct gl_renderbuffer_attachment *att)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);

   DBG("End render texture (tid %x) tex %u\n", _glthread_GetID(), att->Texture->Name);

   if (irb != NULL) {
      /* just release the region */
      intel_region_release(&irb->region);
      return;
   }

   if (att->Renderbuffer != NULL) {
      /* software fallback */
      _mesa_finish_render_texture(ctx, att);
      /* XXX FBO: Need to unmap the buffer (or in intelSpanRenderStart???) */
   }
}
/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct intel_context *intel)
{
   /* object creation hooks */
   intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
   intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
   /* binding/attachment hooks */
   intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
   intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
   /* render-to-texture hooks */
   intel->ctx.Driver.RenderTexture = intel_render_texture;
   intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
   /* window resize hook */
   intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
}

View file

@ -0,0 +1 @@
../intel/intel_fbo.c

View file

@ -1,388 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "enums.h"
#define FILE_DEBUG_FLAG DEBUG_MIPTREE
/**
 * Collapse the six cube-map face targets into GL_TEXTURE_CUBE_MAP_ARB,
 * since a single mipmap tree holds all faces; all other targets are
 * returned unchanged.
 */
static GLenum
target_to_target(GLenum target)
{
   GLenum result;

   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      result = GL_TEXTURE_CUBE_MAP_ARB;
      break;
   default:
      result = target;
      break;
   }

   return result;
}
/**
 * Create a mipmap tree describing the image layout for the given
 * target/format/size, compute the chip-specific layout, and allocate the
 * backing hardware region.
 *
 * \param cpp            bytes per pixel (uncompressed formats)
 * \param compress_byte  bytes per compressed block, or 0 if uncompressed
 * \return new tree with refcount 1, or NULL on allocation failure.
 */
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     GLenum internal_format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0, GLuint cpp, GLuint compress_byte)
{
   GLboolean ok;
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);

   /* Bug fix: the calloc() result was previously used without a NULL
    * check, crashing on out-of-memory instead of failing cleanly.
    */
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_lookup_enum_by_nr(internal_format), first_level, last_level);

   mt->target = target_to_target(target);
   mt->internal_format = internal_format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->width0 = width0;
   mt->height0 = height0;
   mt->depth0 = depth0;
   /* for compressed formats, cpp is the compressed block size */
   mt->cpp = compress_byte ? compress_byte : cpp;
   mt->compressed = compress_byte ? 1 : 0;
   mt->refcount = 1;

   /* choose the layout function for this chip generation */
   switch (intel->intelScreen->deviceID) {
   case PCI_CHIP_I945_G:
   case PCI_CHIP_I945_GM:
   case PCI_CHIP_I945_GME:
   case PCI_CHIP_G33_G:
   case PCI_CHIP_Q33_G:
   case PCI_CHIP_Q35_G:
      ok = i945_miptree_layout(mt);
      break;
   case PCI_CHIP_I915_G:
   case PCI_CHIP_I915_GM:
   case PCI_CHIP_I830_M:
   case PCI_CHIP_I855_GM:
   case PCI_CHIP_I865_G:
   default:
      /* All the i830 chips and the i915 use this layout:
       */
      ok = i915_miptree_layout(mt);
      break;
   }

   if (ok) {
      if (!mt->compressed) {
         int align;

         if (intel->intelScreen->ttm) {
            /* XXX: Align pitch to multiple of 64 bytes for now to allow
             * render-to-texture to work in all cases. This should probably be
             * replaced at some point by some scheme to only do this when really
             * necessary.
             */
            align = 63;
         } else {
            align = 3;
         }

         mt->pitch = (mt->pitch * cpp + align) & ~align;

         /* XXX: At least the i915 seems very upset when the pitch is a multiple
          * of 1024 and sometimes 512 bytes - performance can drop by several
          * times. Go to the next multiple of the required alignment for now.
          */
         if (!(mt->pitch & 511))
            mt->pitch += align + 1;

         mt->pitch /= cpp;
      }

      mt->region = intel_region_alloc(intel->intelScreen,
                                      mt->cpp, mt->pitch, mt->total_height);
   }

   if (!mt->region) {
      /* layout failed or region allocation failed */
      free(mt);
      return NULL;
   }

   return mt;
}
/**
 * Make *dst point at src, bumping src's reference count.
 * Note: any tree previously stored in *dst is NOT released.
 */
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   *dst = src;
   src->refcount++;
   DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
}
/**
 * Drop a reference to a mipmap tree and NULL-out the caller's pointer.
 * When the refcount reaches zero the backing region, the per-level image
 * offset arrays, and the tree itself are freed.  Safe to call with *mt NULL.
 */
void
intel_miptree_release(struct intel_context *intel,
                      struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));

      /* free(NULL) is a no-op, so the previous per-element NULL guard
       * was redundant and has been removed */
      for (i = 0; i < MAX_TEXTURE_LEVELS; i++)
         free((*mt)->level[i].image_offset);

      free(*mt);
   }
   *mt = NULL;
}
/* Can the image be pulled into a unified mipmap tree. This mirrors
 * the completeness test in a lot of ways.
 *
 * Returns GL_TRUE only when the image has no border, its format and
 * compression agree with the tree, and its dimensions match the tree's
 * (already minification-adjusted) size for that level.
 */
GLboolean
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image,
                          GLuint face, GLuint level)
{
   (void) face;

   /* Images with borders are never pulled into mipmap trees. */
   if (image->Border)
      return GL_FALSE;

   /* Format and compression state must agree with the tree. */
   if (image->InternalFormat != mt->internal_format)
      return GL_FALSE;
   if (image->IsCompressed != mt->compressed)
      return GL_FALSE;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (image->Width != mt->level[level].width)
      return GL_FALSE;
   if (image->Height != mt->level[level].height)
      return GL_FALSE;
   if (image->Depth != mt->level[level].depth)
      return GL_FALSE;

   return GL_TRUE;
}
/**
 * Record the size, position and image count of one mipmap level within
 * the tree, and (re)allocate its per-image offset array.
 *
 * \param level      mipmap level being described
 * \param nr_images  number of images in this level (cube faces / 3D slices)
 * \param x, y       position of the level within the region, in pixels
 * \param w, h, d    level dimensions
 */
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint nr_images,
                             GLuint x, GLuint y, GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   /* byte offset of the level's origin within the region */
   mt->level[level].level_offset = (x + y * mt->pitch) * mt->cpp;
   mt->level[level].nr_images = nr_images;

   DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
       level, w, h, d, x, y, mt->level[level].level_offset);

   /* Not sure when this would happen, but anyway:
    */
   if (mt->level[level].image_offset) {
      free(mt->level[level].image_offset);
      mt->level[level].image_offset = NULL;
   }

   assert(nr_images);

   /* NOTE(review): malloc result is used unchecked on the next line;
    * there is no error channel here, but an OOM would crash.  Confirm
    * whether callers can tolerate an added check/abort.
    */
   mt->level[level].image_offset = malloc(nr_images * sizeof(GLuint));
   mt->level[level].image_offset[0] = 0;
}
/**
 * Record the pixel offset of image `img` (cube face or 3D slice) within
 * level `level`, relative to the level's origin.
 */
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img, GLuint x, GLuint y)
{
   const GLuint offset = x + y * mt->pitch;   /* in pixels */

   /* the first image of the base level is always at the origin */
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);
   assert(img < mt->level[level].nr_images);

   mt->level[level].image_offset[img] = offset;

   DBG("%s level %d img %d pos %d,%d image_offset %x\n",
       __FUNCTION__, level, img, x, y, mt->level[level].image_offset[img]);
}
/* Although we use the image_offset[] array to store relative offsets
* to cube faces, Mesa doesn't know anything about this and expects
* each cube face to be treated as a separate image.
*
* These functions present that view to mesa:
*/
/**
 * Return the per-slice texel offset table for the given level.
 *
 * Only a multi-slice 3D level carries a real table; every other case
 * behaves as a single image at offset zero.
 */
const GLuint *
intel_miptree_depth_offsets(struct intel_mipmap_tree *mt, GLuint level)
{
   static const GLuint zero = 0;

   const GLboolean has_slices = (mt->target == GL_TEXTURE_3D &&
                                 mt->level[level].nr_images != 1);

   return has_slices ? mt->level[level].image_offset : &zero;
}
/**
 * Byte offset of the given face/level image within the region.
 *
 * Cube maps keep per-face texel offsets in image_offset[]; all other
 * targets place the image directly at the level offset.
 */
GLuint
intel_miptree_image_offset(struct intel_mipmap_tree * mt,
                           GLuint face, GLuint level)
{
   GLuint offset = mt->level[level].level_offset;

   if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
      offset += mt->level[level].image_offset[face] * mt->cpp;

   return offset;
}
/**
 * Map a teximage in a mipmap tree.
 * \param row_stride  if non-NULL, returns the row stride in bytes
 * \param image_offsets  if non-NULL, receives the level's per-slice texel
 *        offsets (mt->level[level].depth GLuints) — used for 3D textures
 * \return address of the face/level image within the mapped region
 */
GLubyte *
intel_miptree_image_map(struct intel_context * intel,
                        struct intel_mipmap_tree * mt,
                        GLuint face,
                        GLuint level,
                        GLuint * row_stride, GLuint * image_offsets)
{
   DBG("%s \n", __FUNCTION__);

   if (row_stride)
      *row_stride = mt->pitch * mt->cpp;

   if (image_offsets)
      memcpy(image_offsets, mt->level[level].image_offset,
             mt->level[level].depth * sizeof(GLuint));

   /* Maps the whole underlying region (refcounted), then offsets to the
    * requested image; balance with intel_miptree_image_unmap().
    */
   return (intel_region_map(intel->intelScreen, mt->region) +
           intel_miptree_image_offset(mt, face, level));
}
/**
 * Unmap a miptree previously mapped with intel_miptree_image_map();
 * drops one reference on the underlying region mapping.
 */
void
intel_miptree_image_unmap(struct intel_context *intel,
                          struct intel_mipmap_tree *mt)
{
   DBG("%s\n", __FUNCTION__);
   intel_region_unmap(intel->intelScreen, mt->region);
}
/* Upload data for a particular image.
 *
 * Every depth slice of the face/level is uploaded separately, each at its
 * recorded offset within the level.  \p src_row_pitch and
 * \p src_image_pitch are in texels.
 */
void
intel_miptree_image_data(struct intel_context *intel,
                         struct intel_mipmap_tree *dst,
                         GLuint face,
                         GLuint level,
                         void *src,
                         GLuint src_row_pitch, GLuint src_image_pitch)
{
   const GLuint depth = dst->level[level].depth;
   const GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
   const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
   /* Advance through the source with a byte pointer: arithmetic on
    * void * is a GNU extension, not standard C.
    */
   const GLubyte *src_bytes = (const GLubyte *) src;
   GLuint height = dst->level[level].height;
   GLuint i;

   DBG("%s\n", __FUNCTION__);

   /* Compressed formats pack four texel rows per block row; this is
    * loop-invariant, so compute it once up front.
    */
   if (dst->compressed)
      height /= 4;

   for (i = 0; i < depth; i++) {
      intel_region_data(intel->intelScreen, dst->region,
                        dst_offset + dst_depth_offset[i], /* dst_offset */
                        0, 0,                             /* dstx, dsty */
                        src_bytes,
                        src_row_pitch,
                        0, 0,                             /* source x, y */
                        dst->level[level].width, height); /* width, height */

      src_bytes += src_image_pitch * dst->cpp;
   }
}
extern GLuint intel_compressed_alignment(GLenum);
/* Copy mipmap image between trees
 *
 * Copies one face/level (all depth slices) from \p src to \p dst using
 * the blitter.  For compressed formats the copy works on blocks: height
 * is converted to block rows and width rounded up to the destination
 * format's alignment.
 */
void
intel_miptree_image_copy(struct intel_context *intel,
                         struct intel_mipmap_tree *dst,
                         GLuint face, GLuint level,
                         struct intel_mipmap_tree *src)
{
   GLuint width = src->level[level].width;
   GLuint height = src->level[level].height;
   GLuint depth = src->level[level].depth;
   GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
   GLuint src_offset = intel_miptree_image_offset(src, face, level);
   const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
   const GLuint *src_depth_offset = intel_miptree_depth_offsets(src, level);
   GLuint i;

   if (dst->compressed) {
      GLuint alignment = intel_compressed_alignment(dst->internal_format);
      /* four texel rows per compressed block row */
      height = (height + 3) / 4;
      width = ((width + alignment - 1) & ~(alignment - 1));
   }

   for (i = 0; i < depth; i++) {
      intel_region_copy(intel->intelScreen,
                        dst->region, dst_offset + dst_depth_offset[i],
                        0,
                        0,
                        src->region, src_offset + src_depth_offset[i],
                        0, 0, width, height);
   }
}

View file

@ -0,0 +1 @@
../intel/intel_mipmap_tree.c

View file

@ -342,8 +342,8 @@ do_blit_copypixels(GLcontext * ctx,
intelEmitCopyBlit(intel, dst->cpp,
src->pitch, src->buffer, 0,
dst->pitch, dst->buffer, 0,
src->pitch, src->buffer, 0, src->tiled,
dst->pitch, dst->buffer, 0, dst->tiled,
rect.x1 + delta_x,
rect.y1 + delta_y, /* srcx, srcy */
rect.x1, rect.y1, /* dstx, dsty */

View file

@ -312,10 +312,8 @@ do_blit_drawpixels(GLcontext * ctx,
intelEmitCopyBlit(intel,
dest->cpp,
rowLength,
src_buffer, src_offset,
dest->pitch,
dest->buffer, 0,
rowLength, src_buffer, src_offset, GL_FALSE,
dest->pitch, dest->buffer, 0, dest->tiled,
rect.x1 - dest_rect.x1,
rect.y2 - dest_rect.y2,
rect.x1,

View file

@ -264,9 +264,8 @@ do_blit_readpixels(GLcontext * ctx,
intelEmitCopyBlit(intel,
src->cpp,
src->pitch, src->buffer, 0,
rowLength,
dst_buffer, dst_offset,
src->pitch, src->buffer, 0, src->tiled,
rowLength, dst_buffer, dst_offset, GL_FALSE,
rect.x1,
rect.y1,
rect.x1 - src_rect.x1,

View file

@ -1,483 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* Provide additional functionality on top of bufmgr buffers:
* - 2d semantics and blit operations
* - refcounting of buffers for multiple images in a buffer.
* - refcounting of buffer mappings.
* - some logic for moving the buffers to the best memory pools for
* given operations.
*
* Most of this is to make it easier to implement the fixed-layout
* mipmap tree required by intel hardware in the face of GL's
* programming interface where each image can be specifed in random
* order and it isn't clear what layout the tree should have until the
* last moment.
*/
#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "dri_bufmgr.h"
#include "intel_bufmgr_ttm.h"
#include "intel_batchbuffer.h"
#define FILE_DEBUG_FLAG DEBUG_REGION
/**
 * Wait until any pending rendering to \p region has completed.
 */
void
intel_region_idle(intelScreenPrivate *intelScreen, struct intel_region *region)
{
   DBG("%s\n", __FUNCTION__);
   /* XXX: Using this function is likely bogus -- it ought to only have been
    * used before a map, anyway, but leave this cheap implementation of it
    * for now.
    */
   if (region && region->buffer) {
      /* Mapping it for read will ensure that any acceleration to the region
       * would have landed already.
       */
      dri_bo_map(region->buffer, GL_TRUE);
      dri_bo_unmap(region->buffer);
   }
}
/* XXX: Thread safety?
 */
/**
 * Map the region's buffer into CPU address space.  Mappings are
 * refcounted; nested calls return the same pointer.
 */
GLubyte *
intel_region_map(intelScreenPrivate *intelScreen, struct intel_region *region)
{
   DBG("%s\n", __FUNCTION__);
   if (!region->map_refcount++) {
      /* First mapping: break any PBO copy-on-write tie before handing
       * out a writable pointer.
       */
      if (region->pbo)
         intel_region_cow(intelScreen, region);

      dri_bo_map(region->buffer, GL_TRUE);
      region->map = region->buffer->virtual;
   }

   return region->map;
}
/**
 * Drop one map reference; the buffer is actually unmapped only when the
 * refcount reaches zero.
 */
void
intel_region_unmap(intelScreenPrivate *intelScreen, struct intel_region *region)
{
   DBG("%s\n", __FUNCTION__);
   if (!--region->map_refcount) {
      dri_bo_unmap(region->buffer);
      region->map = NULL;
   }
}
/**
 * Allocate a new region of pitch * cpp * height bytes in TT memory.
 *
 * \return the new region with a refcount of 1, or NULL if the struct
 *         allocation fails.
 */
struct intel_region *
intel_region_alloc(intelScreenPrivate *intelScreen,
                   GLuint cpp, GLuint pitch, GLuint height)
{
   /* calloc(nelem, size): one zeroed struct.  (Arguments were reversed.) */
   struct intel_region *region = calloc(1, sizeof(*region));

   DBG("%s\n", __FUNCTION__);

   if (region == NULL)
      return NULL;

   region->cpp = cpp;
   region->pitch = pitch;
   region->height = height;     /* needed? */
   region->refcount = 1;

   region->buffer = dri_bo_alloc(intelScreen->bufmgr, "region",
                                 pitch * cpp * height, 64,
                                 DRM_BO_FLAG_MEM_TT);

   return region;
}
/**
 * Point *dst at src, taking a new reference.  *dst must be NULL on
 * entry; a NULL src leaves it NULL.
 */
void
intel_region_reference(struct intel_region **dst, struct intel_region *src)
{
   assert(*dst == NULL);

   if (src == NULL)
      return;

   src->refcount++;
   *dst = src;
}
/**
 * Drop a reference to *region and NULL the caller's pointer.  When the
 * last reference goes away, any PBO association is severed and the
 * buffer is released.
 */
void
intel_region_release(struct intel_region **region)
{
   if (!*region)
      return;

   DBG("%s %d\n", __FUNCTION__, (*region)->refcount - 1);

   ASSERT((*region)->refcount > 0);
   (*region)->refcount--;

   if ((*region)->refcount == 0) {
      assert((*region)->map_refcount == 0);

      /* Detach from the pbo without copying; the pbo keeps the buffer
       * alive through its own reference.
       */
      if ((*region)->pbo)
         (*region)->pbo->region = NULL;
      (*region)->pbo = NULL;

      dri_bo_unreference((*region)->buffer);
      free(*region);
   }
   *region = NULL;
}
/**
 * Create a region describing a buffer allocated by the X server
 * (front/back/depth/...).
 *
 * With the TTM memory manager the buffer object is looked up by its
 * global \p bo_handle; otherwise a fake "static" buffer wrapping the
 * already-mapped \p virtual address at \p offset is created.
 */
struct intel_region *
intel_region_create_static(intelScreenPrivate *intelScreen,
                           GLuint mem_type,
                           unsigned int bo_handle,
                           GLuint offset,
                           void *virtual,
                           GLuint cpp, GLuint pitch, GLuint height)
{
   struct intel_region *region = calloc(sizeof(*region), 1);
   DBG("%s\n", __FUNCTION__);

   region->cpp = cpp;
   region->pitch = pitch;
   region->height = height;     /* needed? */
   region->refcount = 1;

   if (intelScreen->ttm) {
      assert(bo_handle != -1);
      region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
                                                       "static region",
                                                       bo_handle);
   } else {
      region->buffer = dri_bo_alloc_static(intelScreen->bufmgr,
                                           "static region",
                                           offset, pitch * cpp * height,
                                           virtual,
                                           DRM_BO_FLAG_MEM_TT);
   }

   return region;
}
/**
 * Re-describe an existing static region after the underlying X-server
 * allocation changed (e.g. across a mode switch).  Mirrors
 * intel_region_create_static() but reuses the struct, swapping out the
 * buffer object.
 */
void
intel_region_update_static(intelScreenPrivate *intelScreen,
                           struct intel_region *region,
                           GLuint mem_type,
                           unsigned int bo_handle,
                           GLuint offset,
                           void *virtual,
                           GLuint cpp, GLuint pitch, GLuint height)
{
   DBG("%s\n", __FUNCTION__);

   region->cpp = cpp;
   region->pitch = pitch;
   region->height = height;     /* needed? */

   /*
    * We use a "shared" buffer type to indicate buffers created and
    * shared by others.
    */

   dri_bo_unreference(region->buffer);
   if (intelScreen->ttm) {
      assert(bo_handle != -1);
      region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
                                                       "static region",
                                                       bo_handle);
   } else {
      region->buffer = dri_bo_alloc_static(intelScreen->bufmgr,
                                           "static region",
                                           offset, pitch * cpp * height,
                                           virtual,
                                           DRM_BO_FLAG_MEM_TT);
   }
}
/*
 * XXX Move this into core Mesa?
 */
/**
 * Copy a width x height rect of texels from src to dst.
 *
 * \param cpp            bytes per texel
 * \param dst_pitch      destination row pitch, in texels
 * \param src_pitch      source row pitch, in texels
 * \param dst_x, dst_y   destination origin, in texels
 * \param src_x, src_y   source origin, in texels
 */
static void
_mesa_copy_rect(GLubyte * dst,
                GLuint cpp,
                GLuint dst_pitch,
                GLuint dst_x,
                GLuint dst_y,
                GLuint width,
                GLuint height,
                const GLubyte * src,
                GLuint src_pitch, GLuint src_x, GLuint src_y)
{
   GLuint i;

   dst_pitch *= cpp;
   src_pitch *= cpp;
   dst += dst_x * cpp;
   src += src_x * cpp;
   dst += dst_y * dst_pitch;
   /* Advance the source by its own pitch (this used dst_pitch, which
    * read the wrong rows whenever the two pitches differ and src_y != 0).
    */
   src += src_y * src_pitch;
   width *= cpp;

   if (width == dst_pitch && width == src_pitch)
      memcpy(dst, src, height * width);
   else {
      for (i = 0; i < height; i++) {
         memcpy(dst, src, width);
         dst += dst_pitch;
         src += src_pitch;
      }
   }
}
/* Upload data to a rectangular sub-region.  Lots of choices how to do this:
 *
 * - memcpy by span to current destination
 * - upload data as new buffer and blit
 *
 * Currently always memcpy.
 *
 * If the destination is PBO-backed, the COW tie is broken first: a full
 * overwrite merely detaches, a partial one copies the data.
 */
void
intel_region_data(intelScreenPrivate *intelScreen,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  const void *src, GLuint src_pitch,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
   struct intel_context *intel = intelScreenContext(intelScreen);

   DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intelScreen, dst);
      else
         intel_region_cow(intelScreen, dst);
   }

   LOCK_HARDWARE(intel);

   _mesa_copy_rect(intel_region_map(intelScreen, dst) + dst_offset,
                   dst->cpp,
                   dst->pitch,
                   dstx, dsty, width, height, src, src_pitch, srcx, srcy);

   intel_region_unmap(intelScreen, dst);

   UNLOCK_HARDWARE(intel);
}
/* Copy rectangular sub-regions. Need better logic about when to
 * push buffers into AGP - will currently do so whenever possible.
 *
 * Breaks any PBO COW tie on the destination first (detach for a full
 * overwrite, copy otherwise), then blits.  Both regions must have the
 * same cpp.
 */
void
intel_region_copy(intelScreenPrivate *intelScreen,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  struct intel_region *src,
                  GLuint src_offset,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
   struct intel_context *intel = intelScreenContext(intelScreen);

   DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intelScreen, dst);
      else
         intel_region_cow(intelScreen, dst);
   }

   assert(src->cpp == dst->cpp);

   intelEmitCopyBlit(intel,
                     dst->cpp,
                     src->pitch, src->buffer, src_offset,
                     dst->pitch, dst->buffer, dst_offset,
                     srcx, srcy, dstx, dsty, width, height,
                     GL_COPY);
}
/* Fill a rectangular sub-region. Need better logic about when to
 * push buffers into AGP - will currently do so whenever possible.
 *
 * Breaks any PBO COW tie on the destination first, then uses the
 * fill blitter.
 */
void
intel_region_fill(intelScreenPrivate *intelScreen,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  GLuint width, GLuint height, GLuint color)
{
   struct intel_context *intel = intelScreenContext(intelScreen);

   DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      /* Full overwrite can simply detach from the pbo; a partial fill
       * must copy the data first.
       */
      if (dstx == 0 &&
          dsty == 0 && width == dst->pitch && height == dst->height)
         intel_region_release_pbo(intelScreen, dst);
      else
         intel_region_cow(intelScreen, dst);
   }

   intelEmitFillBlit(intel,
                     dst->cpp,
                     dst->pitch, dst->buffer, dst_offset,
                     dstx, dsty, width, height, color);
}
/* Attach to a pbo, discarding our data. Effectively zero-copy upload
 * the pbo's data.
 */
void
intel_region_attach_pbo(intelScreenPrivate *intelScreen,
                        struct intel_region *region,
                        struct intel_buffer_object *pbo)
{
   /* Already sharing this pbo's buffer: nothing to do. */
   if (region->pbo == pbo)
      return;

   /* If there is already a pbo attached, break the cow tie now.
    * Don't call intel_region_release_pbo() as that would
    * unnecessarily allocate a new buffer we would have to immediately
    * discard.
    */
   if (region->pbo) {
      region->pbo->region = NULL;
      region->pbo = NULL;
   }

   /* Drop our own backing store... */
   if (region->buffer) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   /* ...and share the pbo's buffer instead (COW association). */
   region->pbo = pbo;
   region->pbo->region = region;
   dri_bo_reference(pbo->buffer);
   region->buffer = pbo->buffer;
}
/* Break the COW tie to the pbo and allocate a new buffer.
 * The pbo gets to keep the data.
 */
void
intel_region_release_pbo(intelScreenPrivate *intelScreen,
                         struct intel_region *region)
{
   assert(region->buffer == region->pbo->buffer);
   region->pbo->region = NULL;
   region->pbo = NULL;
   dri_bo_unreference(region->buffer);
   /* Replace the formerly shared buffer with a fresh allocation; the
    * data stays with the pbo.  (No intermediate NULL store needed —
    * the pointer is reassigned immediately.)
    */
   region->buffer = dri_bo_alloc(intelScreen->bufmgr, "region",
                                 region->pitch * region->cpp * region->height,
                                 64, DRM_BO_FLAG_MEM_TT);
}
/* Break the COW tie to the pbo. Both the pbo and the region end up
 * with a copy of the data: a fresh buffer is allocated for the region
 * and the contents are blitted over.
 */
void
intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
{
   struct intel_context *intel = intelScreenContext(intelScreen);
   struct intel_buffer_object *pbo = region->pbo;
   GLboolean was_locked;

   if (intel == NULL)
      return;

   /* Detach from the pbo; region->buffer now points at a fresh,
    * uninitialized allocation.
    */
   intel_region_release_pbo(intelScreen, region);

   assert(region->cpp * region->pitch * region->height == pbo->Base.Size);

   DBG("%s (%d bytes)\n", __FUNCTION__, pbo->Base.Size);

   /* Now blit from the texture buffer to the new buffer:
    */
   intel_batchbuffer_flush(intel->batch);

   /* The blit is identical whether or not the caller already holds the
    * hardware lock; take it only if needed.  (Previously the whole call
    * was duplicated in both branches.)
    */
   was_locked = intel->locked;
   if (!was_locked)
      LOCK_HARDWARE(intel);

   intelEmitCopyBlit(intel,
                     region->cpp,
                     region->pitch,
                     region->buffer, 0,
                     region->pitch,
                     pbo->buffer, 0,
                     0, 0, 0, 0,
                     region->pitch, region->height,
                     GL_COPY);

   intel_batchbuffer_flush(intel->batch);

   if (!was_locked)
      UNLOCK_HARDWARE(intel);
}
/**
 * Return the buffer object backing the region, first breaking any PBO
 * COW tie as the intended access requires:
 *   INTEL_WRITE_PART -> keep the data (cow)
 *   INTEL_WRITE_FULL -> discard our copy (release)
 */
dri_bo *
intel_region_buffer(intelScreenPrivate *intelScreen,
                    struct intel_region *region, GLuint flag)
{
   if (region->pbo != NULL) {
      switch (flag) {
      case INTEL_WRITE_PART:
         intel_region_cow(intelScreen, region);
         break;
      case INTEL_WRITE_FULL:
         intel_region_release_pbo(intelScreen, region);
         break;
      default:
         break;
      }
   }

   return region->buffer;
}

View file

@ -0,0 +1 @@
../intel/intel_regions.c

View file

@ -1,237 +0,0 @@
/**
* Routines for simple 2D->2D transformations for rotated, flipped screens.
*
* XXX This code is not intel-specific. Move it into a common/utility
* someday.
*/
#include "intel_rotate.h"
#define MIN2(A, B) ( ((A) < (B)) ? (A) : (B) )
#define ABS(A) ( ((A) < 0) ? -(A) : (A) )
/**
 * Load all six coefficients of the 2x3 transform, row by row.
 */
void
matrix23Set(struct matrix23 *m,
            int m00, int m01, int m02, int m10, int m11, int m12)
{
   m->m00 = m00;  m->m01 = m01;  m->m02 = m02;   /* row 0 */
   m->m10 = m10;  m->m11 = m11;  m->m12 = m12;   /* row 1 */
}
/*
 * Transform the point (*x, *y) by the given matrix, in floats.
 */
void
matrix23TransformCoordf(const struct matrix23 *m, float *x, float *y)
{
   const float px = *x, py = *y;

   *x = m->m00 * px + m->m01 * py + m->m02;
   *y = m->m10 * px + m->m11 * py + m->m12;
}
/*
 * Integer variant of matrix23TransformCoordf().
 */
void
matrix23TransformCoordi(const struct matrix23 *m, int *x, int *y)
{
   const int px = *x, py = *y;

   *x = m->m00 * px + m->m01 * py + m->m02;
   *y = m->m10 * px + m->m11 * py + m->m12;
}
/*
 * Transform a width and height by the given matrix, yielding absolute
 * distances.
 * XXX this could be optimized quite a bit.
 */
void
matrix23TransformDistance(const struct matrix23 *m, int *xDist, int *yDist)
{
   /* Transform the origin plus one endpoint per axis, then measure. */
   int ox = 0, oy = 0;
   int ax = *xDist, ay = 0;
   int bx = 0, by = *yDist;
   int dx, dy;

   matrix23TransformCoordi(m, &ox, &oy);
   matrix23TransformCoordi(m, &ax, &ay);
   matrix23TransformCoordi(m, &bx, &by);

   dx = (ax - ox) + (bx - ox);
   dy = (ay - oy) + (by - oy);

   *xDist = (dx < 0) ? -dx : dx;
   *yDist = (dy < 0) ? -dy : dy;
}
/**
 * Transform the rect (x, y, w, h) by m, writing back the transformed
 * origin and extents derived from the first three corners.
 */
void
matrix23TransformRect(const struct matrix23 *m, int *x, int *y, int *w,
                      int *h)
{
   /* The four corners, counter-clockwise from the origin corner. */
   int cx[4], cy[4];

   cx[0] = *x;       cy[0] = *y;
   cx[1] = *x + *w;  cy[1] = *y;
   cx[2] = *x + *w;  cy[2] = *y + *h;
   cx[3] = *x;       cy[3] = *y + *h;

   matrix23TransformCoordi(m, &cx[0], &cy[0]);
   matrix23TransformCoordi(m, &cx[1], &cy[1]);
   matrix23TransformCoordi(m, &cx[2], &cy[2]);
   matrix23TransformCoordi(m, &cx[3], &cy[3]);

   *w = ABS(cx[1] - cx[0]) + ABS(cx[2] - cx[1]);
   /**w = ABS(*w);*/
   *h = ABS(cy[1] - cy[0]) + ABS(cy[2] - cy[1]);
   /**h = ABS(*h);*/

   *x = MIN2(MIN2(cx[0], cx[1]), cx[2]);
   *y = MIN2(MIN2(cy[0], cy[1]), cy[2]);
}
/*
 * Make rotation matrix for width X height screen.
 * Angles other than 0/90/180/270 leave m untouched.
 */
void
matrix23Rotate(struct matrix23 *m, int width, int height, int angle)
{
   if (angle == 0)
      matrix23Set(m, 1, 0, 0, 0, 1, 0);
   else if (angle == 90)
      matrix23Set(m, 0, 1, 0, -1, 0, width);
   else if (angle == 180)
      matrix23Set(m, -1, 0, width, 0, -1, height);
   else if (angle == 270)
      matrix23Set(m, 0, -1, height, 1, 0, 0);
   /* else: unsupported angle — deliberately a no-op (was a commented
    * abort()).
    */
}
/*
 * Make flip/reflection matrix for width X height screen.
 */
void
matrix23Flip(struct matrix23 *m, int width, int height, int xflip, int yflip)
{
   /* reflect about the vertical axis? */
   m->m00 = xflip ? -1 : 1;
   m->m01 = 0;
   m->m02 = xflip ? width - 1 : 0;

   /* reflect about the horizontal axis? */
   m->m10 = 0;
   m->m11 = yflip ? -1 : 1;
   m->m12 = yflip ? height - 1 : 0;
}
/*
 * result = a * b
 *
 * NOTE(review): result must not alias a or b — the products read
 * coefficients that earlier stores into result would already have
 * overwritten.
 */
void
matrix23Multiply(struct matrix23 *result,
                 const struct matrix23 *a, const struct matrix23 *b)
{
   result->m00 = a->m00 * b->m00 + a->m01 * b->m10;
   result->m01 = a->m00 * b->m01 + a->m01 * b->m11;
   result->m02 = a->m00 * b->m02 + a->m01 * b->m12 + a->m02;
   result->m10 = a->m10 * b->m00 + a->m11 * b->m10;
   result->m11 = a->m10 * b->m01 + a->m11 * b->m11;
   result->m12 = a->m10 * b->m02 + a->m11 * b->m12 + a->m12;
}
#if 000
/* Disabled standalone smoke test for the matrix23 routines: transforms
 * the corners, extents and a sample rect of a 500x400 screen at each
 * 90-degree rotation and prints the results.  Flip the "#if 000" to
 * build it as a little program.
 */
#include <stdio.h>
int
main(int argc, char *argv[])
{
   int width = 500, height = 400;
   int rot;
   int fx = 0, fy = 0;          /* flip x and/or y ? */
   int coords[4][2];

   /* four corner coords to test with */
   coords[0][0] = 0;
   coords[0][1] = 0;
   coords[1][0] = width - 1;
   coords[1][1] = 0;
   coords[2][0] = width - 1;
   coords[2][1] = height - 1;
   coords[3][0] = 0;
   coords[3][1] = height - 1;

   for (rot = 0; rot < 360; rot += 90) {
      struct matrix23 rotate, flip, m;
      int i;

      printf("Rot %d, xFlip %d, yFlip %d:\n", rot, fx, fy);

      /* make transformation matrix 'm' */
      matrix23Rotate(&rotate, width, height, rot);
      matrix23Flip(&flip, width, height, fx, fy);
      matrix23Multiply(&m, &rotate, &flip);

      /* xform four coords */
      for (i = 0; i < 4; i++) {
         int x = coords[i][0];
         int y = coords[i][1];

         matrix23TransformCoordi(&m, &x, &y);

         printf("  %d, %d -> %d %d\n", coords[i][0], coords[i][1], x, y);
      }

      /* xform width, height */
      {
         int x = width;
         int y = height;

         matrix23TransformDistance(&m, &x, &y);
         printf("  %d x %d -> %d x %d\n", width, height, x, y);
      }

      /* xform rect */
      {
         int x = 50, y = 10, w = 200, h = 100;

         matrix23TransformRect(&m, &x, &y, &w, &h);
         printf("  %d,%d %d x %d -> %d, %d %d x %d\n", 50, 10, 200, 100,
                x, y, w, h);
      }
   }

   return 0;
}
#endif

View file

@ -1,39 +0,0 @@
#ifndef INTEL_ROTATE_H
#define INTEL_ROTATE_H 1
struct matrix23
{
int m00, m01, m02;
int m10, m11, m12;
};
extern void
matrix23Set(struct matrix23 *m,
int m00, int m01, int m02, int m10, int m11, int m12);
extern void matrix23TransformCoordi(const struct matrix23 *m, int *x, int *y);
extern void
matrix23TransformCoordf(const struct matrix23 *m, float *x, float *y);
extern void
matrix23TransformDistance(const struct matrix23 *m, int *xDist, int *yDist);
extern void
matrix23TransformRect(const struct matrix23 *m,
int *x, int *y, int *w, int *h);
extern void
matrix23Rotate(struct matrix23 *m, int width, int height, int angle);
extern void
matrix23Flip(struct matrix23 *m, int width, int height, int xflip, int yflip);
extern void
matrix23Multiply(struct matrix23 *result,
const struct matrix23 *a, const struct matrix23 *b);
#endif /* INTEL_ROTATE_H */

View file

@ -1,945 +0,0 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "glheader.h"
#include "context.h"
#include "framebuffer.h"
#include "matrix.h"
#include "renderbuffer.h"
#include "simple_list.h"
#include "utils.h"
#include "vblank.h"
#include "xmlpool.h"
#include "intel_screen.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_span.h"
#include "intel_tris.h"
#include "intel_ioctl.h"
#include "intel_fbo.h"
#include "i830_dri.h"
#include "dri_bufmgr.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
#include "intel_bufmgr_ttm.h"
/* Driver configuration options exposed to the DRI config mechanism:
 * frame-throttle mode, vblank mode, forced S3TC enable and the
 * large-texture allowance.
 */
PUBLIC const char __driConfigOptions[] =
   DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
   DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
   DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
   DRI_CONF_SECTION_END DRI_CONF_SECTION_QUALITY
   DRI_CONF_FORCE_S3TC_ENABLE(false)
   DRI_CONF_ALLOW_LARGE_TEXTURES(1)
   DRI_CONF_SECTION_END DRI_CONF_END;

/* Number of options declared above. */
const GLuint __driNConfigOptions = 4;

#ifdef USE_NEW_INTERFACE
static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
#endif /*USE_NEW_INTERFACE */

/* Extension lists defined elsewhere in the driver. */
extern const struct dri_extension card_extensions[];
extern const struct dri_extension ttm_extensions[];
/**
 * Map all the memory regions described by the screen.
 * \return GL_TRUE if success, GL_FALSE if error.
 *
 * On failure after the first mapping, everything mapped so far is
 * unmapped via intelUnmapScreenRegions() before returning.
 */
GLboolean
intelMapScreenRegions(__DRIscreenPrivate * sPriv)
{
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;

   /* A missing front-buffer handle is only a warning, not an error. */
   if (intelScreen->front.handle) {
      if (drmMap(sPriv->fd,
                 intelScreen->front.handle,
                 intelScreen->front.size,
                 (drmAddress *) & intelScreen->front.map) != 0) {
         _mesa_problem(NULL, "drmMap(frontbuffer) failed!");
         return GL_FALSE;
      }
   }
   else {
      _mesa_warning(NULL, "no front buffer handle in intelMapScreenRegions!");
   }

   if (0)
      _mesa_printf("Back 0x%08x ", intelScreen->back.handle);
   if (drmMap(sPriv->fd,
              intelScreen->back.handle,
              intelScreen->back.size,
              (drmAddress *) & intelScreen->back.map) != 0) {
      intelUnmapScreenRegions(intelScreen);
      return GL_FALSE;
   }

   /* The third buffer is optional. */
   if (intelScreen->third.handle) {
      if (0)
         _mesa_printf("Third 0x%08x ", intelScreen->third.handle);
      if (drmMap(sPriv->fd,
                 intelScreen->third.handle,
                 intelScreen->third.size,
                 (drmAddress *) & intelScreen->third.map) != 0) {
         intelUnmapScreenRegions(intelScreen);
         return GL_FALSE;
      }
   }

   if (0)
      _mesa_printf("Depth 0x%08x ", intelScreen->depth.handle);
   if (drmMap(sPriv->fd,
              intelScreen->depth.handle,
              intelScreen->depth.size,
              (drmAddress *) & intelScreen->depth.map) != 0) {
      intelUnmapScreenRegions(intelScreen);
      return GL_FALSE;
   }

   if (0)
      _mesa_printf("TEX 0x%08x ", intelScreen->tex.handle);
   /* The texture region can legitimately be absent (size 0). */
   if (intelScreen->tex.size != 0) {
      if (drmMap(sPriv->fd,
                 intelScreen->tex.handle,
                 intelScreen->tex.size,
                 (drmAddress *) & intelScreen->tex.map) != 0) {
         intelUnmapScreenRegions(intelScreen);
         return GL_FALSE;
      }
   }

   if (0)
      printf("Mappings: front: %p back: %p third: %p depth: %p tex: %p\n",
             intelScreen->front.map,
             intelScreen->back.map, intelScreen->third.map,
             intelScreen->depth.map, intelScreen->tex.map);
   return GL_TRUE;
}
/** Driver-specific fence emit implementation for the fake memory manager.
 * \param private  the intelScreenPrivate this manager was set up with
 * \return IRQ cookie to pass to intel_fence_wait()
 */
static unsigned int
intel_fence_emit(void *private)
{
   intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;
   unsigned int fence;

   /* XXX: Need to emit a flush, if we haven't already (at least with the
    * current batchbuffer implementation, we have).
    */

   fence = intelEmitIrqLocked(intelScreen);

   return fence;
}
/** Driver-specific fence wait implementation for the fake memory manager.
 * \param cookie  value previously returned by intel_fence_emit()
 * \return 0 (always succeeds)
 */
static int
intel_fence_wait(void *private, unsigned int cookie)
{
   intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;

   intelWaitIrq(intelScreen, cookie);

   return 0;
}
/**
 * Create or refresh one static region from the screen-wide description
 * in \p region_desc.  Pitch is converted from bytes to texels; height
 * always comes from the screen.
 */
static struct intel_region *
intel_recreate_static(intelScreenPrivate *intelScreen,
                      struct intel_region *region,
                      intelRegion *region_desc,
                      GLuint mem_type)
{
   if (region) {
      intel_region_update_static(intelScreen, region, mem_type,
                                 region_desc->bo_handle, region_desc->offset,
                                 region_desc->map, intelScreen->cpp,
                                 region_desc->pitch / intelScreen->cpp,
                                 intelScreen->height);
   } else {
      region = intel_region_create_static(intelScreen, mem_type,
                                          region_desc->bo_handle,
                                          region_desc->offset,
                                          region_desc->map, intelScreen->cpp,
                                          region_desc->pitch / intelScreen->cpp,
                                          intelScreen->height);
   }

   assert(region->buffer != NULL);

   return region;
}
/* Create intel_region structs to describe the static front,back,depth
 * buffers created by the xserver.
 *
 * Although FBO's mean we now no longer use these as render targets in
 * all circumstances, they won't go away until the back and depth
 * buffers become private, and the front and rotated buffers will
 * remain even then.
 *
 * Note that these don't allocate video memory, just describe
 * allocations alread made by the X server.
 */
static void
intel_recreate_static_regions(intelScreenPrivate *intelScreen)
{
   intelScreen->front_region =
      intel_recreate_static(intelScreen,
                            intelScreen->front_region,
                            &intelScreen->front,
                            DRM_BO_FLAG_MEM_TT);

   /* The rotated region is only used for old DDXes that didn't handle
    * rotation on their own.
    */
   if (intelScreen->driScrnPriv->ddx_version.minor < 8) {
      intelScreen->rotated_region =
         intel_recreate_static(intelScreen,
                               intelScreen->rotated_region,
                               &intelScreen->rotated,
                               DRM_BO_FLAG_MEM_TT);
   }

   intelScreen->back_region =
      intel_recreate_static(intelScreen,
                            intelScreen->back_region,
                            &intelScreen->back,
                            DRM_BO_FLAG_MEM_TT);

   /* Third (triple-buffering) buffer, if the X server set one up. */
   if (intelScreen->third.handle) {
      intelScreen->third_region =
         intel_recreate_static(intelScreen,
                               intelScreen->third_region,
                               &intelScreen->third,
                               DRM_BO_FLAG_MEM_TT);
   }

   /* Still assumes front.cpp == depth.cpp. We can kill this when we move to
    * private buffers.
    */
   intelScreen->depth_region =
      intel_recreate_static(intelScreen,
                            intelScreen->depth_region,
                            &intelScreen->depth,
                            DRM_BO_FLAG_MEM_TT);
}
/**
 * Use the information in the sarea to update the screen parameters
 * related to screen rotation. Needs to be called locked.
 */
void
intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
{
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;

   /* Drop the old mappings, pull the new geometry from the SAREA, then
    * remap and rebuild the static regions to match.
    */
   intelUnmapScreenRegions(intelScreen);
   intelUpdateScreenFromSAREA(intelScreen, sarea);

   if (!intelMapScreenRegions(sPriv)) {
      fprintf(stderr, "ERROR Remapping screen regions!!!\n");
   }

   intel_recreate_static_regions(intelScreen);
}
/**
 * Unmap the front/back/third/depth/tex regions mapped by
 * intelMapScreenRegions(), clearing each map pointer.  Safe to call on
 * a partially mapped screen (each region is checked individually).
 */
void
intelUnmapScreenRegions(intelScreenPrivate * intelScreen)
{
#define REALLY_UNMAP 1
   if (intelScreen->front.map) {
#if REALLY_UNMAP
      if (drmUnmap(intelScreen->front.map, intelScreen->front.size) != 0)
         printf("drmUnmap front failed!\n");
#endif
      intelScreen->front.map = NULL;
   }
   if (intelScreen->back.map) {
#if REALLY_UNMAP
      if (drmUnmap(intelScreen->back.map, intelScreen->back.size) != 0)
         printf("drmUnmap back failed!\n");
#endif
      intelScreen->back.map = NULL;
   }
   if (intelScreen->third.map) {
#if REALLY_UNMAP
      if (drmUnmap(intelScreen->third.map, intelScreen->third.size) != 0)
         printf("drmUnmap third failed!\n");
#endif
      intelScreen->third.map = NULL;
   }
   if (intelScreen->depth.map) {
#if REALLY_UNMAP
      drmUnmap(intelScreen->depth.map, intelScreen->depth.size);
#endif
      /* Clear the pointer outside the #if, matching front/back/third
       * (it was previously only cleared when REALLY_UNMAP was set).
       */
      intelScreen->depth.map = NULL;
   }
   if (intelScreen->tex.map) {
#if REALLY_UNMAP
      drmUnmap(intelScreen->tex.map, intelScreen->tex.size);
#endif
      intelScreen->tex.map = NULL;
   }
}
/**
 * Debug aid: dump the sizes/offsets/pitches of the screen's buffers to
 * stderr.
 */
static void
intelPrintDRIInfo(intelScreenPrivate * intelScreen,
                  __DRIscreenPrivate * sPriv, I830DRIPtr gDRIPriv)
{
   fprintf(stderr, "*** Front size:   0x%x  offset: 0x%x  pitch: %d\n",
           intelScreen->front.size, intelScreen->front.offset,
           intelScreen->front.pitch);
   fprintf(stderr, "*** Back size:    0x%x  offset: 0x%x  pitch: %d\n",
           intelScreen->back.size, intelScreen->back.offset,
           intelScreen->back.pitch);
   fprintf(stderr, "*** Depth size:   0x%x  offset: 0x%x  pitch: %d\n",
           intelScreen->depth.size, intelScreen->depth.offset,
           intelScreen->depth.pitch);
   fprintf(stderr, "*** Rotated size: 0x%x  offset: 0x%x  pitch: %d\n",
           intelScreen->rotated.size, intelScreen->rotated.offset,
           intelScreen->rotated.pitch);
   fprintf(stderr, "*** Texture size: 0x%x  offset: 0x%x\n",
           intelScreen->tex.size, intelScreen->tex.offset);
   fprintf(stderr, "*** Memory : 0x%x\n", gDRIPriv->mem);
}
/**
 * Debug aid: dump the shared-area (SAREA) buffer description to stderr.
 */
static void
intelPrintSAREA(const drmI830Sarea * sarea)
{
   fprintf(stderr, "SAREA: sarea width %d  height %d\n", sarea->width,
           sarea->height);
   fprintf(stderr, "SAREA: pitch: %d\n", sarea->pitch);
   fprintf(stderr,
           "SAREA: front offset: 0x%08x  size: 0x%x  handle: 0x%x\n",
           sarea->front_offset, sarea->front_size,
           (unsigned) sarea->front_handle);
   fprintf(stderr,
           "SAREA: back  offset: 0x%08x  size: 0x%x  handle: 0x%x\n",
           sarea->back_offset, sarea->back_size,
           (unsigned) sarea->back_handle);
   fprintf(stderr, "SAREA: depth offset: 0x%08x  size: 0x%x  handle: 0x%x\n",
           sarea->depth_offset, sarea->depth_size,
           (unsigned) sarea->depth_handle);
   fprintf(stderr, "SAREA: tex   offset: 0x%08x  size: 0x%x  handle: 0x%x\n",
           sarea->tex_offset, sarea->tex_size, (unsigned) sarea->tex_handle);
   fprintf(stderr, "SAREA: rotation: %d\n", sarea->rotation);
   fprintf(stderr,
           "SAREA: rotated offset: 0x%08x  size: 0x%x\n",
           sarea->rotated_offset, sarea->rotated_size);
   fprintf(stderr, "SAREA: rotated pitch: %d\n", sarea->rotated_pitch);
}
/**
 * A number of the screen parameters are obtained/computed from
 * information in the SAREA. This function updates those parameters.
 */
void
intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
                           drmI830Sarea * sarea)
{
   intelScreen->width = sarea->width;
   intelScreen->height = sarea->height;

   intelScreen->front.offset = sarea->front_offset;
   intelScreen->front.pitch = sarea->pitch * intelScreen->cpp;
   intelScreen->front.handle = sarea->front_handle;
   intelScreen->front.size = sarea->front_size;

   intelScreen->back.offset = sarea->back_offset;
   intelScreen->back.pitch = sarea->pitch * intelScreen->cpp;
   intelScreen->back.handle = sarea->back_handle;
   intelScreen->back.size = sarea->back_size;

   /* The third buffer's fields only exist with DDX minor version >= 8. */
   if (intelScreen->driScrnPriv->ddx_version.minor >= 8) {
      intelScreen->third.offset = sarea->third_offset;
      intelScreen->third.pitch = sarea->pitch * intelScreen->cpp;
      intelScreen->third.handle = sarea->third_handle;
      intelScreen->third.size = sarea->third_size;
   }

   intelScreen->depth.offset = sarea->depth_offset;
   intelScreen->depth.pitch = sarea->pitch * intelScreen->cpp;
   intelScreen->depth.handle = sarea->depth_handle;
   intelScreen->depth.size = sarea->depth_size;

   /* Buffer-object handles appear with DDX minor version >= 9; -1 marks
    * them unavailable.
    */
   if (intelScreen->driScrnPriv->ddx_version.minor >= 9) {
      intelScreen->front.bo_handle = sarea->front_bo_handle;
      intelScreen->back.bo_handle = sarea->back_bo_handle;
      intelScreen->third.bo_handle = sarea->third_bo_handle;
      intelScreen->depth.bo_handle = sarea->depth_bo_handle;
   } else {
      intelScreen->front.bo_handle = -1;
      intelScreen->back.bo_handle = -1;
      intelScreen->third.bo_handle = -1;
      intelScreen->depth.bo_handle = -1;
   }

   intelScreen->tex.offset = sarea->tex_offset;
   intelScreen->logTextureGranularity = sarea->log_tex_granularity;
   intelScreen->tex.handle = sarea->tex_handle;
   intelScreen->tex.size = sarea->tex_size;

   intelScreen->rotated.offset = sarea->rotated_offset;
   intelScreen->rotated.pitch = sarea->rotated_pitch * intelScreen->cpp;
   intelScreen->rotated.size = sarea->rotated_size;
   intelScreen->current_rotation = sarea->rotation;

   /* Keep the rotation matrix in sync with the new orientation. */
   matrix23Rotate(&intelScreen->rotMatrix,
                  sarea->width, sarea->height, sarea->rotation);
   intelScreen->rotatedWidth = sarea->virtualX;
   intelScreen->rotatedHeight = sarea->virtualY;

   if (0)
      intelPrintSAREA(sarea);
}
/* DRI extension record exposing the texture-offset hook
 * (intelSetTexOffset) to the loader via the __DRI_TEX_OFFSET name. */
static const __DRItexOffsetExtension intelTexOffsetExtension = {
   { __DRI_TEX_OFFSET },
   intelSetTexOffset,
};
/* NULL-terminated list of screen-level extensions advertised to the
 * loader (installed into sPriv->extensions in intelInitDriver). */
static const __DRIextension *intelExtensions[] = {
   &driReadDrawableExtension,
   &driCopySubBufferExtension.base,
   &driSwapControlExtension.base,
   &driFrameTrackingExtension.base,
   &driMediaStreamCounterExtension.base,
   &intelTexOffsetExtension.base,
   NULL
};
/**
 * Per-screen one-time initialization for the i830/i915 DRI driver.
 *
 * Validates the DDX-supplied private record, allocates and fills in the
 * intelScreenPrivate from the SAREA, maps the shared screen regions,
 * queries the DRM for IRQ/batchbuffer support, and selects a buffer
 * manager (TTM when the DDX/DRM are new enough, otherwise the classic
 * "fake" manager over the texture pool).
 *
 * \return GL_TRUE on success.  On failure everything allocated or mapped
 *         here is released again and sPriv->private is reset to NULL
 *         (the original code leaked the private area on several paths).
 */
static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
{
   intelScreenPrivate *intelScreen;
   I830DRIPtr gDRIPriv = (I830DRIPtr) sPriv->pDevPriv;
   drmI830Sarea *sarea;

   if (sPriv->devPrivSize != sizeof(I830DRIRec)) {
      fprintf(stderr,
              "\nERROR! sizeof(I830DRIRec) does not match passed size from device driver\n");
      return GL_FALSE;
   }

   /* Allocate the private area */
   intelScreen = (intelScreenPrivate *) CALLOC(sizeof(intelScreenPrivate));
   if (!intelScreen) {
      fprintf(stderr, "\nERROR! Allocating private area failed\n");
      return GL_FALSE;
   }

   /* parse information in __driConfigOptions */
   driParseOptionInfo(&intelScreen->optionCache,
                      __driConfigOptions, __driNConfigOptions);

   intelScreen->driScrnPriv = sPriv;
   sPriv->private = (void *) intelScreen;

   intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
   sarea = (drmI830Sarea *)
      (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);

   intelScreen->deviceID = gDRIPriv->deviceID;
   /* The 865G cannot use the full batch buffer size. */
   if (intelScreen->deviceID == PCI_CHIP_I865_G)
      intelScreen->maxBatchSize = 4096;
   else
      intelScreen->maxBatchSize = BATCH_SZ;

   intelScreen->mem = gDRIPriv->mem;
   intelScreen->cpp = gDRIPriv->cpp;

   switch (gDRIPriv->bitsPerPixel) {
   case 16:
      intelScreen->fbFormat = DV_PF_565;
      break;
   case 32:
      intelScreen->fbFormat = DV_PF_8888;
      break;
   default:
      /* Formerly exit(1): a driver library must not terminate the whole
       * client process; fail screen creation instead. */
      fprintf(stderr, "\nERROR! unsupported bitsPerPixel %d\n",
              gDRIPriv->bitsPerPixel);
      _mesa_free(intelScreen);
      sPriv->private = NULL;
      return GL_FALSE;
   }

   intelUpdateScreenFromSAREA(intelScreen, sarea);

   if (!intelMapScreenRegions(sPriv)) {
      fprintf(stderr, "\nERROR! mapping regions\n");
      _mesa_free(intelScreen);
      sPriv->private = NULL;
      return GL_FALSE;
   }

   /* (A redundant re-assignment of sarea_priv_offset was removed here;
    * it is already set from gDRIPriv above.) */

   if (0)
      intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);

   intelScreen->drmMinor = sPriv->drm_version.minor;

   /* Determine if IRQs are active? */
   {
      int ret;
      drmI830GetParam gp;

      gp.param = I830_PARAM_IRQ_ACTIVE;
      gp.value = &intelScreen->irq_active;

      ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
                                &gp, sizeof(gp));
      if (ret) {
         fprintf(stderr, "drmI830GetParam: %d\n", ret);
         goto fail;             /* was a bare return that leaked intelScreen */
      }
   }

   /* Determine if batchbuffers are allowed */
   {
      int ret;
      drmI830GetParam gp;

      gp.param = I830_PARAM_ALLOW_BATCHBUFFER;
      gp.value = &intelScreen->allow_batchbuffer;

      ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
                                &gp, sizeof(gp));
      if (ret) {
         fprintf(stderr, "drmI830GetParam: (%d) %d\n", gp.param, ret);
         goto fail;             /* was a bare return that leaked intelScreen */
      }
   }

   sPriv->extensions = intelExtensions;

   /* If we've got a new enough DDX that's initializing TTM and giving us
    * object handles for the shared buffers, use that.
    */
   intelScreen->ttm = GL_FALSE;
   if (getenv("INTEL_NO_TTM") == NULL &&
       intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
       intelScreen->drmMinor >= 11 &&
       intelScreen->front.bo_handle != -1) {
      intelScreen->bufmgr = intel_bufmgr_ttm_init(sPriv->fd,
                                                  DRM_FENCE_TYPE_EXE,
                                                  DRM_FENCE_TYPE_EXE |
                                                  DRM_I915_FENCE_TYPE_RW,
                                                  BATCH_SZ);
      if (intelScreen->bufmgr != NULL)
         intelScreen->ttm = GL_TRUE;
   }
   /* Otherwise, use the classic buffer manager. */
   if (intelScreen->bufmgr == NULL) {
      if (intelScreen->tex.size == 0) {
         fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
                 __func__, __LINE__);
         goto fail;
      }
      fprintf(stderr, "[%s:%u] Failed to init TTM buffer manager, falling back"
              " to classic.\n", __func__, __LINE__);
      intelScreen->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
                                                 intelScreen->tex.map,
                                                 intelScreen->tex.size,
                                                 intel_fence_emit,
                                                 intel_fence_wait,
                                                 intelScreen);
   }

   intel_recreate_static_regions(intelScreen);

   return GL_TRUE;

 fail:
   /* Undo everything done above so a failed screen leaves no leaks. */
   intelUnmapScreenRegions(intelScreen);
   _mesa_free(intelScreen);
   sPriv->private = NULL;
   return GL_FALSE;
}
/**
 * Screen teardown: release resources in reverse order of creation, then
 * detach the private area from the DRI screen.
 */
static void
intelDestroyScreen(__DRIscreenPrivate * sPriv)
{
   intelScreenPrivate *screen = (intelScreenPrivate *) sPriv->private;

   intelUnmapScreenRegions(screen);
   dri_bufmgr_destroy(screen->bufmgr);
   FREE(screen);
   sPriv->private = NULL;
}
/**
 * This is called when we need to set up GL rendering to a new X window.
 *
 * Builds an intel_framebuffer wrapping the screen's shared front (and,
 * for double-buffered visuals, back and optional third) color regions
 * plus a hardware depth/stencil region, then adds any software
 * renderbuffers (stencil/accum) the visual still needs.
 *
 * \return GL_TRUE on success; GL_FALSE for pixmaps (unsupported) or on
 *         allocation failure.
 */
static GLboolean
intelCreateBuffer(__DRIscreenPrivate * driScrnPriv,
                  __DRIdrawablePrivate * driDrawPriv,
                  const __GLcontextModes * mesaVis, GLboolean isPixmap)
{
   intelScreenPrivate *screen = (intelScreenPrivate *) driScrnPriv->private;

   if (isPixmap) {
      return GL_FALSE;          /* not implemented */
   }
   else {
      /* Hardware stencil only exists interleaved with 24-bit depth, so
       * any other stencil request falls back to software. */
      GLboolean swStencil = (mesaVis->stencilBits > 0 &&
                             mesaVis->depthBits != 24);
      GLenum rgbFormat = (mesaVis->redBits == 5 ? GL_RGB5 : GL_RGBA8);

      struct intel_framebuffer *intel_fb = CALLOC_STRUCT(intel_framebuffer);

      if (!intel_fb)
         return GL_FALSE;

      _mesa_initialize_framebuffer(&intel_fb->Base, mesaVis);

      /* setup the hardware-based renderbuffers */
      {
         intel_fb->color_rb[0]
            = intel_create_renderbuffer(rgbFormat,
                                        screen->width, screen->height,
                                        screen->front.offset,
                                        screen->front.pitch,
                                        screen->cpp,
                                        screen->front.map);
         intel_set_span_functions(&intel_fb->color_rb[0]->Base);
         _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_FRONT_LEFT,
                                &intel_fb->color_rb[0]->Base);
      }

      if (mesaVis->doubleBufferMode) {
         intel_fb->color_rb[1]
            = intel_create_renderbuffer(rgbFormat,
                                        screen->width, screen->height,
                                        screen->back.offset,
                                        screen->back.pitch,
                                        screen->cpp,
                                        screen->back.map);
         intel_set_span_functions(&intel_fb->color_rb[1]->Base);
         _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_BACK_LEFT,
                                &intel_fb->color_rb[1]->Base);

         if (screen->third.handle) {
            /* Triple buffering: the third buffer has no framebuffer
             * attachment point, so pin it with an explicit renderbuffer
             * reference instead of _mesa_add_renderbuffer. */
            struct gl_renderbuffer *tmp_rb = NULL;

            intel_fb->color_rb[2]
               = intel_create_renderbuffer(rgbFormat,
                                           screen->width, screen->height,
                                           screen->third.offset,
                                           screen->third.pitch,
                                           screen->cpp,
                                           screen->third.map);
            intel_set_span_functions(&intel_fb->color_rb[2]->Base);
            _mesa_reference_renderbuffer(&tmp_rb, &intel_fb->color_rb[2]->Base);
         }
      }

      if (mesaVis->depthBits == 24 && mesaVis->stencilBits == 8) {
         /* combined depth/stencil buffer */
         struct intel_renderbuffer *depthStencilRb
            = intel_create_renderbuffer(GL_DEPTH24_STENCIL8_EXT,
                                        screen->width, screen->height,
                                        screen->depth.offset,
                                        screen->depth.pitch,
                                        screen->cpp, /* 4! */
                                        screen->depth.map);
         intel_set_span_functions(&depthStencilRb->Base);
         /* note: bind RB to two attachment points */
         _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_DEPTH,
                                &depthStencilRb->Base);
         _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_STENCIL,
                                &depthStencilRb->Base);
      }
      else if (mesaVis->depthBits == 16) {
         /* just 16-bit depth buffer, no hw stencil */
         struct intel_renderbuffer *depthRb
            = intel_create_renderbuffer(GL_DEPTH_COMPONENT16,
                                        screen->width, screen->height,
                                        screen->depth.offset,
                                        screen->depth.pitch,
                                        screen->cpp, /* 2! */
                                        screen->depth.map);
         intel_set_span_functions(&depthRb->Base);
         _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_DEPTH, &depthRb->Base);
      }

      /* now add any/all software-based renderbuffers we may need */
      _mesa_add_soft_renderbuffers(&intel_fb->Base,
                                   GL_FALSE, /* never sw color */
                                   GL_FALSE, /* never sw depth */
                                   swStencil, mesaVis->accumRedBits > 0,
                                   GL_FALSE, /* never sw alpha */
                                   GL_FALSE /* never sw aux */ );
      driDrawPriv->driverPrivate = (void *) intel_fb;

      return GL_TRUE;
   }
}
/**
 * Called when a drawable goes away: drop our reference to its
 * intel_framebuffer (freed when the last reference is released) and
 * clear driverPrivate.
 */
static void
intelDestroyBuffer(__DRIdrawablePrivate * driDrawPriv)
{
   _mesa_unreference_framebuffer((GLframebuffer **)(&(driDrawPriv->driverPrivate)));
}
/**
 * Get information about previous buffer swaps.
 *
 * Copies the swap statistics tracked on the drawable's intel_framebuffer
 * into \p sInfo.
 *
 * \return 0 on success, -1 if any argument (or the drawable's private
 *         data) is missing.
 */
static int
intelGetSwapInfo(__DRIdrawablePrivate * dPriv, __DRIswapInfo * sInfo)
{
   struct intel_framebuffer *fb;

   if (dPriv == NULL || sInfo == NULL || dPriv->driverPrivate == NULL)
      return -1;

   fb = dPriv->driverPrivate;

   sInfo->swap_count = fb->swap_count;
   sInfo->swap_ust = fb->swap_ust;
   sInfo->swap_missed_count = fb->swap_missed_count;

   if (sInfo->swap_missed_count != 0)
      sInfo->swap_missed_usage =
         driCalculateSwapUsage(dPriv, 0, fb->swap_missed_ust);
   else
      sInfo->swap_missed_usage = 0.0;

   return 0;
}
/* There are probably better ways to do this, such as an
 * init-designated function to register chipids and createcontext
 * functions.
 */
/* Chip-family context constructors, presumably provided by the i830 and
 * i915 context code elsewhere in the driver — dispatched to from
 * intelCreateContext below based on deviceID. */
extern GLboolean i830CreateContext(const __GLcontextModes * mesaVis,
                                   __DRIcontextPrivate * driContextPriv,
                                   void *sharedContextPrivate);

extern GLboolean i915CreateContext(const __GLcontextModes * mesaVis,
                                   __DRIcontextPrivate * driContextPriv,
                                   void *sharedContextPrivate);
/**
 * Create a rendering context, dispatching to the i830 or i915 context
 * constructor based on the screen's PCI device ID.
 */
static GLboolean
intelCreateContext(const __GLcontextModes * mesaVis,
                   __DRIcontextPrivate * driContextPriv,
                   void *sharedContextPrivate)
{
   __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
   intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;

   /* 8xx family.  (Don't deal with i830 until texture work complete.) */
   if (intelScreen->deviceID == PCI_CHIP_845_G ||
       intelScreen->deviceID == PCI_CHIP_I830_M ||
       intelScreen->deviceID == PCI_CHIP_I855_GM ||
       intelScreen->deviceID == PCI_CHIP_I865_G)
      return i830CreateContext(mesaVis, driContextPriv, sharedContextPrivate);

   /* 9xx family. */
   if (intelScreen->deviceID == PCI_CHIP_I915_G ||
       intelScreen->deviceID == PCI_CHIP_I915_GM ||
       intelScreen->deviceID == PCI_CHIP_I945_G ||
       intelScreen->deviceID == PCI_CHIP_I945_GM ||
       intelScreen->deviceID == PCI_CHIP_I945_GME ||
       intelScreen->deviceID == PCI_CHIP_G33_G ||
       intelScreen->deviceID == PCI_CHIP_Q35_G ||
       intelScreen->deviceID == PCI_CHIP_Q33_G)
      return i915CreateContext(mesaVis, driContextPriv, sharedContextPrivate);

   fprintf(stderr, "Unrecognized deviceID %x\n", intelScreen->deviceID);
   return GL_FALSE;
}
/* Table of driver entry points handed to the common DRI layer
 * (assigned to psp->DriverAPI in __driDriverInitScreen). */
static const struct __DriverAPIRec intelAPI = {
   .DestroyScreen = intelDestroyScreen,
   .CreateContext = intelCreateContext,
   .DestroyContext = intelDestroyContext,
   .CreateBuffer = intelCreateBuffer,
   .DestroyBuffer = intelDestroyBuffer,
   .SwapBuffers = intelSwapBuffers,
   .MakeCurrent = intelMakeCurrent,
   .UnbindContext = intelUnbindContext,
   .GetSwapInfo = intelGetSwapInfo,
   .GetMSC = driGetMSC32,
   .GetDrawableMSC = driDrawableGetMSC32,
   .WaitForMSC = driWaitForMSC32,
   .WaitForSBC = NULL,
   .SwapBuffersMSC = NULL,
   .CopySubBuffer = intelCopySubBuffer,
   .setTexOffset = intelSetTexOffset,
};
/**
 * Build the list of __GLcontextModes (GLX visuals/fbconfigs) supported
 * by this driver for the given framebuffer configuration.
 *
 * \param pixel_bits       color depth (16 -> 565, otherwise 8888)
 * \param depth_bits       hardware depth buffer size (16 or 24)
 * \param stencil_bits     hardware stencil size (0 or 8)
 * \param have_back_buffer whether double-buffered modes are offered
 * \return linked list of modes, or NULL on failure.
 */
static __GLcontextModes *
intelFillInModes(unsigned pixel_bits, unsigned depth_bits,
                 unsigned stencil_bits, GLboolean have_back_buffer)
{
   __GLcontextModes *modes;
   __GLcontextModes *m;
   unsigned num_modes;
   unsigned depth_buffer_factor;
   unsigned back_buffer_factor;
   GLenum fb_format;
   GLenum fb_type;

   /* GLX_SWAP_COPY_OML is only supported because the Intel driver doesn't
    * support pageflipping at all.
    */
   static const GLenum back_buffer_modes[] = {
      GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
   };

   u_int8_t depth_bits_array[3];
   u_int8_t stencil_bits_array[3];

   depth_bits_array[0] = 0;
   depth_bits_array[1] = depth_bits;
   depth_bits_array[2] = depth_bits;

   /* Just like with the accumulation buffer, always provide some modes
    * with a stencil buffer.  It will be a sw fallback, but some apps won't
    * care about that.
    */
   stencil_bits_array[0] = 0;
   stencil_bits_array[1] = 0;
   if (depth_bits == 24)
      stencil_bits_array[1] = (stencil_bits == 0) ? 8 : stencil_bits;

   stencil_bits_array[2] = (stencil_bits == 0) ? 8 : stencil_bits;

   depth_buffer_factor = ((depth_bits != 0) || (stencil_bits != 0)) ? 3 : 1;
   back_buffer_factor = (have_back_buffer) ? 3 : 1;

   /* 4 = two fb_formats (RGB/BGRA) x two visual classes below */
   num_modes = depth_buffer_factor * back_buffer_factor * 4;

   if (pixel_bits == 16) {
      fb_format = GL_RGB;
      fb_type = GL_UNSIGNED_SHORT_5_6_5;
   }
   else {
      fb_format = GL_BGRA;
      fb_type = GL_UNSIGNED_INT_8_8_8_8_REV;
   }

   modes =
      (*dri_interface->createContextModes) (num_modes,
                                            sizeof(__GLcontextModes));
   /* Previously unchecked: a failed allocation here was dereferenced by
    * driFillInModes below. */
   if (modes == NULL) {
      fprintf(stderr, "[%s:%u] Error allocating context modes!\n",
              __func__, __LINE__);
      return NULL;
   }
   m = modes;

   if (!driFillInModes(&m, fb_format, fb_type,
                       depth_bits_array, stencil_bits_array,
                       depth_buffer_factor, back_buffer_modes,
                       back_buffer_factor, GLX_TRUE_COLOR)) {
      fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
              __LINE__);
      return NULL;
   }
   if (!driFillInModes(&m, fb_format, fb_type,
                       depth_bits_array, stencil_bits_array,
                       depth_buffer_factor, back_buffer_modes,
                       back_buffer_factor, GLX_DIRECT_COLOR)) {
      fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
              __LINE__);
      return NULL;
   }

   /* Mark the visual as slow if there are "fake" stencil bits.
    */
   for (m = modes; m != NULL; m = m->next) {
      if ((m->stencilBits != 0) && (m->stencilBits != stencil_bits)) {
         m->visualRating = GLX_SLOW_CONFIG;
      }
   }

   return modes;
}
/**
 * This is the driver specific part of the createNewScreen entry point.
 *
 * \todo maybe fold this into intelInitDriver
 *
 * \return the __GLcontextModes supported by this driver
 */
PUBLIC __GLcontextModes *__driDriverInitScreen(__DRIscreenPrivate *psp)
{
   /* Minimum DDX/DRI/DRM component versions this driver requires. */
   static const __DRIversion ddx_expected = { 1, 5, 0 };
   static const __DRIversion dri_expected = { 4, 0, 0 };
   static const __DRIversion drm_expected = { 1, 5, 0 };
   I830DRIPtr dri_priv = (I830DRIPtr) psp->pDevPriv;

   psp->DriverAPI = intelAPI;

   if (!driCheckDriDdxDrmVersions2("i915",
                                   &psp->dri_version, &dri_expected,
                                   &psp->ddx_version, &ddx_expected,
                                   &psp->drm_version, &drm_expected)) {
      return NULL;
   }

   /* Calling driInitExtensions here, with a NULL context pointer,
    * does not actually enable the extensions.  It just makes sure
    * that all the dispatch offsets for all the extensions that
    * *might* be enabled are known.  This is needed because the
    * dispatch offsets need to be known when _mesa_context_create is
    * called, but we can't enable the extensions until we have a
    * context pointer.
    *
    * Hello chicken.  Hello egg.  How are you two today?
    */
   driInitExtensions(NULL, card_extensions, GL_FALSE);
   driInitExtensions(NULL, ttm_extensions, GL_FALSE);

   if (!intelInitDriver(psp))
      return NULL;

   /* cpp == 2 -> 565 color with 16-bit depth; otherwise 8888 color with
    * 24-bit depth / 8-bit stencil.  Always offer back buffers. */
   return intelFillInModes(dri_priv->cpp * 8,
                           (dri_priv->cpp == 2) ? 16 : 24,
                           (dri_priv->cpp == 2) ? 0 : 8, 1);
}
/**
 * Return an intel_context usable for screen-level operations.
 *
 * Currently just the thread's current GL context; reports a problem and
 * returns NULL when no context is bound.
 */
struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen)
{
   /*
    * This should probably change to have the screen allocate a dummy
    * context at screen creation.  For now just use the current context.
    */
   GET_CURRENT_CONTEXT(ctx);

   if (ctx != NULL)
      return intel_context(ctx);

   _mesa_problem(NULL, "No current context in intelScreenContext\n");
   return NULL;
}

View file

@ -0,0 +1 @@
../intel/intel_screen.c

View file

@ -1,409 +0,0 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "glheader.h"
#include "macros.h"
#include "mtypes.h"
#include "colormac.h"
#include "intel_fbo.h"
#include "intel_screen.h"
#include "intel_span.h"
#include "intel_regions.h"
#include "intel_ioctl.h"
#include "intel_tex.h"
#include "swrast/swrast.h"
/*
break intelWriteRGBASpan_ARGB8888
*/

#undef DBG
#define DBG 0

/* Common locals for the spantmp2.h span templates: derive the buffer
 * base pointer from the renderbuffer's mapped region and the current
 * drawable origin.  yScale/yBias flip Y for window-system buffers;
 * render-to-texture surfaces are not flipped. */
#define LOCAL_VARS							\
   struct intel_context *intel = intel_context(ctx);			\
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);		\
   const GLint yScale = irb->RenderToTexture ? 1 : -1;			\
   const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1;	\
   GLubyte *buf = (GLubyte *) irb->pfMap				\
      + (intel->drawY * irb->pfPitch + intel->drawX) * irb->region->cpp;\
   GLuint p;								\
   assert(irb->pfMap);							\
   (void) p;

/* XXX FBO: this is identical to the macro in spantmp2.h except we get
 * the cliprect info from the context, not the driDrawable.
 * Move this into spantmp2.h someday.
 */
#define HW_CLIPLOOP()							\
   do {									\
      int _nc = intel->numClipRects;					\
      while ( _nc-- ) {							\
	 int minx = intel->pClipRects[_nc].x1 - intel->drawX;		\
	 int miny = intel->pClipRects[_nc].y1 - intel->drawY;		\
	 int maxx = intel->pClipRects[_nc].x2 - intel->drawX;		\
	 int maxy = intel->pClipRects[_nc].y2 - intel->drawY;

#define Y_FLIP(_y) ((_y) * yScale + yBias)

/* Locking is done once around the whole span-render phase (see
 * intelSpanRenderStart/Finish), so the per-span hooks are empty. */
#define HW_LOCK()

#define HW_UNLOCK()

/* 16 bit, RGB565 color spanline and pixel functions
 */
#define SPANTMP_PIXEL_FMT GL_RGB
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5

#define TAG(x) intel##x##_RGB565
#define TAG2(x,y) intel##x##_RGB565##y
#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 2)
#include "spantmp2.h"

/* 32 bit, ARGB8888 color spanline and pixel functions
 */
#define SPANTMP_PIXEL_FMT GL_BGRA
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV

#define TAG(x) intel##x##_ARGB8888
#define TAG2(x,y) intel##x##_ARGB8888##y
#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 4)
#include "spantmp2.h"
/* Common locals for the depthtmp.h / stenciltmp.h templates; same
 * addressing scheme as LOCAL_VARS above but pitch is kept in pixels. */
#define LOCAL_DEPTH_VARS						\
   struct intel_context *intel = intel_context(ctx);			\
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);		\
   const GLuint pitch = irb->pfPitch/***XXX region->pitch*/; /* in pixels */ \
   const GLint yScale = irb->RenderToTexture ? 1 : -1;			\
   const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1;	\
   char *buf = (char *) irb->pfMap/*XXX use region->map*/ +		\
      (intel->drawY * pitch + intel->drawX) * irb->region->cpp;

#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS

/**
 ** 16-bit depthbuffer functions.
 **/
#define WRITE_DEPTH( _x, _y, d ) \
   ((GLushort *)buf)[(_x) + (_y) * pitch] = d;

#define READ_DEPTH( d, _x, _y )	\
   d = ((GLushort *)buf)[(_x) + (_y) * pitch];

#define TAG(x) intel##x##_z16
#include "depthtmp.h"

/**
 ** 24/8-bit interleaved depth/stencil functions
 ** Note: we're actually reading back combined depth+stencil values.
 ** The wrappers in main/depthstencil.c are used to extract the depth
 ** and stencil values.
 **/
/* Change ZZZS -> SZZZ */
#define WRITE_DEPTH( _x, _y, d ) {			\
   GLuint tmp = ((d) >> 8) | ((d) << 24);		\
   ((GLuint *)buf)[(_x) + (_y) * pitch] = tmp;		\
}

/* Change SZZZ -> ZZZS */
#define READ_DEPTH( d, _x, _y ) {			\
   GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch];	\
   d = (tmp << 8) | (tmp >> 24);			\
}

#define TAG(x) intel##x##_z24_s8
#include "depthtmp.h"

/**
 ** 8-bit stencil function (XXX FBO: This is obsolete)
 **/
/* Stencil lives in the top byte of the 32-bit depth/stencil word, so
 * read-modify-write to preserve the depth bits. */
#define WRITE_STENCIL( _x, _y, d ) {			\
   GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch];	\
   tmp &= 0xffffff;					\
   tmp |= ((d) << 24);					\
   ((GLuint *) buf)[(_x) + (_y) * pitch] = tmp;		\
}

#define READ_STENCIL( d, _x, _y )			\
   d = ((GLuint *)buf)[(_x) + (_y) * pitch] >> 24;

#define TAG(x) intel##x##_z24_s8
#include "stenciltmp.h"
/**
 * Map or unmap all the renderbuffers which we may need during
 * software rendering.
 * XXX in the future, we could probably convey extra information to
 * reduce the number of mappings needed.  I.e. if doing a glReadPixels
 * from the depth buffer, we really only need one mapping.
 *
 * XXX Rewrite this function someday.
 * We can probably just loop over all the renderbuffer attachments,
 * map/unmap all of them, and not worry about the _ColorDrawBuffers
 * _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
 *
 * \param map  GL_TRUE to map everything, GL_FALSE to unmap.
 */
static void
intel_map_unmap_buffers(struct intel_context *intel, GLboolean map)
{
   GLcontext *ctx = &intel->ctx;
   GLuint i, j;
   struct intel_renderbuffer *irb;

   /* color draw buffers */
   for (i = 0; i < ctx->Const.MaxDrawBuffers; i++) {
      for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers[i]; j++) {
         struct gl_renderbuffer *rb =
            ctx->DrawBuffer->_ColorDrawBuffers[i][j];
         irb = intel_renderbuffer(rb);

         if (irb) {
            /* this is a user-created intel_renderbuffer */
            if (irb->region) {
               if (map)
                  intel_region_map(intel->intelScreen, irb->region);
               else
                  intel_region_unmap(intel->intelScreen, irb->region);
               /* keep the span-function shortcuts in sync with the map */
               irb->pfMap = irb->region->map;
               irb->pfPitch = irb->region->pitch;
            }
         }
      }
   }

   /* check for render to textures */
   for (i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer_attachment *att =
         ctx->DrawBuffer->Attachment + i;
      struct gl_texture_object *tex = att->Texture;
      if (tex) {
         /* render to texture */
         ASSERT(att->Renderbuffer);
         /* NOTE: a gl_texture_image lookup that was computed and never
          * used on the map path has been removed here. */
         if (map)
            intel_tex_map_images(intel, intel_texture_object(tex));
         else
            intel_tex_unmap_images(intel, intel_texture_object(tex));
      }
   }

   /* color read buffers */
   irb = intel_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
   if (irb && irb->region) {
      if (map)
         intel_region_map(intel->intelScreen, irb->region);
      else
         intel_region_unmap(intel->intelScreen, irb->region);
      irb->pfMap = irb->region->map;
      irb->pfPitch = irb->region->pitch;
   }

   /* Account for front/back color page flipping.
    * The span routines use the pfMap and pfPitch fields which will
    * swap the front/back region map/pitch if we're page flipped.
    * Do this after mapping, above, so the map field is valid.
    */
#if 0
   if (map && ctx->DrawBuffer->Name == 0) {
      struct intel_renderbuffer *irbFront
         = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_FRONT_LEFT);
      struct intel_renderbuffer *irbBack
         = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_BACK_LEFT);
      if (irbBack) {
         /* double buffered */
         if (intel->sarea->pf_current_page == 0) {
            irbFront->pfMap = irbFront->region->map;
            irbFront->pfPitch = irbFront->region->pitch;
            irbBack->pfMap = irbBack->region->map;
            irbBack->pfPitch = irbBack->region->pitch;
         }
         else {
            irbFront->pfMap = irbBack->region->map;
            irbFront->pfPitch = irbBack->region->pitch;
            irbBack->pfMap = irbFront->region->map;
            irbBack->pfPitch = irbFront->region->pitch;
         }
      }
   }
#endif

   /* depth buffer (Note wrapper!) */
   if (ctx->DrawBuffer->_DepthBuffer) {
      irb = intel_renderbuffer(ctx->DrawBuffer->_DepthBuffer->Wrapped);
      if (irb && irb->region && irb->Base.Name != 0) {
         if (map) {
            intel_region_map(intel->intelScreen, irb->region);
            irb->pfMap = irb->region->map;
            irb->pfPitch = irb->region->pitch;
         }
         else {
            intel_region_unmap(intel->intelScreen, irb->region);
            irb->pfMap = NULL;
            irb->pfPitch = 0;
         }
      }
   }

   /* stencil buffer (Note wrapper!) */
   if (ctx->DrawBuffer->_StencilBuffer) {
      irb = intel_renderbuffer(ctx->DrawBuffer->_StencilBuffer->Wrapped);
      if (irb && irb->region && irb->Base.Name != 0) {
         if (map) {
            intel_region_map(intel->intelScreen, irb->region);
            irb->pfMap = irb->region->map;
            irb->pfPitch = irb->region->pitch;
         }
         else {
            intel_region_unmap(intel->intelScreen, irb->region);
            irb->pfMap = NULL;
            irb->pfPitch = 0;
         }
      }
   }
}
/**
 * Prepare for software rendering.  Map current read/draw framebuffers'
 * renderbuffers and all currently bound texture objects.
 *
 * Old note: Moved locking out to get reasonable span performance.
 */
void
intelSpanRenderStart(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   /* Flush pending rendering and hold the hardware lock for the whole
    * span phase (released in intelSpanRenderFinish). */
   intelFinish(&intel->ctx);
   LOCK_HARDWARE(intel);

#if 0
   /* Just map the framebuffer and all textures.  Bufmgr code will
    * take care of waiting on the necessary fences:
    */
   intel_region_map(intel->intelScreen, intel->front_region);
   intel_region_map(intel->intelScreen, intel->back_region);
   intel_region_map(intel->intelScreen, intel->intelScreen->depth_region);
#endif

   /* Map every texture a fixed-function unit might sample from. */
   for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
         intel_tex_map_images(intel, intel_texture_object(texObj));
      }
   }

   intel_map_unmap_buffers(intel, GL_TRUE);
}
/**
 * Called when done software rendering.  Unmap the buffers we mapped in
 * the above function.
 */
void
intelSpanRenderFinish(GLcontext * ctx)
{
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   _swrast_flush(ctx);

   /* Now unmap the framebuffer:
    */
#if 0
   intel_region_unmap(intel, intel->front_region);
   intel_region_unmap(intel, intel->back_region);
   intel_region_unmap(intel, intel->intelScreen->depth_region);
#endif

   /* Unmap the textures mapped in intelSpanRenderStart. */
   for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
      if (ctx->Texture.Unit[i]._ReallyEnabled) {
         struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
         intel_tex_unmap_images(intel, intel_texture_object(texObj));
      }
   }

   intel_map_unmap_buffers(intel, GL_FALSE);

   /* Drop the lock taken in intelSpanRenderStart. */
   UNLOCK_HARDWARE(intel);
}
/**
 * Hook our span-rendering start/finish callbacks into swrast's
 * device-driver table for this context.
 */
void
intelInitSpanFuncs(GLcontext * ctx)
{
   struct swrast_device_driver *dd = _swrast_GetDeviceDriverReference(ctx);

   dd->SpanRenderStart = intelSpanRenderStart;
   dd->SpanRenderFinish = intelSpanRenderFinish;
}
/**
 * Plug in appropriate span read/write functions for the given renderbuffer.
 * These are used for the software fallbacks.
 */
void
intel_set_span_functions(struct gl_renderbuffer *rb)
{
   switch (rb->_ActualFormat) {
   case GL_RGB5:
      /* 565 RGB */
      intelInitPointers_RGB565(rb);
      break;
   case GL_RGBA8:
      /* 8888 RGBA */
      intelInitPointers_ARGB8888(rb);
      break;
   case GL_DEPTH_COMPONENT16:
      intelInitDepthPointers_z16(rb);
      break;
   case GL_DEPTH_COMPONENT24:   /* XXX FBO remove */
   case GL_DEPTH24_STENCIL8_EXT:
      intelInitDepthPointers_z24_s8(rb);
      break;
   case GL_STENCIL_INDEX8_EXT:  /* XXX FBO remove */
      intelInitStencilPointers_z24_s8(rb);
      break;
   default:
      _mesa_problem(NULL,
                    "Unexpected _ActualFormat in intelSetSpanFunctions");
   }
}

Some files were not shown because too many files have changed in this diff Show more