mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-02-22 18:30:31 +01:00
r300: bo and cs abstraction.
This abstract memory management and command stream building so we can use different backend either legacy one which use old pathway or a new one like with a new memory manager. This works was done by : Nicolai Haehnle Dave Airlie Jerome Glisse
This commit is contained in:
parent
80d6379722
commit
e5d5dab8c0
36 changed files with 3393 additions and 3376 deletions
|
|
@ -21,13 +21,14 @@ COMMON_SOURCES = \
|
|||
../common/dri_util.c
|
||||
|
||||
DRIVER_SOURCES = \
|
||||
radeon_bo_legacy.c \
|
||||
radeon_cs_legacy.c \
|
||||
radeon_screen.c \
|
||||
radeon_context.c \
|
||||
radeon_ioctl.c \
|
||||
radeon_lock.c \
|
||||
radeon_span.c \
|
||||
radeon_state.c \
|
||||
r300_mem.c \
|
||||
r300_context.c \
|
||||
r300_ioctl.c \
|
||||
r300_cmdbuf.c \
|
||||
|
|
@ -36,6 +37,7 @@ DRIVER_SOURCES = \
|
|||
r300_texmem.c \
|
||||
r300_tex.c \
|
||||
r300_texstate.c \
|
||||
r300_mipmap_tree.c \
|
||||
radeon_program.c \
|
||||
radeon_program_alu.c \
|
||||
radeon_program_pair.c \
|
||||
|
|
@ -54,7 +56,10 @@ DRIVER_SOURCES = \
|
|||
C_SOURCES = $(COMMON_SOURCES) $(DRIVER_SOURCES)
|
||||
|
||||
DRIVER_DEFINES = -DCOMPILE_R300 -DR200_MERGED=0 \
|
||||
-DRADEON_COMMON=1 -DRADEON_COMMON_FOR_R300
|
||||
-DRADEON_COMMON=1 -DRADEON_COMMON_FOR_R300 \
|
||||
# -DRADEON_BO_TRACK_OPEN \
|
||||
# -DRADEON_BO_TRACK_REF \
|
||||
-Wall
|
||||
|
||||
SYMLINKS = \
|
||||
server/radeon_dri.c \
|
||||
|
|
@ -68,7 +73,12 @@ COMMON_SYMLINKS = \
|
|||
radeon_chipset.h \
|
||||
radeon_screen.c \
|
||||
radeon_screen.h \
|
||||
radeon_span.h
|
||||
radeon_span.h \
|
||||
radeon_buffer.h \
|
||||
radeon_bo_legacy.c \
|
||||
radeon_cs_legacy.c \
|
||||
radeon_bo_legacy.h \
|
||||
radeon_cs_legacy.h
|
||||
|
||||
##### TARGETS #####
|
||||
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "drm.h"
|
||||
#include "radeon_drm.h"
|
||||
|
||||
#include "radeon_buffer.h"
|
||||
#include "radeon_ioctl.h"
|
||||
#include "r300_context.h"
|
||||
#include "r300_ioctl.h"
|
||||
|
|
@ -51,62 +52,33 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "r300_reg.h"
|
||||
#include "r300_cmdbuf.h"
|
||||
#include "r300_emit.h"
|
||||
#include "r300_mipmap_tree.h"
|
||||
#include "r300_state.h"
|
||||
#include "radeon_cs_legacy.h"
|
||||
|
||||
// Set this to 1 for extremely verbose debugging of command buffers
|
||||
#define DEBUG_CMDBUF 0
|
||||
|
||||
/** # of dwords reserved for additional instructions that may need to be written
|
||||
* during flushing.
|
||||
*/
|
||||
#define SPACE_FOR_FLUSHING 4
|
||||
|
||||
/**
|
||||
* Send the current command buffer via ioctl to the hardware.
|
||||
*/
|
||||
int r300FlushCmdBufLocked(r300ContextPtr r300, const char *caller)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
drm_radeon_cmd_buffer_t cmd;
|
||||
int start;
|
||||
|
||||
if (r300->radeon.lost_context) {
|
||||
start = 0;
|
||||
r300->radeon.lost_context = GL_FALSE;
|
||||
} else
|
||||
start = r300->cmdbuf.count_reemit;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL) {
|
||||
fprintf(stderr, "%s from %s - %i cliprects\n",
|
||||
__FUNCTION__, caller, r300->radeon.numClipRects);
|
||||
|
||||
if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
|
||||
for (i = start; i < r300->cmdbuf.count_used; ++i)
|
||||
fprintf(stderr, "%d: %08x\n", i,
|
||||
r300->cmdbuf.cmd_buf[i]);
|
||||
if (r300->cmdbuf.flushing) {
|
||||
fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
cmd.buf = (char *)(r300->cmdbuf.cmd_buf + start);
|
||||
cmd.bufsz = (r300->cmdbuf.count_used - start) * 4;
|
||||
|
||||
if (r300->radeon.state.scissor.enabled) {
|
||||
cmd.nbox = r300->radeon.state.scissor.numClipRects;
|
||||
cmd.boxes =
|
||||
(drm_clip_rect_t *) r300->radeon.state.scissor.pClipRects;
|
||||
} else {
|
||||
cmd.nbox = r300->radeon.numClipRects;
|
||||
cmd.boxes = (drm_clip_rect_t *) r300->radeon.pClipRects;
|
||||
}
|
||||
|
||||
ret = drmCommandWrite(r300->radeon.dri.fd,
|
||||
DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_SYNC) {
|
||||
fprintf(stderr, "Syncing in %s (from %s)\n\n",
|
||||
__FUNCTION__, caller);
|
||||
radeonWaitForIdleLocked(&r300->radeon);
|
||||
}
|
||||
|
||||
r300->dma.nr_released_bufs = 0;
|
||||
r300->cmdbuf.count_used = 0;
|
||||
r300->cmdbuf.count_reemit = 0;
|
||||
|
||||
r300->cmdbuf.flushing = 1;
|
||||
ret = radeon_cs_emit(r300->cmdbuf.cs);
|
||||
radeon_cs_erase(r300->cmdbuf.cs);
|
||||
r300->cmdbuf.flushing = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -115,9 +87,7 @@ int r300FlushCmdBuf(r300ContextPtr r300, const char *caller)
|
|||
int ret;
|
||||
|
||||
LOCK_HARDWARE(&r300->radeon);
|
||||
|
||||
ret = r300FlushCmdBufLocked(r300, caller);
|
||||
|
||||
UNLOCK_HARDWARE(&r300->radeon);
|
||||
|
||||
if (ret) {
|
||||
|
|
@ -128,13 +98,44 @@ int r300FlushCmdBuf(r300ContextPtr r300, const char *caller)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void r300PrintStateAtom(r300ContextPtr r300, struct r300_state_atom *state)
|
||||
/**
|
||||
* Make sure that enough space is available in the command buffer
|
||||
* by flushing if necessary.
|
||||
*
|
||||
* \param dwords The number of dwords we need to be free on the command buffer
|
||||
*/
|
||||
void r300EnsureCmdBufSpace(r300ContextPtr r300, int dwords, const char *caller)
|
||||
{
|
||||
assert(dwords < r300->cmdbuf.size);
|
||||
|
||||
if ((r300->cmdbuf.cs->cdw + dwords + 128) > r300->cmdbuf.size ||
|
||||
radeon_cs_need_flush(r300->cmdbuf.cs)) {
|
||||
r300FlushCmdBuf(r300, caller);
|
||||
}
|
||||
}
|
||||
|
||||
void r300BeginBatch(r300ContextPtr r300, int n,
|
||||
int dostate,
|
||||
const char *file,
|
||||
const char *function,
|
||||
int line)
|
||||
{
|
||||
r300EnsureCmdBufSpace(r300, n, function);
|
||||
if (!r300->cmdbuf.cs->cdw && dostate) {
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL)
|
||||
fprintf(stderr, "Reemit state after flush (from %s)\n", function);
|
||||
r300EmitState(r300);
|
||||
}
|
||||
radeon_cs_begin(r300->cmdbuf.cs, n, file, function, line);
|
||||
}
|
||||
|
||||
static void r300PrintStateAtom(r300ContextPtr r300,
|
||||
struct r300_state_atom *state)
|
||||
{
|
||||
int i;
|
||||
int dwords = (*state->check) (r300, state);
|
||||
|
||||
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords,
|
||||
state->cmd_size);
|
||||
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_VERBOSE) {
|
||||
for (i = 0; i < dwords; i++) {
|
||||
|
|
@ -152,33 +153,18 @@ static void r300PrintStateAtom(r300ContextPtr r300, struct r300_state_atom *stat
|
|||
*/
|
||||
static INLINE void r300EmitAtoms(r300ContextPtr r300, GLboolean dirty)
|
||||
{
|
||||
BATCH_LOCALS(r300);
|
||||
struct r300_state_atom *atom;
|
||||
uint32_t *dest;
|
||||
int dwords;
|
||||
|
||||
dest = r300->cmdbuf.cmd_buf + r300->cmdbuf.count_used;
|
||||
|
||||
/* Emit WAIT */
|
||||
*dest = cmdwait(R300_WAIT_3D | R300_WAIT_3D_CLEAN);
|
||||
dest++;
|
||||
r300->cmdbuf.count_used++;
|
||||
|
||||
/* Emit cache flush */
|
||||
*dest = cmdpacket0(R300_TX_INVALTAGS, 1);
|
||||
dest++;
|
||||
r300->cmdbuf.count_used++;
|
||||
|
||||
*dest = R300_TX_FLUSH;
|
||||
dest++;
|
||||
r300->cmdbuf.count_used++;
|
||||
|
||||
/* Emit END3D */
|
||||
*dest = cmdpacify();
|
||||
dest++;
|
||||
r300->cmdbuf.count_used++;
|
||||
BEGIN_BATCH_NO_AUTOSTATE(4);
|
||||
OUT_BATCH(cmdwait(R300_WAIT_3D | R300_WAIT_3D_CLEAN));
|
||||
OUT_BATCH(cmdpacket0(R300_TX_INVALTAGS, 1));
|
||||
OUT_BATCH(R300_TX_FLUSH);
|
||||
OUT_BATCH(cmdpacify());
|
||||
END_BATCH();
|
||||
|
||||
/* Emit actual atoms */
|
||||
|
||||
foreach(atom, &r300->hw.atomlist) {
|
||||
if ((atom->dirty || r300->hw.all_dirty) == dirty) {
|
||||
dwords = (*atom->check) (r300, atom);
|
||||
|
|
@ -186,9 +172,13 @@ static INLINE void r300EmitAtoms(r300ContextPtr r300, GLboolean dirty)
|
|||
if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
|
||||
r300PrintStateAtom(r300, atom);
|
||||
}
|
||||
memcpy(dest, atom->cmd, dwords * 4);
|
||||
dest += dwords;
|
||||
r300->cmdbuf.count_used += dwords;
|
||||
if (atom->emit) {
|
||||
(*atom->emit)(r300);
|
||||
} else {
|
||||
BEGIN_BATCH_NO_AUTOSTATE(dwords);
|
||||
OUT_BATCH_TABLE(atom->cmd, dwords);
|
||||
END_BATCH();
|
||||
}
|
||||
atom->dirty = GL_FALSE;
|
||||
} else {
|
||||
if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
|
||||
|
|
@ -198,6 +188,8 @@ static INLINE void r300EmitAtoms(r300ContextPtr r300, GLboolean dirty)
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
COMMIT_BATCH();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -211,31 +203,26 @@ void r300EmitState(r300ContextPtr r300)
|
|||
if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_PRIMS))
|
||||
fprintf(stderr, "%s\n", __FUNCTION__);
|
||||
|
||||
if (r300->cmdbuf.count_used && !r300->hw.is_dirty
|
||||
if (r300->cmdbuf.cs->cdw && !r300->hw.is_dirty
|
||||
&& !r300->hw.all_dirty)
|
||||
return;
|
||||
|
||||
/* To avoid going across the entire set of states multiple times, just check
|
||||
* for enough space for the case of emitting all state, and inline the
|
||||
* r300AllocCmdBuf code here without all the checks.
|
||||
* for enough space for the case of emitting all state.
|
||||
*/
|
||||
r300EnsureCmdBufSpace(r300, r300->hw.max_state_size, __FUNCTION__);
|
||||
|
||||
if (!r300->cmdbuf.count_used) {
|
||||
if (!r300->cmdbuf.cs->cdw) {
|
||||
if (RADEON_DEBUG & DEBUG_STATE)
|
||||
fprintf(stderr, "Begin reemit state\n");
|
||||
|
||||
r300EmitAtoms(r300, GL_FALSE);
|
||||
r300->cmdbuf.count_reemit = r300->cmdbuf.count_used;
|
||||
}
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_STATE)
|
||||
fprintf(stderr, "Begin dirty state\n");
|
||||
|
||||
r300EmitAtoms(r300, GL_TRUE);
|
||||
|
||||
assert(r300->cmdbuf.count_used < r300->cmdbuf.size);
|
||||
|
||||
r300->hw.is_dirty = GL_FALSE;
|
||||
r300->hw.all_dirty = GL_FALSE;
|
||||
}
|
||||
|
|
@ -244,6 +231,84 @@ void r300EmitState(r300ContextPtr r300)
|
|||
#define vpu_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->vpu.count)
|
||||
#define r500fp_count(ptr) (((drm_r300_cmd_header_t*)(ptr))->r500fp.count)
|
||||
|
||||
static void emit_tex_offsets(r300ContextPtr r300)
|
||||
{
|
||||
BATCH_LOCALS(r300);
|
||||
int numtmus = packet0_count(r300->hw.tex.offset.cmd);
|
||||
|
||||
if (numtmus) {
|
||||
int i;
|
||||
|
||||
for(i = 0; i < numtmus; ++i) {
|
||||
BEGIN_BATCH(2);
|
||||
OUT_BATCH_REGSEQ(R300_TX_OFFSET_0 + (i * 4), 1);
|
||||
r300TexObj *t = r300->hw.textures[i];
|
||||
if (t && !t->image_override) {
|
||||
OUT_BATCH_RELOC(t->tile_bits, t->mt->bo, 0, 0);
|
||||
} else if (!t) {
|
||||
OUT_BATCH(r300->radeon.radeonScreen->texOffset[0]);
|
||||
} else {
|
||||
OUT_BATCH(t->override_offset);
|
||||
}
|
||||
END_BATCH();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void emit_cb_offset(r300ContextPtr r300)
|
||||
{
|
||||
BATCH_LOCALS(r300);
|
||||
struct radeon_renderbuffer *rrb;
|
||||
uint32_t cbpitch;
|
||||
|
||||
rrb = r300->radeon.state.color.rrb;
|
||||
if (!rrb) {
|
||||
fprintf(stderr, "no rrb\n");
|
||||
return;
|
||||
}
|
||||
|
||||
cbpitch = rrb->pitch;
|
||||
if (rrb->cpp == 4)
|
||||
cbpitch |= R300_COLOR_FORMAT_ARGB8888;
|
||||
else
|
||||
cbpitch |= R300_COLOR_FORMAT_RGB565;
|
||||
|
||||
if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE)
|
||||
cbpitch |= R300_COLOR_TILE_ENABLE;
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
OUT_BATCH_REGSEQ(R300_RB3D_COLOROFFSET0, 1);
|
||||
OUT_BATCH_RELOC(0, rrb->bo, 0, 0);
|
||||
OUT_BATCH_REGSEQ(R300_RB3D_COLORPITCH0, 1);
|
||||
OUT_BATCH(cbpitch);
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
static void emit_zb_offset(r300ContextPtr r300)
|
||||
{
|
||||
BATCH_LOCALS(r300);
|
||||
struct radeon_renderbuffer *rrb;
|
||||
uint32_t zbpitch;
|
||||
|
||||
rrb = r300->radeon.state.depth_buffer;
|
||||
if (!rrb)
|
||||
return;
|
||||
|
||||
zbpitch = rrb->pitch;
|
||||
if (rrb->bo->flags & RADEON_BO_FLAGS_MACRO_TILE) {
|
||||
zbpitch |= R300_DEPTHMACROTILE_ENABLE;
|
||||
}
|
||||
if (r300->radeon.glCtx->Visual.depthBits == 24) {
|
||||
zbpitch |= R300_DEPTHMICROTILE_TILED;
|
||||
}
|
||||
|
||||
BEGIN_BATCH(4);
|
||||
OUT_BATCH_REGSEQ(R300_ZB_DEPTHOFFSET, 1);
|
||||
OUT_BATCH_RELOC(0, rrb->bo, 0, 0);
|
||||
OUT_BATCH_REGVAL(R300_ZB_DEPTHPITCH, zbpitch);
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
static int check_always(r300ContextPtr r300, struct r300_state_atom *atom)
|
||||
{
|
||||
return atom->cmd_size;
|
||||
|
|
@ -480,8 +545,7 @@ void r300InitCmdBuf(r300ContextPtr r300)
|
|||
ALLOC_STATE(rop, always, 2, 0);
|
||||
r300->hw.rop.cmd[0] = cmdpacket0(R300_RB3D_ROPCNTL, 1);
|
||||
ALLOC_STATE(cb, always, R300_CB_CMDSIZE, 0);
|
||||
r300->hw.cb.cmd[R300_CB_CMD_0] = cmdpacket0(R300_RB3D_COLOROFFSET0, 1);
|
||||
r300->hw.cb.cmd[R300_CB_CMD_1] = cmdpacket0(R300_RB3D_COLORPITCH0, 1);
|
||||
r300->hw.cb.emit = &emit_cb_offset;
|
||||
ALLOC_STATE(rb3d_dither_ctl, always, 10, 0);
|
||||
r300->hw.rb3d_dither_ctl.cmd[0] = cmdpacket0(R300_RB3D_DITHER_CTL, 9);
|
||||
ALLOC_STATE(rb3d_aaresolve_ctl, always, 2, 0);
|
||||
|
|
@ -495,7 +559,7 @@ void r300InitCmdBuf(r300ContextPtr r300)
|
|||
r300->hw.zstencil_format.cmd[0] =
|
||||
cmdpacket0(R300_ZB_FORMAT, 4);
|
||||
ALLOC_STATE(zb, always, R300_ZB_CMDSIZE, 0);
|
||||
r300->hw.zb.cmd[R300_ZB_CMD_0] = cmdpacket0(R300_ZB_DEPTHOFFSET, 2);
|
||||
r300->hw.zb.emit = emit_zb_offset;
|
||||
ALLOC_STATE(zb_depthclearvalue, always, 2, 0);
|
||||
r300->hw.zb_depthclearvalue.cmd[0] = cmdpacket0(R300_ZB_DEPTHCLEARVALUE, 1);
|
||||
ALLOC_STATE(unk4F30, always, 3, 0);
|
||||
|
|
@ -562,9 +626,10 @@ void r300InitCmdBuf(r300ContextPtr r300)
|
|||
ALLOC_STATE(tex.pitch, variable, mtu + 1, 0);
|
||||
r300->hw.tex.pitch.cmd[R300_TEX_CMD_0] = cmdpacket0(R300_TX_FORMAT2_0, 0);
|
||||
|
||||
ALLOC_STATE(tex.offset, variable, mtu + 1, 0);
|
||||
ALLOC_STATE(tex.offset, variable, 1, 0);
|
||||
r300->hw.tex.offset.cmd[R300_TEX_CMD_0] =
|
||||
cmdpacket0(R300_TX_OFFSET_0, 0);
|
||||
r300->hw.tex.offset.emit = &emit_tex_offsets;
|
||||
|
||||
ALLOC_STATE(tex.chroma_key, variable, mtu + 1, 0);
|
||||
r300->hw.tex.chroma_key.cmd[R300_TEX_CMD_0] =
|
||||
|
|
@ -587,6 +652,7 @@ void r300InitCmdBuf(r300ContextPtr r300)
|
|||
if (size > 64 * 256)
|
||||
size = 64 * 256;
|
||||
|
||||
size = 64 * 1024 / 4;
|
||||
if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
|
||||
fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
|
||||
sizeof(drm_r300_cmd_header_t));
|
||||
|
|
@ -597,10 +663,14 @@ void r300InitCmdBuf(r300ContextPtr r300)
|
|||
size * 4, r300->hw.max_state_size * 4);
|
||||
}
|
||||
|
||||
r300->cmdbuf.csm = radeon_cs_manager_legacy(&r300->radeon);
|
||||
if (r300->cmdbuf.csm == NULL) {
|
||||
/* FIXME: fatal error */
|
||||
return;
|
||||
}
|
||||
r300->cmdbuf.cs = radeon_cs_create(r300->cmdbuf.csm, size);
|
||||
assert(r300->cmdbuf.cs != NULL);
|
||||
r300->cmdbuf.size = size;
|
||||
r300->cmdbuf.cmd_buf = (uint32_t *) CALLOC(size * 4);
|
||||
r300->cmdbuf.count_used = 0;
|
||||
r300->cmdbuf.count_reemit = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -610,66 +680,8 @@ void r300DestroyCmdBuf(r300ContextPtr r300)
|
|||
{
|
||||
struct r300_state_atom *atom;
|
||||
|
||||
FREE(r300->cmdbuf.cmd_buf);
|
||||
|
||||
radeon_cs_destroy(r300->cmdbuf.cs);
|
||||
foreach(atom, &r300->hw.atomlist) {
|
||||
FREE(atom->cmd);
|
||||
}
|
||||
}
|
||||
|
||||
void r300EmitBlit(r300ContextPtr rmesa,
|
||||
GLuint color_fmt,
|
||||
GLuint src_pitch,
|
||||
GLuint src_offset,
|
||||
GLuint dst_pitch,
|
||||
GLuint dst_offset,
|
||||
GLint srcx, GLint srcy,
|
||||
GLint dstx, GLint dsty, GLuint w, GLuint h)
|
||||
{
|
||||
drm_r300_cmd_header_t *cmd;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL)
|
||||
fprintf(stderr,
|
||||
"%s src %x/%x %d,%d dst: %x/%x %d,%d sz: %dx%d\n",
|
||||
__FUNCTION__, src_pitch, src_offset, srcx, srcy,
|
||||
dst_pitch, dst_offset, dstx, dsty, w, h);
|
||||
|
||||
assert((src_pitch & 63) == 0);
|
||||
assert((dst_pitch & 63) == 0);
|
||||
assert((src_offset & 1023) == 0);
|
||||
assert((dst_offset & 1023) == 0);
|
||||
assert(w < (1 << 16));
|
||||
assert(h < (1 << 16));
|
||||
|
||||
cmd = (drm_r300_cmd_header_t *) r300AllocCmdBuf(rmesa, 8, __FUNCTION__);
|
||||
|
||||
cmd[0].header.cmd_type = R300_CMD_PACKET3;
|
||||
cmd[0].header.pad0 = R300_CMD_PACKET3_RAW;
|
||||
cmd[1].u = R300_CP_CMD_BITBLT_MULTI | (5 << 16);
|
||||
cmd[2].u = (RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
|
||||
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
|
||||
RADEON_GMC_BRUSH_NONE |
|
||||
(color_fmt << 8) |
|
||||
RADEON_GMC_SRC_DATATYPE_COLOR |
|
||||
RADEON_ROP3_S |
|
||||
RADEON_DP_SRC_SOURCE_MEMORY |
|
||||
RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
|
||||
|
||||
cmd[3].u = ((src_pitch / 64) << 22) | (src_offset >> 10);
|
||||
cmd[4].u = ((dst_pitch / 64) << 22) | (dst_offset >> 10);
|
||||
cmd[5].u = (srcx << 16) | srcy;
|
||||
cmd[6].u = (dstx << 16) | dsty; /* dst */
|
||||
cmd[7].u = (w << 16) | h;
|
||||
}
|
||||
|
||||
void r300EmitWait(r300ContextPtr rmesa, GLuint flags)
|
||||
{
|
||||
drm_r300_cmd_header_t *cmd;
|
||||
|
||||
assert(!(flags & ~(R300_WAIT_2D | R300_WAIT_3D)));
|
||||
|
||||
cmd = (drm_r300_cmd_header_t *) r300AllocCmdBuf(rmesa, 1, __FUNCTION__);
|
||||
cmd[0].u = 0;
|
||||
cmd[0].wait.cmd_type = R300_CMD_WAIT;
|
||||
cmd[0].wait.flags = flags;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#define __R300_CMDBUF_H__
|
||||
|
||||
#include "r300_context.h"
|
||||
#include "radeon_cs.h"
|
||||
|
||||
extern int r300FlushCmdBufLocked(r300ContextPtr r300, const char *caller);
|
||||
extern int r300FlushCmdBuf(r300ContextPtr r300, const char *caller);
|
||||
|
|
@ -45,72 +46,77 @@ extern void r300EmitState(r300ContextPtr r300);
|
|||
|
||||
extern void r300InitCmdBuf(r300ContextPtr r300);
|
||||
extern void r300DestroyCmdBuf(r300ContextPtr r300);
|
||||
extern void r300EnsureCmdBufSpace(r300ContextPtr r300, int dwords, const char *caller);
|
||||
|
||||
void r300BeginBatch(r300ContextPtr r300,
|
||||
int n,
|
||||
int dostate,
|
||||
const char *file,
|
||||
const char *function,
|
||||
int line);
|
||||
|
||||
/**
|
||||
* Make sure that enough space is available in the command buffer
|
||||
* by flushing if necessary.
|
||||
*
|
||||
* \param dwords The number of dwords we need to be free on the command buffer
|
||||
* Every function writing to the command buffer needs to declare this
|
||||
* to get the necessary local variables.
|
||||
*/
|
||||
static INLINE void r300EnsureCmdBufSpace(r300ContextPtr r300,
|
||||
int dwords, const char *caller)
|
||||
{
|
||||
assert(dwords < r300->cmdbuf.size);
|
||||
|
||||
if (r300->cmdbuf.count_used + dwords > r300->cmdbuf.size)
|
||||
r300FlushCmdBuf(r300, caller);
|
||||
}
|
||||
#define BATCH_LOCALS(r300) \
|
||||
const r300ContextPtr b_l_r300 = r300
|
||||
|
||||
/**
|
||||
* Allocate the given number of dwords in the command buffer and return
|
||||
* a pointer to the allocated area.
|
||||
* When necessary, these functions cause a flush. r300AllocCmdBuf() also
|
||||
* causes state reemission after a flush. This is necessary to ensure
|
||||
* correct hardware state after an unlock.
|
||||
* Prepare writing n dwords to the command buffer,
|
||||
* including producing any necessary state emits on buffer wraparound.
|
||||
*/
|
||||
static INLINE uint32_t *r300RawAllocCmdBuf(r300ContextPtr r300,
|
||||
int dwords, const char *caller)
|
||||
{
|
||||
uint32_t *ptr;
|
||||
#define BEGIN_BATCH(n) r300BeginBatch(b_l_r300, n, 1, __FILE__, __FUNCTION__, __LINE__)
|
||||
|
||||
r300EnsureCmdBufSpace(r300, dwords, caller);
|
||||
/**
|
||||
* Same as BEGIN_BATCH, but do not cause automatic state emits.
|
||||
*/
|
||||
#define BEGIN_BATCH_NO_AUTOSTATE(n) r300BeginBatch(b_l_r300, n, 0, __FILE__, __FUNCTION__, __LINE__)
|
||||
|
||||
ptr = &r300->cmdbuf.cmd_buf[r300->cmdbuf.count_used];
|
||||
r300->cmdbuf.count_used += dwords;
|
||||
return ptr;
|
||||
}
|
||||
/**
|
||||
* Write one dword to the command buffer.
|
||||
*/
|
||||
#define OUT_BATCH(data) \
|
||||
do { \
|
||||
radeon_cs_write_dword(b_l_r300->cmdbuf.cs, data);\
|
||||
} while(0)
|
||||
|
||||
static INLINE uint32_t *r300AllocCmdBuf(r300ContextPtr r300,
|
||||
int dwords, const char *caller)
|
||||
{
|
||||
uint32_t *ptr;
|
||||
/**
|
||||
* Write a relocated dword to the command buffer.
|
||||
*/
|
||||
#define OUT_BATCH_RELOC(data, bo, offset, flags) \
|
||||
do { \
|
||||
radeon_cs_write_dword(b_l_r300->cmdbuf.cs, offset);\
|
||||
radeon_cs_write_reloc(b_l_r300->cmdbuf.cs,bo,0,(bo)->size,flags);\
|
||||
} while(0)
|
||||
|
||||
r300EnsureCmdBufSpace(r300, dwords, caller);
|
||||
/**
|
||||
* Write n dwords from ptr to the command buffer.
|
||||
*/
|
||||
#define OUT_BATCH_TABLE(ptr,n) \
|
||||
do { \
|
||||
int _i; \
|
||||
for (_i=0; _i < n; _i++) {\
|
||||
radeon_cs_write_dword(b_l_r300->cmdbuf.cs, ptr[_i]);\
|
||||
}\
|
||||
} while(0)
|
||||
|
||||
if (!r300->cmdbuf.count_used) {
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL)
|
||||
fprintf(stderr,
|
||||
"Reemit state after flush (from %s)\n", caller);
|
||||
r300EmitState(r300);
|
||||
}
|
||||
/**
|
||||
* Finish writing dwords to the command buffer.
|
||||
* The number of (direct or indirect) OUT_BATCH calls between the previous
|
||||
* BEGIN_BATCH and END_BATCH must match the number specified at BEGIN_BATCH time.
|
||||
*/
|
||||
#define END_BATCH() \
|
||||
do { \
|
||||
radeon_cs_end(b_l_r300->cmdbuf.cs, __FILE__, __FUNCTION__, __LINE__);\
|
||||
} while(0)
|
||||
|
||||
ptr = &r300->cmdbuf.cmd_buf[r300->cmdbuf.count_used];
|
||||
r300->cmdbuf.count_used += dwords;
|
||||
return ptr;
|
||||
}
|
||||
|
||||
extern void r300EmitBlit(r300ContextPtr rmesa,
|
||||
GLuint color_fmt,
|
||||
GLuint src_pitch,
|
||||
GLuint src_offset,
|
||||
GLuint dst_pitch,
|
||||
GLuint dst_offset,
|
||||
GLint srcx, GLint srcy,
|
||||
GLint dstx, GLint dsty, GLuint w, GLuint h);
|
||||
|
||||
extern void r300EmitWait(r300ContextPtr rmesa, GLuint flags);
|
||||
extern void r300EmitLOAD_VBPNTR(r300ContextPtr rmesa, int start);
|
||||
extern void r300EmitVertexShader(r300ContextPtr rmesa);
|
||||
extern void r300EmitPixelShader(r300ContextPtr rmesa);
|
||||
/**
|
||||
* After the last END_BATCH() of rendering, this indicates that flushing
|
||||
* the command buffer now is okay.
|
||||
*/
|
||||
#define COMMIT_BATCH() \
|
||||
do { \
|
||||
} while(0)
|
||||
|
||||
#endif /* __R300_CMDBUF_H__ */
|
||||
|
|
|
|||
|
|
@ -59,15 +59,13 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "radeon_span.h"
|
||||
#include "r300_context.h"
|
||||
#include "r300_cmdbuf.h"
|
||||
#include "r300_mipmap_tree.h"
|
||||
#include "r300_state.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include "r300_tex.h"
|
||||
#include "r300_emit.h"
|
||||
#include "r300_swtcl.h"
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
#include "r300_mem.h"
|
||||
#endif
|
||||
|
||||
#include "vblank.h"
|
||||
#include "utils.h"
|
||||
|
|
@ -189,7 +187,7 @@ GLboolean r300CreateContext(const __GLcontextModes * glVisual,
|
|||
struct dd_function_table functions;
|
||||
r300ContextPtr r300;
|
||||
GLcontext *ctx;
|
||||
int tcl_mode, i;
|
||||
int tcl_mode;
|
||||
|
||||
assert(glVisual);
|
||||
assert(driContextPriv);
|
||||
|
|
@ -221,10 +219,6 @@ GLboolean r300CreateContext(const __GLcontextModes * glVisual,
|
|||
r300InitTextureFuncs(&functions);
|
||||
r300InitShaderFuncs(&functions);
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
r300_mem_init(r300);
|
||||
#endif
|
||||
|
||||
if (!radeonInitContext(&r300->radeon, &functions,
|
||||
glVisual, driContextPriv,
|
||||
sharedContextPrivate)) {
|
||||
|
|
@ -233,33 +227,6 @@ GLboolean r300CreateContext(const __GLcontextModes * glVisual,
|
|||
}
|
||||
|
||||
/* Init r300 context data */
|
||||
r300->dma.buf0_address =
|
||||
r300->radeon.radeonScreen->buffers->list[0].address;
|
||||
|
||||
(void)memset(r300->texture_heaps, 0, sizeof(r300->texture_heaps));
|
||||
make_empty_list(&r300->swapped);
|
||||
|
||||
r300->nr_heaps = 1 /* screen->numTexHeaps */ ;
|
||||
assert(r300->nr_heaps < RADEON_NR_TEX_HEAPS);
|
||||
for (i = 0; i < r300->nr_heaps; i++) {
|
||||
/* *INDENT-OFF* */
|
||||
r300->texture_heaps[i] = driCreateTextureHeap(i, r300,
|
||||
screen->
|
||||
texSize[i], 12,
|
||||
RADEON_NR_TEX_REGIONS,
|
||||
(drmTextureRegionPtr)
|
||||
r300->radeon.sarea->
|
||||
tex_list[i],
|
||||
&r300->radeon.sarea->
|
||||
tex_age[i],
|
||||
&r300->swapped,
|
||||
sizeof
|
||||
(r300TexObj),
|
||||
(destroy_texture_object_t
|
||||
*)
|
||||
r300DestroyTexObj);
|
||||
/* *INDENT-ON* */
|
||||
}
|
||||
r300->texture_depth = driQueryOptioni(&r300->radeon.optionCache,
|
||||
"texture_depth");
|
||||
if (r300->texture_depth == DRI_CONF_TEXTURE_DEPTH_FB)
|
||||
|
|
@ -298,12 +265,10 @@ GLboolean r300CreateContext(const __GLcontextModes * glVisual,
|
|||
ctx->Const.MaxLineWidth = R300_LINESIZE_MAX;
|
||||
ctx->Const.MaxLineWidthAA = R300_LINESIZE_MAX;
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
/* Needs further modifications */
|
||||
#if 0
|
||||
ctx->Const.MaxArrayLockSize =
|
||||
( /*512 */ RADEON_BUFFER_SIZE * 16 * 1024) / (4 * 4);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* Initialize the software rasterizer and helper modules.
|
||||
|
|
@ -406,72 +371,6 @@ GLboolean r300CreateContext(const __GLcontextModes * glVisual,
|
|||
return GL_TRUE;
|
||||
}
|
||||
|
||||
static void r300FreeGartAllocations(r300ContextPtr r300)
|
||||
{
|
||||
int i, ret, tries = 0, done_age, in_use = 0;
|
||||
drm_radeon_mem_free_t memfree;
|
||||
|
||||
memfree.region = RADEON_MEM_REGION_GART;
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
for (i = r300->rmm->u_last; i > 0; i--) {
|
||||
if (r300->rmm->u_list[i].ptr == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* check whether this buffer is still in use */
|
||||
if (r300->rmm->u_list[i].pending) {
|
||||
in_use++;
|
||||
}
|
||||
}
|
||||
/* Cannot flush/lock if no context exists. */
|
||||
if (in_use)
|
||||
r300FlushCmdBuf(r300, __FUNCTION__);
|
||||
|
||||
done_age = radeonGetAge((radeonContextPtr) r300);
|
||||
|
||||
for (i = r300->rmm->u_last; i > 0; i--) {
|
||||
if (r300->rmm->u_list[i].ptr == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* check whether this buffer is still in use */
|
||||
if (!r300->rmm->u_list[i].pending) {
|
||||
continue;
|
||||
}
|
||||
|
||||
assert(r300->rmm->u_list[i].h_pending == 0);
|
||||
|
||||
tries = 0;
|
||||
while (r300->rmm->u_list[i].age > done_age && tries++ < 1000) {
|
||||
usleep(10);
|
||||
done_age = radeonGetAge((radeonContextPtr) r300);
|
||||
}
|
||||
if (tries >= 1000) {
|
||||
WARN_ONCE("Failed to idle region!");
|
||||
}
|
||||
|
||||
memfree.region_offset = (char *)r300->rmm->u_list[i].ptr -
|
||||
(char *)r300->radeon.radeonScreen->gartTextures.map;
|
||||
|
||||
ret = drmCommandWrite(r300->radeon.radeonScreen->driScreen->fd,
|
||||
DRM_RADEON_FREE, &memfree,
|
||||
sizeof(memfree));
|
||||
if (ret) {
|
||||
fprintf(stderr, "Failed to free at %p\nret = %s\n",
|
||||
r300->rmm->u_list[i].ptr, strerror(-ret));
|
||||
} else {
|
||||
if (i == r300->rmm->u_last)
|
||||
r300->rmm->u_last--;
|
||||
|
||||
r300->rmm->u_list[i].pending = 0;
|
||||
r300->rmm->u_list[i].ptr = NULL;
|
||||
}
|
||||
}
|
||||
r300->rmm->u_head = i;
|
||||
#endif /* USER_BUFFERS */
|
||||
}
|
||||
|
||||
/* Destroy the device specific context.
|
||||
*/
|
||||
void r300DestroyContext(__DRIcontextPrivate * driContextPriv)
|
||||
|
|
@ -495,23 +394,12 @@ void r300DestroyContext(__DRIcontextPrivate * driContextPriv)
|
|||
assert(r300); /* should never be null */
|
||||
|
||||
if (r300) {
|
||||
GLboolean release_texture_heaps;
|
||||
|
||||
release_texture_heaps =
|
||||
(r300->radeon.glCtx->Shared->RefCount == 1);
|
||||
_swsetup_DestroyContext(r300->radeon.glCtx);
|
||||
_tnl_DestroyContext(r300->radeon.glCtx);
|
||||
_vbo_DestroyContext(r300->radeon.glCtx);
|
||||
_swrast_DestroyContext(r300->radeon.glCtx);
|
||||
|
||||
if (r300->dma.current.buf) {
|
||||
r300ReleaseDmaRegion(r300, &r300->dma.current,
|
||||
__FUNCTION__);
|
||||
#ifndef USER_BUFFERS
|
||||
r300FlushCmdBuf(r300, __FUNCTION__);
|
||||
#endif
|
||||
}
|
||||
r300FreeGartAllocations(r300);
|
||||
r300FlushCmdBuf(r300, __FUNCTION__);
|
||||
r300DestroyCmdBuf(r300);
|
||||
|
||||
if (radeon->state.scissor.pClipRects) {
|
||||
|
|
@ -519,28 +407,11 @@ void r300DestroyContext(__DRIcontextPrivate * driContextPriv)
|
|||
radeon->state.scissor.pClipRects = NULL;
|
||||
}
|
||||
|
||||
if (release_texture_heaps) {
|
||||
/* This share group is about to go away, free our private
|
||||
* texture object data.
|
||||
*/
|
||||
int i;
|
||||
|
||||
for (i = 0; i < r300->nr_heaps; i++) {
|
||||
driDestroyTextureHeap(r300->texture_heaps[i]);
|
||||
r300->texture_heaps[i] = NULL;
|
||||
}
|
||||
|
||||
assert(is_empty_list(&r300->swapped));
|
||||
}
|
||||
|
||||
radeonCleanupContext(&r300->radeon);
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
/* the memory manager might be accessed when Mesa frees the shared
|
||||
* state, so don't destroy it earlier
|
||||
*/
|
||||
r300_mem_destroy(r300);
|
||||
#endif
|
||||
|
||||
/* free the option cache */
|
||||
driDestroyOptionCache(&r300->radeon.optionCache);
|
||||
|
|
|
|||
|
|
@ -42,13 +42,12 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "radeon_drm.h"
|
||||
#include "dri_util.h"
|
||||
#include "texmem.h"
|
||||
#include "radeon_bo.h"
|
||||
|
||||
#include "main/macros.h"
|
||||
#include "main/mtypes.h"
|
||||
#include "main/colormac.h"
|
||||
|
||||
#define USER_BUFFERS
|
||||
|
||||
struct r300_context;
|
||||
typedef struct r300_context r300ContextRec;
|
||||
typedef struct r300_context *r300ContextPtr;
|
||||
|
|
@ -122,68 +121,50 @@ static INLINE uint32_t r300PackFloat24(float f)
|
|||
|
||||
/************ DMA BUFFERS **************/
|
||||
|
||||
/* Need refcounting on dma buffers:
|
||||
*/
|
||||
struct r300_dma_buffer {
|
||||
int refcount; /**< the number of retained regions in buf */
|
||||
drmBufPtr buf;
|
||||
int id;
|
||||
};
|
||||
#undef GET_START
|
||||
#ifdef USER_BUFFERS
|
||||
#define GET_START(rvb) (r300GartOffsetFromVirtual(rmesa, (rvb)->address+(rvb)->start))
|
||||
#else
|
||||
#define GET_START(rvb) (rmesa->radeon.radeonScreen->gart_buffer_offset + \
|
||||
(rvb)->address - rmesa->dma.buf0_address + \
|
||||
(rvb)->start)
|
||||
#endif
|
||||
/* A retained region, eg vertices for indexed vertices.
|
||||
*/
|
||||
struct r300_dma_region {
|
||||
struct r300_dma_buffer *buf;
|
||||
char *address; /* == buf->address */
|
||||
int start, end, ptr; /* offsets from start of buf */
|
||||
|
||||
int aos_offset; /* address in GART memory */
|
||||
int aos_stride; /* distance between elements, in dwords */
|
||||
int aos_size; /* number of components (1-4) */
|
||||
};
|
||||
|
||||
struct r300_dma {
|
||||
/* Active dma region. Allocations for vertices and retained
|
||||
* regions come from here. Also used for emitting random vertices,
|
||||
* these may be flushed by calling flush_current();
|
||||
*/
|
||||
struct r300_dma_region current;
|
||||
|
||||
void (*flush) (r300ContextPtr);
|
||||
|
||||
char *buf0_address; /* start of buf[0], for index calcs */
|
||||
|
||||
/* Number of "in-flight" DMA buffers, i.e. the number of buffers
|
||||
* for which a DISCARD command is currently queued in the command buffer.
|
||||
*/
|
||||
GLuint nr_released_bufs;
|
||||
};
|
||||
|
||||
/* Texture related */
|
||||
|
||||
/* Texture related */
|
||||
typedef struct r300_tex_obj r300TexObj, *r300TexObjPtr;
|
||||
typedef struct _r300_texture_image r300_texture_image;
|
||||
|
||||
|
||||
struct _r300_texture_image {
|
||||
struct gl_texture_image base;
|
||||
|
||||
/**
|
||||
* If mt != 0, the image is stored in hardware format in the
|
||||
* given mipmap tree. In this case, base.Data may point into the
|
||||
* mapping of the buffer object that contains the mipmap tree.
|
||||
*
|
||||
* If mt == 0, the image is stored in normal memory pointed to
|
||||
* by base.Data.
|
||||
*/
|
||||
struct _r300_mipmap_tree *mt;
|
||||
|
||||
int mtlevel; /** if mt != 0, this is the image's level in the mipmap tree */
|
||||
int mtface; /** if mt != 0, this is the image's face in the mipmap tree */
|
||||
};
|
||||
|
||||
static INLINE r300_texture_image *get_r300_texture_image(struct gl_texture_image *image)
|
||||
{
|
||||
return (r300_texture_image*)image;
|
||||
}
|
||||
|
||||
|
||||
/* Texture object in locally shared texture space.
|
||||
*/
|
||||
struct r300_tex_obj {
|
||||
driTextureObject base;
|
||||
struct gl_texture_object base;
|
||||
struct _r300_mipmap_tree *mt;
|
||||
|
||||
GLuint bufAddr; /* Offset to start of locally
|
||||
shared texture block */
|
||||
|
||||
drm_radeon_tex_image_t image[6][RADEON_MAX_TEXTURE_LEVELS];
|
||||
/* Six, for the cube faces */
|
||||
/**
|
||||
* This is true if we've verified that the mipmap tree above is complete
|
||||
* and so on.
|
||||
*/
|
||||
GLboolean validated;
|
||||
|
||||
GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
|
||||
GLuint override_offset;
|
||||
|
||||
GLuint pitch; /* this isn't sent to hardware just used in calculations */
|
||||
/* hardware register values */
|
||||
/* Note that R200 has 8 registers per texture and R300 only 7 */
|
||||
GLuint filter;
|
||||
|
|
@ -191,30 +172,16 @@ struct r300_tex_obj {
|
|||
GLuint pitch_reg;
|
||||
GLuint size; /* npot only */
|
||||
GLuint format;
|
||||
GLuint offset; /* Image location in the card's address space.
|
||||
All cube faces follow. */
|
||||
GLuint unknown4;
|
||||
GLuint unknown5;
|
||||
/* end hardware registers */
|
||||
|
||||
/* registers computed by r200 code - keep them here to
|
||||
compare against what is actually written.
|
||||
|
||||
to be removed later.. */
|
||||
GLuint pp_border_color;
|
||||
GLuint pp_cubic_faces; /* cube face 1,2,3,4 log2 sizes */
|
||||
GLuint format_x;
|
||||
|
||||
GLboolean border_fallback;
|
||||
/* end hardware registers */
|
||||
|
||||
GLuint tile_bits; /* hw texture tile bits used on this texture */
|
||||
};
|
||||
|
||||
struct r300_texture_env_state {
|
||||
r300TexObjPtr texobj;
|
||||
GLenum format;
|
||||
GLenum envMode;
|
||||
};
|
||||
static INLINE r300TexObj* r300_tex_obj(struct gl_texture_object *texObj)
|
||||
{
|
||||
return (r300TexObj*)texObj;
|
||||
}
|
||||
|
||||
/* The blit width for texture uploads
|
||||
*/
|
||||
|
|
@ -222,7 +189,6 @@ struct r300_texture_env_state {
|
|||
#define R300_MAX_TEXTURE_UNITS 8
|
||||
|
||||
struct r300_texture_state {
|
||||
struct r300_texture_env_state unit[R300_MAX_TEXTURE_UNITS];
|
||||
int tc_count; /* number of incoming texture coordinates from VAP */
|
||||
};
|
||||
|
||||
|
|
@ -242,6 +208,7 @@ struct r300_state_atom {
|
|||
GLboolean dirty;
|
||||
|
||||
int (*check) (r300ContextPtr, struct r300_state_atom * atom);
|
||||
void (*emit) (r300ContextPtr);
|
||||
};
|
||||
|
||||
#define R300_VPT_CMD_0 0
|
||||
|
|
@ -549,6 +516,8 @@ struct r300_hw_state {
|
|||
struct r300_state_atom border_color;
|
||||
} tex;
|
||||
struct r300_state_atom txe; /* tex enable (4104) */
|
||||
|
||||
r300TexObj *textures[R300_MAX_TEXTURE_UNITS];
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -559,10 +528,10 @@ struct r300_hw_state {
|
|||
* otherwise.
|
||||
*/
|
||||
struct r300_cmdbuf {
|
||||
int size; /* DWORDs allocated for buffer */
|
||||
uint32_t *cmd_buf;
|
||||
int count_used; /* DWORDs filled so far */
|
||||
int count_reemit; /* size of re-emission batch */
|
||||
struct radeon_cs_manager *csm;
|
||||
struct radeon_cs *cs;
|
||||
int size; /** # of dwords total */
|
||||
unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -811,18 +780,25 @@ struct r500_fragment_program {
|
|||
#define REG_COLOR0 1
|
||||
#define REG_TEX0 2
|
||||
|
||||
struct r300_aos {
|
||||
struct radeon_bo *bo; /** Buffer object where vertex data is stored */
|
||||
int offset; /** Offset into buffer object, in bytes */
|
||||
int components; /** Number of components per vertex */
|
||||
int stride; /** Stride in dwords (may be 0 for repeating) */
|
||||
int count; /** Number of vertices */
|
||||
};
|
||||
|
||||
struct r300_state {
|
||||
struct r300_depthbuffer_state depth;
|
||||
struct r300_texture_state texture;
|
||||
int sw_tcl_inputs[VERT_ATTRIB_MAX];
|
||||
struct r300_vertex_shader_state vertex_shader;
|
||||
struct r300_dma_region aos[R300_MAX_AOS_ARRAYS];
|
||||
struct r300_aos aos[R300_MAX_AOS_ARRAYS];
|
||||
int aos_count;
|
||||
|
||||
GLuint *Elts;
|
||||
struct r300_dma_region elt_dma;
|
||||
struct radeon_bo *elt_dma_bo; /** Buffer object that contains element indices */
|
||||
int elt_dma_offset; /** Offset into this buffer object, in bytes */
|
||||
|
||||
struct r300_dma_region swtcl_dma;
|
||||
DECLARE_RENDERINPUTS(render_inputs_bitset); /* actual render inputs that R300 was configured for.
|
||||
They are the same as tnl->render_inputs for fixed pipeline */
|
||||
|
||||
|
|
@ -881,12 +857,8 @@ struct r300_swtcl_info {
|
|||
*/
|
||||
GLuint specoffset;
|
||||
|
||||
/**
|
||||
* Should Mesa project vertex data or will the hardware do it?
|
||||
*/
|
||||
GLboolean needproj;
|
||||
|
||||
struct r300_dma_region indexed_verts;
|
||||
struct radeon_bo *bo;
|
||||
void (*flush) (r300ContextPtr);
|
||||
};
|
||||
|
||||
|
||||
|
|
@ -904,26 +876,11 @@ struct r300_context {
|
|||
|
||||
/* Vertex buffers
|
||||
*/
|
||||
struct r300_dma dma;
|
||||
GLboolean save_on_next_unlock;
|
||||
GLuint NewGLState;
|
||||
|
||||
/* Texture object bookkeeping
|
||||
*/
|
||||
unsigned nr_heaps;
|
||||
driTexHeap *texture_heaps[RADEON_NR_TEX_HEAPS];
|
||||
driTextureObject swapped;
|
||||
int texture_depth;
|
||||
float initialMaxAnisotropy;
|
||||
|
||||
/* Clientdata textures;
|
||||
*/
|
||||
GLuint prefer_gart_client_texturing;
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
struct r300_memory_manager *rmm;
|
||||
#endif
|
||||
|
||||
GLvector4f dummy_attrib[_TNL_ATTRIB_MAX];
|
||||
GLvector4f *temp_attrib[_TNL_ATTRIB_MAX];
|
||||
|
||||
|
|
|
|||
|
|
@ -51,9 +51,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "r300_emit.h"
|
||||
#include "r300_ioctl.h"
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
#include "r300_mem.h"
|
||||
#endif
|
||||
|
||||
#if SWIZZLE_X != R300_INPUT_ROUTE_SELECT_X || \
|
||||
SWIZZLE_Y != R300_INPUT_ROUTE_SELECT_Y || \
|
||||
|
|
@ -86,11 +83,9 @@ do { \
|
|||
} while (0)
|
||||
#endif
|
||||
|
||||
static void r300EmitVec4(GLcontext * ctx, struct r300_dma_region *rvb,
|
||||
GLvoid * data, int stride, int count)
|
||||
static void r300EmitVec4(uint32_t *out, GLvoid * data, int stride, int count)
|
||||
{
|
||||
int i;
|
||||
int *out = (int *)(rvb->address + rvb->start);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_VERTS)
|
||||
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
|
||||
|
|
@ -106,11 +101,9 @@ static void r300EmitVec4(GLcontext * ctx, struct r300_dma_region *rvb,
|
|||
}
|
||||
}
|
||||
|
||||
static void r300EmitVec8(GLcontext * ctx, struct r300_dma_region *rvb,
|
||||
GLvoid * data, int stride, int count)
|
||||
static void r300EmitVec8(uint32_t *out, GLvoid * data, int stride, int count)
|
||||
{
|
||||
int i;
|
||||
int *out = (int *)(rvb->address + rvb->start);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_VERTS)
|
||||
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
|
||||
|
|
@ -127,18 +120,17 @@ static void r300EmitVec8(GLcontext * ctx, struct r300_dma_region *rvb,
|
|||
}
|
||||
}
|
||||
|
||||
static void r300EmitVec12(GLcontext * ctx, struct r300_dma_region *rvb,
|
||||
GLvoid * data, int stride, int count)
|
||||
static void r300EmitVec12(uint32_t *out, GLvoid * data, int stride, int count)
|
||||
{
|
||||
int i;
|
||||
int *out = (int *)(rvb->address + rvb->start);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_VERTS)
|
||||
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
|
||||
__FUNCTION__, count, stride, (void *)out, (void *)data);
|
||||
|
||||
if (stride == 12)
|
||||
if (stride == 12) {
|
||||
COPY_DWORDS(out, data, count * 3);
|
||||
}
|
||||
else
|
||||
for (i = 0; i < count; i++) {
|
||||
out[0] = *(int *)data;
|
||||
|
|
@ -149,11 +141,9 @@ static void r300EmitVec12(GLcontext * ctx, struct r300_dma_region *rvb,
|
|||
}
|
||||
}
|
||||
|
||||
static void r300EmitVec16(GLcontext * ctx, struct r300_dma_region *rvb,
|
||||
GLvoid * data, int stride, int count)
|
||||
static void r300EmitVec16(uint32_t *out, GLvoid * data, int stride, int count)
|
||||
{
|
||||
int i;
|
||||
int *out = (int *)(rvb->address + rvb->start);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_VERTS)
|
||||
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
|
||||
|
|
@ -172,39 +162,39 @@ static void r300EmitVec16(GLcontext * ctx, struct r300_dma_region *rvb,
|
|||
}
|
||||
}
|
||||
|
||||
static void r300EmitVec(GLcontext * ctx, struct r300_dma_region *rvb,
|
||||
static void r300EmitVec(GLcontext * ctx, struct r300_aos *aos,
|
||||
GLvoid * data, int size, int stride, int count)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
uint32_t *out;
|
||||
uint32_t bo_size;
|
||||
|
||||
if (stride == 0) {
|
||||
r300AllocDmaRegion(rmesa, rvb, size * 4, 4);
|
||||
bo_size = size * 4;
|
||||
count = 1;
|
||||
rvb->aos_offset = GET_START(rvb);
|
||||
rvb->aos_stride = 0;
|
||||
aos->stride = 0;
|
||||
} else {
|
||||
r300AllocDmaRegion(rmesa, rvb, size * count * 4, 4);
|
||||
rvb->aos_offset = GET_START(rvb);
|
||||
rvb->aos_stride = size;
|
||||
bo_size = size * count * 4;
|
||||
aos->stride = size;
|
||||
}
|
||||
aos->bo = radeon_bo_open(rmesa->radeon.radeonScreen->bom,
|
||||
0, bo_size, 32, RADEON_GEM_DOMAIN_GTT);
|
||||
aos->offset = 0;
|
||||
aos->components = size;
|
||||
aos->count = count;
|
||||
|
||||
radeon_bo_map(aos->bo, 1);
|
||||
out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
|
||||
switch (size) {
|
||||
case 1:
|
||||
r300EmitVec4(ctx, rvb, data, stride, count);
|
||||
break;
|
||||
case 2:
|
||||
r300EmitVec8(ctx, rvb, data, stride, count);
|
||||
break;
|
||||
case 3:
|
||||
r300EmitVec12(ctx, rvb, data, stride, count);
|
||||
break;
|
||||
case 4:
|
||||
r300EmitVec16(ctx, rvb, data, stride, count);
|
||||
break;
|
||||
case 1: r300EmitVec4(out, data, stride, count); break;
|
||||
case 2: r300EmitVec8(out, data, stride, count); break;
|
||||
case 3: r300EmitVec12(out, data, stride, count); break;
|
||||
case 4: r300EmitVec16(out, data, stride, count); break;
|
||||
default:
|
||||
assert(0);
|
||||
break;
|
||||
}
|
||||
radeon_bo_unmap(aos->bo);
|
||||
}
|
||||
|
||||
#define DW_SIZE(x) ((inputs[tab[(x)]] << R300_DST_VEC_LOC_SHIFT) | \
|
||||
|
|
@ -314,10 +304,6 @@ GLuint r300VAPOutputCntl0(GLcontext * ctx, GLuint OutputsWritten)
|
|||
R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT |
|
||||
R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT;
|
||||
|
||||
#if 0
|
||||
if (OutputsWritten & (1 << VERT_RESULT_FOGC)) ;
|
||||
#endif
|
||||
|
||||
if (OutputsWritten & (1 << VERT_RESULT_PSIZ))
|
||||
ret |= R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT;
|
||||
|
||||
|
|
@ -371,7 +357,6 @@ int r300EmitArrays(GLcontext * ctx)
|
|||
|
||||
assert(RENDERINPUTS_TEST(render_inputs_bitset, _TNL_ATTRIB_POS));
|
||||
assert(RENDERINPUTS_TEST(render_inputs_bitset, _TNL_ATTRIB_NORMAL) == 0);
|
||||
//assert(RENDERINPUTS_TEST(render_inputs_bitset, _TNL_ATTRIB_COLOR0));
|
||||
|
||||
if (RENDERINPUTS_TEST(render_inputs_bitset, _TNL_ATTRIB_POS)) {
|
||||
InputsRead |= 1 << VERT_ATTRIB_POS;
|
||||
|
|
@ -433,7 +418,7 @@ int r300EmitArrays(GLcontext * ctx)
|
|||
}
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
int ci, fix, found = 0;
|
||||
int ci;
|
||||
|
||||
swizzle[i][0] = SWIZZLE_ZERO;
|
||||
swizzle[i][1] = SWIZZLE_ZERO;
|
||||
|
|
@ -444,48 +429,10 @@ int r300EmitArrays(GLcontext * ctx)
|
|||
swizzle[i][ci] = ci;
|
||||
}
|
||||
|
||||
if (r300IsGartMemory(rmesa, vb->AttribPtr[tab[i]]->data, 4)) {
|
||||
if (vb->AttribPtr[tab[i]]->stride % 4) {
|
||||
return R300_FALLBACK_TCL;
|
||||
}
|
||||
rmesa->state.aos[i].address = (void *)(vb->AttribPtr[tab[i]]->data);
|
||||
rmesa->state.aos[i].start = 0;
|
||||
rmesa->state.aos[i].aos_offset = r300GartOffsetFromVirtual(rmesa, vb->AttribPtr[tab[i]]->data);
|
||||
rmesa->state.aos[i].aos_stride = vb->AttribPtr[tab[i]]->stride / 4;
|
||||
rmesa->state.aos[i].aos_size = vb->AttribPtr[tab[i]]->size;
|
||||
} else {
|
||||
r300EmitVec(ctx, &rmesa->state.aos[i],
|
||||
vb->AttribPtr[tab[i]]->data,
|
||||
vb->AttribPtr[tab[i]]->size,
|
||||
vb->AttribPtr[tab[i]]->stride, count);
|
||||
}
|
||||
|
||||
rmesa->state.aos[i].aos_size = vb->AttribPtr[tab[i]]->size;
|
||||
|
||||
for (fix = 0; fix <= 4 - vb->AttribPtr[tab[i]]->size; fix++) {
|
||||
if ((rmesa->state.aos[i].aos_offset - _mesa_sizeof_type(GL_FLOAT) * fix) % 4) {
|
||||
continue;
|
||||
}
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
|
||||
if (found) {
|
||||
if (fix > 0) {
|
||||
WARN_ONCE("Feeling lucky?\n");
|
||||
}
|
||||
rmesa->state.aos[i].aos_offset -= _mesa_sizeof_type(GL_FLOAT) * fix;
|
||||
for (ci = 0; ci < vb->AttribPtr[tab[i]]->size; ci++) {
|
||||
swizzle[i][ci] += fix;
|
||||
}
|
||||
} else {
|
||||
WARN_ONCE
|
||||
("Cannot handle offset %x with stride %d, comp %d\n",
|
||||
rmesa->state.aos[i].aos_offset,
|
||||
rmesa->state.aos[i].aos_stride,
|
||||
vb->AttribPtr[tab[i]]->size);
|
||||
return R300_FALLBACK_TCL;
|
||||
}
|
||||
r300EmitVec(ctx, &rmesa->state.aos[i],
|
||||
vb->AttribPtr[tab[i]]->data,
|
||||
vb->AttribPtr[tab[i]]->size,
|
||||
vb->AttribPtr[tab[i]]->stride, count);
|
||||
}
|
||||
|
||||
/* Setup INPUT_ROUTE. */
|
||||
|
|
@ -515,45 +462,34 @@ int r300EmitArrays(GLcontext * ctx)
|
|||
return R300_FALLBACK_NONE;
|
||||
}
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
void r300UseArrays(GLcontext * ctx)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
int i;
|
||||
|
||||
if (rmesa->state.elt_dma.buf)
|
||||
r300_mem_use(rmesa, rmesa->state.elt_dma.buf->id);
|
||||
|
||||
for (i = 0; i < rmesa->state.aos_count; i++) {
|
||||
if (rmesa->state.aos[i].buf)
|
||||
r300_mem_use(rmesa, rmesa->state.aos[i].buf->id);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
void r300ReleaseArrays(GLcontext * ctx)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
int i;
|
||||
|
||||
r300ReleaseDmaRegion(rmesa, &rmesa->state.elt_dma, __FUNCTION__);
|
||||
if (rmesa->state.elt_dma_bo) {
|
||||
radeon_bo_unref(rmesa->state.elt_dma_bo);
|
||||
rmesa->state.elt_dma_bo = 0;
|
||||
}
|
||||
for (i = 0; i < rmesa->state.aos_count; i++) {
|
||||
r300ReleaseDmaRegion(rmesa, &rmesa->state.aos[i], __FUNCTION__);
|
||||
if (rmesa->state.aos[i].bo) {
|
||||
radeon_bo_unref(rmesa->state.aos[i].bo);
|
||||
rmesa->state.aos[i].bo = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void r300EmitCacheFlush(r300ContextPtr rmesa)
|
||||
{
|
||||
int cmd_reserved = 0;
|
||||
int cmd_written = 0;
|
||||
BATCH_LOCALS(rmesa);
|
||||
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
|
||||
reg_start(R300_RB3D_DSTCACHE_CTLSTAT, 0);
|
||||
e32(R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS |
|
||||
R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
|
||||
|
||||
reg_start(R300_ZB_ZCACHE_CTLSTAT, 0);
|
||||
e32(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
|
||||
R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
|
||||
BEGIN_BATCH(4);
|
||||
OUT_BATCH_REGVAL(R300_RB3D_DSTCACHE_CTLSTAT,
|
||||
R300_RB3D_DSTCACHE_CTLSTAT_DC_FREE_FREE_3D_TAGS |
|
||||
R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
|
||||
OUT_BATCH_REGVAL(R300_ZB_ZCACHE_CTLSTAT,
|
||||
R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
|
||||
R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
|
||||
END_BATCH();
|
||||
COMMIT_BATCH();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -127,130 +127,62 @@ static INLINE uint32_t cmdpacify(void)
|
|||
return cmd.u;
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare to write a register value to register at address reg.
|
||||
* If num_extra > 0 then the following extra values are written
|
||||
* to registers with address +4, +8 and so on..
|
||||
*/
|
||||
#define reg_start(reg, num_extra) \
|
||||
do { \
|
||||
int _n; \
|
||||
_n=(num_extra); \
|
||||
cmd = (drm_radeon_cmd_header_t*) \
|
||||
r300AllocCmdBuf(rmesa, \
|
||||
(_n+2), \
|
||||
__FUNCTION__); \
|
||||
cmd_reserved=_n+2; \
|
||||
cmd_written=1; \
|
||||
cmd[0].i=cmdpacket0((reg), _n+1); \
|
||||
} while (0);
|
||||
|
||||
/** Single register write to command buffer; requires 2 dwords. */
|
||||
#define OUT_BATCH_REGVAL(reg, val) \
|
||||
OUT_BATCH(cmdpacket0((reg), 1)); \
|
||||
OUT_BATCH((val))
|
||||
|
||||
/** Continuous register range write to command buffer; requires 1 dword,
|
||||
* expects count dwords afterwards for register contents. */
|
||||
#define OUT_BATCH_REGSEQ(reg, count) \
|
||||
OUT_BATCH(cmdpacket0((reg), (count)));
|
||||
|
||||
/** Write a 32 bit float to the ring; requires 1 dword. */
|
||||
#define OUT_BATCH_FLOAT32(f) \
|
||||
OUT_BATCH(r300PackFloat32((f)));
|
||||
|
||||
/**
|
||||
* Emit GLuint freestyle
|
||||
* Write the header of a packet3 to the command buffer.
|
||||
* Outputs 2 dwords and expects (num_extra+1) additional dwords afterwards.
|
||||
*/
|
||||
#define e32(dword) \
|
||||
do { \
|
||||
if(cmd_written<cmd_reserved) { \
|
||||
cmd[cmd_written].i=(dword); \
|
||||
cmd_written++; \
|
||||
} else { \
|
||||
fprintf(stderr, \
|
||||
"e32 but no previous packet " \
|
||||
"declaration.\n" \
|
||||
"Aborting! in %s::%s at line %d, " \
|
||||
"cmd_written=%d cmd_reserved=%d\n", \
|
||||
__FILE__, __FUNCTION__, __LINE__, \
|
||||
cmd_written, cmd_reserved); \
|
||||
_mesa_exit(-1); \
|
||||
} \
|
||||
#define OUT_BATCH_PACKET3(packet, num_extra) do {\
|
||||
OUT_BATCH(cmdpacket3(R300_CMD_PACKET3_RAW)); \
|
||||
OUT_BATCH(CP_PACKET3((packet), (num_extra))); \
|
||||
} while(0)
|
||||
|
||||
#define efloat(f) e32(r300PackFloat32(f))
|
||||
|
||||
#define vsf_start_fragment(dest, length) \
|
||||
do { \
|
||||
int _n; \
|
||||
_n = (length); \
|
||||
cmd = (drm_radeon_cmd_header_t*) \
|
||||
r300AllocCmdBuf(rmesa, \
|
||||
(_n+1), \
|
||||
__FUNCTION__); \
|
||||
cmd_reserved = _n+2; \
|
||||
cmd_written =1; \
|
||||
cmd[0].i = cmdvpu((dest), _n/4); \
|
||||
} while (0);
|
||||
|
||||
#define r500fp_start_fragment(dest, length) \
|
||||
do { \
|
||||
int _n; \
|
||||
_n = (length); \
|
||||
cmd = (drm_radeon_cmd_header_t*) \
|
||||
r300AllocCmdBuf(rmesa, \
|
||||
(_n+1), \
|
||||
__FUNCTION__); \
|
||||
cmd_reserved = _n+1; \
|
||||
cmd_written =1; \
|
||||
cmd[0].i = cmdr500fp((dest), _n/6, 0, 0); \
|
||||
} while (0);
|
||||
|
||||
#define start_packet3(packet, count) \
|
||||
{ \
|
||||
int _n; \
|
||||
GLuint _p; \
|
||||
_n = (count); \
|
||||
_p = (packet); \
|
||||
cmd = (drm_radeon_cmd_header_t*) \
|
||||
r300AllocCmdBuf(rmesa, \
|
||||
(_n+3), \
|
||||
__FUNCTION__); \
|
||||
cmd_reserved = _n+3; \
|
||||
cmd_written = 2; \
|
||||
if(_n > 0x3fff) { \
|
||||
fprintf(stderr,"Too big packet3 %08x: cannot " \
|
||||
"store %d dwords\n", \
|
||||
_p, _n); \
|
||||
_mesa_exit(-1); \
|
||||
} \
|
||||
cmd[0].i = cmdpacket3(R300_CMD_PACKET3_RAW); \
|
||||
cmd[1].i = _p | ((_n & 0x3fff)<<16); \
|
||||
}
|
||||
|
||||
/**
|
||||
* Must be sent to switch to 2d commands
|
||||
*/
|
||||
void static INLINE end_3d(r300ContextPtr rmesa)
|
||||
{
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
BATCH_LOCALS(rmesa);
|
||||
|
||||
cmd =
|
||||
(drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa, 1, __FUNCTION__);
|
||||
cmd[0].header.cmd_type = R300_CMD_END3D;
|
||||
BEGIN_BATCH(1);
|
||||
OUT_BATCH(cmdpacify());
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
void static INLINE cp_delay(r300ContextPtr rmesa, unsigned short count)
|
||||
{
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
BATCH_LOCALS(rmesa);
|
||||
|
||||
cmd =
|
||||
(drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa, 1, __FUNCTION__);
|
||||
cmd[0].i = cmdcpdelay(count);
|
||||
BEGIN_BATCH(1);
|
||||
OUT_BATCH(cmdcpdelay(count));
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
void static INLINE cp_wait(r300ContextPtr rmesa, unsigned char flags)
|
||||
{
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
BATCH_LOCALS(rmesa);
|
||||
|
||||
cmd =
|
||||
(drm_radeon_cmd_header_t *) r300AllocCmdBuf(rmesa, 1, __FUNCTION__);
|
||||
cmd[0].i = cmdwait(flags);
|
||||
BEGIN_BATCH(1);
|
||||
OUT_BATCH(cmdwait(flags));
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
extern int r300EmitArrays(GLcontext * ctx);
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
void r300UseArrays(GLcontext * ctx);
|
||||
#endif
|
||||
|
||||
extern void r300ReleaseArrays(GLcontext * ctx);
|
||||
extern int r300PrimitiveType(r300ContextPtr rmesa, int prim);
|
||||
extern int r300NumVerts(r300ContextPtr rmesa, int num_verts, int prim);
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -39,20 +39,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "r300_context.h"
|
||||
#include "radeon_drm.h"
|
||||
|
||||
extern GLboolean r300IsGartMemory(r300ContextPtr rmesa,
|
||||
const GLvoid * pointer, GLint size);
|
||||
|
||||
extern GLuint r300GartOffsetFromVirtual(r300ContextPtr rmesa,
|
||||
const GLvoid * pointer);
|
||||
|
||||
extern void r300Flush(GLcontext * ctx);
|
||||
|
||||
extern void r300ReleaseDmaRegion(r300ContextPtr rmesa,
|
||||
struct r300_dma_region *region,
|
||||
const char *caller);
|
||||
extern void r300AllocDmaRegion(r300ContextPtr rmesa,
|
||||
struct r300_dma_region *region, int bytes,
|
||||
int alignment);
|
||||
struct radeon_bo **pbo, int *poffset,
|
||||
int bytes, int alignment);
|
||||
|
||||
extern void r300InitIoctlFuncs(struct dd_function_table *functions);
|
||||
|
||||
|
|
|
|||
|
|
@ -1,385 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2005 Aapo Tahkola.
|
||||
*
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial
|
||||
* portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
|
||||
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* \file
|
||||
*
|
||||
* \author Aapo Tahkola <aet@rasterburn.org>
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include "r300_context.h"
|
||||
#include "r300_cmdbuf.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include "r300_mem.h"
|
||||
#include "radeon_ioctl.h"
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
|
||||
static void resize_u_list(r300ContextPtr rmesa)
|
||||
{
|
||||
void *temp;
|
||||
int nsize;
|
||||
|
||||
temp = rmesa->rmm->u_list;
|
||||
nsize = rmesa->rmm->u_size * 2;
|
||||
|
||||
rmesa->rmm->u_list = _mesa_malloc(nsize * sizeof(*rmesa->rmm->u_list));
|
||||
_mesa_memset(rmesa->rmm->u_list, 0,
|
||||
nsize * sizeof(*rmesa->rmm->u_list));
|
||||
|
||||
if (temp) {
|
||||
r300FlushCmdBuf(rmesa, __FUNCTION__);
|
||||
|
||||
_mesa_memcpy(rmesa->rmm->u_list, temp,
|
||||
rmesa->rmm->u_size * sizeof(*rmesa->rmm->u_list));
|
||||
_mesa_free(temp);
|
||||
}
|
||||
|
||||
rmesa->rmm->u_size = nsize;
|
||||
}
|
||||
|
||||
void r300_mem_init(r300ContextPtr rmesa)
|
||||
{
|
||||
rmesa->rmm = malloc(sizeof(struct r300_memory_manager));
|
||||
memset(rmesa->rmm, 0, sizeof(struct r300_memory_manager));
|
||||
|
||||
rmesa->rmm->u_size = 128;
|
||||
resize_u_list(rmesa);
|
||||
}
|
||||
|
||||
void r300_mem_destroy(r300ContextPtr rmesa)
|
||||
{
|
||||
_mesa_free(rmesa->rmm->u_list);
|
||||
rmesa->rmm->u_list = NULL;
|
||||
|
||||
_mesa_free(rmesa->rmm);
|
||||
rmesa->rmm = NULL;
|
||||
}
|
||||
|
||||
void *r300_mem_ptr(r300ContextPtr rmesa, int id)
|
||||
{
|
||||
assert(id <= rmesa->rmm->u_last);
|
||||
return rmesa->rmm->u_list[id].ptr;
|
||||
}
|
||||
|
||||
int r300_mem_find(r300ContextPtr rmesa, void *ptr)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 1; i < rmesa->rmm->u_size + 1; i++)
|
||||
if (rmesa->rmm->u_list[i].ptr &&
|
||||
ptr >= rmesa->rmm->u_list[i].ptr &&
|
||||
ptr <
|
||||
rmesa->rmm->u_list[i].ptr + rmesa->rmm->u_list[i].size)
|
||||
break;
|
||||
|
||||
if (i < rmesa->rmm->u_size + 1)
|
||||
return i;
|
||||
|
||||
fprintf(stderr, "%p failed\n", ptr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
//#define MM_DEBUG
|
||||
int r300_mem_alloc(r300ContextPtr rmesa, int alignment, int size)
|
||||
{
|
||||
drm_radeon_mem_alloc_t alloc;
|
||||
int offset = 0, ret;
|
||||
int i, free = -1;
|
||||
int done_age;
|
||||
drm_radeon_mem_free_t memfree;
|
||||
int tries = 0;
|
||||
static int bytes_wasted = 0, allocated = 0;
|
||||
|
||||
if (size < 4096)
|
||||
bytes_wasted += 4096 - size;
|
||||
|
||||
allocated += size;
|
||||
|
||||
#if 0
|
||||
static int t = 0;
|
||||
if (t != time(NULL)) {
|
||||
t = time(NULL);
|
||||
fprintf(stderr, "slots used %d, wasted %d kb, allocated %d\n",
|
||||
rmesa->rmm->u_last, bytes_wasted / 1024,
|
||||
allocated / 1024);
|
||||
}
|
||||
#endif
|
||||
|
||||
memfree.region = RADEON_MEM_REGION_GART;
|
||||
|
||||
again:
|
||||
|
||||
done_age = radeonGetAge((radeonContextPtr) rmesa);
|
||||
|
||||
if (rmesa->rmm->u_last + 1 >= rmesa->rmm->u_size)
|
||||
resize_u_list(rmesa);
|
||||
|
||||
for (i = rmesa->rmm->u_last + 1; i > 0; i--) {
|
||||
if (rmesa->rmm->u_list[i].ptr == NULL) {
|
||||
free = i;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (rmesa->rmm->u_list[i].h_pending == 0 &&
|
||||
rmesa->rmm->u_list[i].pending
|
||||
&& rmesa->rmm->u_list[i].age <= done_age) {
|
||||
memfree.region_offset =
|
||||
(char *)rmesa->rmm->u_list[i].ptr -
|
||||
(char *)rmesa->radeon.radeonScreen->gartTextures.
|
||||
map;
|
||||
|
||||
ret =
|
||||
drmCommandWrite(rmesa->radeon.radeonScreen->
|
||||
driScreen->fd, DRM_RADEON_FREE,
|
||||
&memfree, sizeof(memfree));
|
||||
|
||||
if (ret) {
|
||||
fprintf(stderr, "Failed to free at %p\n",
|
||||
rmesa->rmm->u_list[i].ptr);
|
||||
fprintf(stderr, "ret = %s\n", strerror(-ret));
|
||||
exit(1);
|
||||
} else {
|
||||
#ifdef MM_DEBUG
|
||||
fprintf(stderr, "really freed %d at age %x\n",
|
||||
i,
|
||||
radeonGetAge((radeonContextPtr) rmesa));
|
||||
#endif
|
||||
if (i == rmesa->rmm->u_last)
|
||||
rmesa->rmm->u_last--;
|
||||
|
||||
if (rmesa->rmm->u_list[i].size < 4096)
|
||||
bytes_wasted -=
|
||||
4096 - rmesa->rmm->u_list[i].size;
|
||||
|
||||
allocated -= rmesa->rmm->u_list[i].size;
|
||||
rmesa->rmm->u_list[i].pending = 0;
|
||||
rmesa->rmm->u_list[i].ptr = NULL;
|
||||
free = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
rmesa->rmm->u_head = i;
|
||||
|
||||
if (free == -1) {
|
||||
WARN_ONCE("Ran out of slots!\n");
|
||||
//usleep(100);
|
||||
r300FlushCmdBuf(rmesa, __FUNCTION__);
|
||||
tries++;
|
||||
if (tries > 100) {
|
||||
WARN_ONCE("Ran out of slots!\n");
|
||||
exit(1);
|
||||
}
|
||||
goto again;
|
||||
}
|
||||
|
||||
alloc.region = RADEON_MEM_REGION_GART;
|
||||
alloc.alignment = alignment;
|
||||
alloc.size = size;
|
||||
alloc.region_offset = &offset;
|
||||
|
||||
ret =
|
||||
drmCommandWriteRead(rmesa->radeon.dri.fd, DRM_RADEON_ALLOC, &alloc,
|
||||
sizeof(alloc));
|
||||
if (ret) {
|
||||
#if 0
|
||||
WARN_ONCE("Ran out of mem!\n");
|
||||
r300FlushCmdBuf(rmesa, __FUNCTION__);
|
||||
//usleep(100);
|
||||
tries2++;
|
||||
tries = 0;
|
||||
if (tries2 > 100) {
|
||||
WARN_ONCE("Ran out of GART memory!\n");
|
||||
exit(1);
|
||||
}
|
||||
goto again;
|
||||
#else
|
||||
WARN_ONCE
|
||||
("Ran out of GART memory (for %d)!\nPlease consider adjusting GARTSize option.\n",
|
||||
size);
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
i = free;
|
||||
|
||||
if (i > rmesa->rmm->u_last)
|
||||
rmesa->rmm->u_last = i;
|
||||
|
||||
rmesa->rmm->u_list[i].ptr =
|
||||
((GLubyte *) rmesa->radeon.radeonScreen->gartTextures.map) + offset;
|
||||
rmesa->rmm->u_list[i].size = size;
|
||||
rmesa->rmm->u_list[i].age = 0;
|
||||
//fprintf(stderr, "alloc %p at id %d\n", rmesa->rmm->u_list[i].ptr, i);
|
||||
|
||||
#ifdef MM_DEBUG
|
||||
fprintf(stderr, "allocated %d at age %x\n", i,
|
||||
radeonGetAge((radeonContextPtr) rmesa));
|
||||
#endif
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
void r300_mem_use(r300ContextPtr rmesa, int id)
|
||||
{
|
||||
uint64_t ull;
|
||||
#ifdef MM_DEBUG
|
||||
fprintf(stderr, "%s: %d at age %x\n", __FUNCTION__, id,
|
||||
radeonGetAge((radeonContextPtr) rmesa));
|
||||
#endif
|
||||
drm_r300_cmd_header_t *cmd;
|
||||
|
||||
assert(id <= rmesa->rmm->u_last);
|
||||
|
||||
if (id == 0)
|
||||
return;
|
||||
|
||||
cmd =
|
||||
(drm_r300_cmd_header_t *) r300AllocCmdBuf(rmesa,
|
||||
2 + sizeof(ull) / 4,
|
||||
__FUNCTION__);
|
||||
cmd[0].scratch.cmd_type = R300_CMD_SCRATCH;
|
||||
cmd[0].scratch.reg = R300_MEM_SCRATCH;
|
||||
cmd[0].scratch.n_bufs = 1;
|
||||
cmd[0].scratch.flags = 0;
|
||||
cmd++;
|
||||
|
||||
ull = (uint64_t) (intptr_t) & rmesa->rmm->u_list[id].age;
|
||||
_mesa_memcpy(cmd, &ull, sizeof(ull));
|
||||
cmd += sizeof(ull) / 4;
|
||||
|
||||
cmd[0].u = /*id */ 0;
|
||||
|
||||
LOCK_HARDWARE(&rmesa->radeon); /* Protect from DRM. */
|
||||
rmesa->rmm->u_list[id].h_pending++;
|
||||
UNLOCK_HARDWARE(&rmesa->radeon);
|
||||
}
|
||||
|
||||
unsigned long r300_mem_offset(r300ContextPtr rmesa, int id)
|
||||
{
|
||||
unsigned long offset;
|
||||
|
||||
assert(id <= rmesa->rmm->u_last);
|
||||
|
||||
offset = (char *)rmesa->rmm->u_list[id].ptr -
|
||||
(char *)rmesa->radeon.radeonScreen->gartTextures.map;
|
||||
offset += rmesa->radeon.radeonScreen->gart_texture_offset;
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
void *r300_mem_map(r300ContextPtr rmesa, int id, int access)
|
||||
{
|
||||
#ifdef MM_DEBUG
|
||||
fprintf(stderr, "%s: %d at age %x\n", __FUNCTION__, id,
|
||||
radeonGetAge((radeonContextPtr) rmesa));
|
||||
#endif
|
||||
void *ptr;
|
||||
int tries = 0;
|
||||
|
||||
assert(id <= rmesa->rmm->u_last);
|
||||
|
||||
if (access == R300_MEM_R) {
|
||||
|
||||
if (rmesa->rmm->u_list[id].mapped == 1)
|
||||
WARN_ONCE("buffer %d already mapped\n", id);
|
||||
|
||||
rmesa->rmm->u_list[id].mapped = 1;
|
||||
ptr = r300_mem_ptr(rmesa, id);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
if (rmesa->rmm->u_list[id].h_pending)
|
||||
r300FlushCmdBuf(rmesa, __FUNCTION__);
|
||||
|
||||
if (rmesa->rmm->u_list[id].h_pending) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
while (rmesa->rmm->u_list[id].age >
|
||||
radeonGetAge((radeonContextPtr) rmesa) && tries++ < 1000)
|
||||
usleep(10);
|
||||
|
||||
if (tries >= 1000) {
|
||||
fprintf(stderr, "Idling failed (%x vs %x)\n",
|
||||
rmesa->rmm->u_list[id].age,
|
||||
radeonGetAge((radeonContextPtr) rmesa));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (rmesa->rmm->u_list[id].mapped == 1)
|
||||
WARN_ONCE("buffer %d already mapped\n", id);
|
||||
|
||||
rmesa->rmm->u_list[id].mapped = 1;
|
||||
ptr = r300_mem_ptr(rmesa, id);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
void r300_mem_unmap(r300ContextPtr rmesa, int id)
|
||||
{
|
||||
#ifdef MM_DEBUG
|
||||
fprintf(stderr, "%s: %d at age %x\n", __FUNCTION__, id,
|
||||
radeonGetAge((radeonContextPtr) rmesa));
|
||||
#endif
|
||||
|
||||
assert(id <= rmesa->rmm->u_last);
|
||||
|
||||
if (rmesa->rmm->u_list[id].mapped == 0)
|
||||
WARN_ONCE("buffer %d not mapped\n", id);
|
||||
|
||||
rmesa->rmm->u_list[id].mapped = 0;
|
||||
}
|
||||
|
||||
/*
 * Schedule the legacy memory-manager buffer `id` for release.  The buffer
 * is only marked `pending` here; the actual free happens later (elsewhere,
 * once the hardware is done with it).  Buffer 0 is reserved and is never
 * freed.
 */
void r300_mem_free(r300ContextPtr rmesa, int id)
{
#ifdef MM_DEBUG
	fprintf(stderr, "%s: %d at age %x\n", __FUNCTION__, id,
		radeonGetAge((radeonContextPtr) rmesa));
#endif
	assert(id <= rmesa->rmm->u_last);

	/* Slot 0 is reserved. */
	if (id == 0)
		return;

	if (rmesa->rmm->u_list[id].ptr == NULL) {
		WARN_ONCE("Not allocated!\n");
		return;
	}

	/* Double-free guard: already queued for release. */
	if (rmesa->rmm->u_list[id].pending) {
		WARN_ONCE("%p already pended!\n", rmesa->rmm->u_list[id].ptr);
		return;
	}

	rmesa->rmm->u_list[id].pending = 1;
}
|
||||
#endif
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
#ifndef __R300_MEM_H__
#define __R300_MEM_H__

//#define R300_MEM_PDL 0
#define R300_MEM_UL 1

/* Access flags for r300_mem_map(). */
#define R300_MEM_R 1
#define R300_MEM_W 2
#define R300_MEM_RW (R300_MEM_R | R300_MEM_W)

#define R300_MEM_SCRATCH 2

/*
 * Legacy user-space memory manager: a growable table of buffer slots
 * indexed by integer id (slot 0 is reserved — see r300_mem_free()).
 */
struct r300_memory_manager {
	struct {
		void *ptr;		/* CPU address of the buffer, NULL if unallocated */
		uint32_t size;		/* buffer size in bytes */
		uint32_t age;		/* hardware age at last GPU use */
		uint32_t h_pending;	/* nonzero while a cmdbuf submission is outstanding */
		int pending;		/* marked for deferred release */
		int mapped;		/* currently mapped for CPU access */
	} *u_list;
	int u_head, u_size, u_last;

};

extern void r300_mem_init(r300ContextPtr rmesa);
extern void r300_mem_destroy(r300ContextPtr rmesa);
extern void *r300_mem_ptr(r300ContextPtr rmesa, int id);
extern int r300_mem_find(r300ContextPtr rmesa, void *ptr);
extern int r300_mem_alloc(r300ContextPtr rmesa, int alignment, int size);
extern void r300_mem_use(r300ContextPtr rmesa, int id);
extern unsigned long r300_mem_offset(r300ContextPtr rmesa, int id);
extern void *r300_mem_map(r300ContextPtr rmesa, int id, int access);
extern void r300_mem_unmap(r300ContextPtr rmesa, int id);
extern void r300_mem_free(r300ContextPtr rmesa, int id);

#endif
|
||||
316
src/mesa/drivers/dri/r300/r300_mipmap_tree.c
Normal file
316
src/mesa/drivers/dri/r300/r300_mipmap_tree.c
Normal file
|
|
@ -0,0 +1,316 @@
|
|||
/*
|
||||
* Copyright (C) 2008 Nicolai Haehnle.
|
||||
*
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial
|
||||
* portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
|
||||
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "r300_mipmap_tree.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "main/simple_list.h"
|
||||
#include "main/texcompress.h"
|
||||
#include "main/texformat.h"
|
||||
|
||||
#include "radeon_buffer.h"
|
||||
|
||||
/*
 * Compute the storage size in bytes of a compressed image, padded up to
 * the r300 hardware's minimum row requirements.  Starts from Mesa's
 * generic compressed size and inflates narrow images (DXT1 blocks are
 * 8 bytes, DXT3/5 blocks 16 bytes) so a row covers the hardware minimum.
 * NOTE(review): the width+3 comparisons look like "rounded width fits in
 * one/two blocks" checks — confirm against hardware docs.
 */
static GLuint r300_compressed_texture_size(GLcontext *ctx,
		GLsizei width, GLsizei height, GLsizei depth,
		GLuint mesaFormat)
{
	GLuint size = _mesa_compressed_texture_size(ctx, width, height, depth, mesaFormat);

	if (mesaFormat == MESA_FORMAT_RGB_DXT1 ||
	    mesaFormat == MESA_FORMAT_RGBA_DXT1) {
		if (width + 3 < 8)	/* width one block */
			size = size * 4;
		else if (width + 3 < 16)
			size = size * 2;
	} else {
		/* DXT3/5, 16 bytes per block */
		WARN_ONCE("DXT 3/5 suffers from multitexturing problems!\n");
		if (width + 3 < 8)
			size = size * 2;
	}

	return size;
}
|
||||
|
||||
/**
 * Compute sizes and fill in offset and blit information for the given
 * image (determined by \p face and \p level).
 *
 * \param curOffset points to the offset at which the image is to be stored
 * and is updated by this function according to the size of the image.
 *
 * Side effects: writes lvl->rowstride, lvl->size and
 * lvl->faces[face].offset for the addressed mipmap level.
 */
static void compute_tex_image_offset(r300_mipmap_tree *mt,
	GLuint face, GLuint level, GLuint* curOffset)
{
	r300_mipmap_level *lvl = &mt->levels[level];

	/* Find image size in bytes */
	if (mt->compressed) {
		/* TODO: Is this correct? Need test cases for compressed textures! */
		GLuint align;

		/* Rectangle textures need 64-byte rows, others 32-byte rows;
		 * align is expressed in texels, hence the division by bpp. */
		if (mt->target == GL_TEXTURE_RECTANGLE_NV)
			align = 64 / mt->bpp;
		else
			align = 32 / mt->bpp;
		lvl->rowstride = (lvl->width + align - 1) & ~(align - 1);
		lvl->size = r300_compressed_texture_size(mt->r300->radeon.glCtx,
			lvl->width, lvl->height, lvl->depth, mt->compressed);
	} else if (mt->target == GL_TEXTURE_RECTANGLE_NV) {
		/* Rectangle textures: rows padded to 64 bytes. */
		lvl->rowstride = (lvl->width * mt->bpp + 63) & ~63;
		lvl->size = lvl->rowstride * lvl->height;
	} else if (mt->tilebits & R300_TXO_MICRO_TILE) {
		/* tile pattern is 16 bytes x2. mipmaps stay 32 byte aligned,
		 * though the actual offset may be different (if texture is less than
		 * 32 bytes width) to the untiled case */
		lvl->rowstride = (lvl->width * mt->bpp * 2 + 31) & ~31;
		lvl->size = lvl->rowstride * ((lvl->height + 1) / 2) * lvl->depth;
	} else {
		/* Plain linear layout: rows padded to 32 bytes. */
		lvl->rowstride = (lvl->width * mt->bpp + 31) & ~31;
		lvl->size = lvl->rowstride * lvl->height * lvl->depth;
	}
	assert(lvl->size > 0);

	/* All images are aligned to a 32-byte offset */
	*curOffset = (*curOffset + 0x1f) & ~0x1f;
	lvl->faces[face].offset = *curOffset;
	*curOffset += lvl->size;
}
|
||||
|
||||
/* Return `size` shifted down by `levels` mipmap levels, clamped to 1. */
static GLuint minify(GLuint size, GLuint levels)
{
	GLuint minified = size >> levels;

	return (minified == 0) ? 1 : minified;
}
|
||||
|
||||
/*
 * Fill in per-level dimensions and per-face offsets for every mipmap
 * level of the tree, then record the total buffer size required
 * (rounded up to the hardware offset granularity).
 */
static void calculate_miptree_layout(r300_mipmap_tree *mt)
{
	const GLuint numLevels = mt->lastLevel - mt->firstLevel + 1;
	GLuint offset = 0;
	GLuint level;

	assert(numLevels <= RADEON_MAX_TEXTURE_LEVELS);

	for (level = 0; level < numLevels; ++level) {
		r300_mipmap_level *lvl = &mt->levels[level];
		GLuint face;

		lvl->width = minify(mt->width0, level);
		lvl->height = minify(mt->height0, level);
		lvl->depth = minify(mt->depth0, level);

		for (face = 0; face < mt->faces; ++face)
			compute_tex_image_offset(mt, face, level, &offset);
	}

	/* Note the required size in memory */
	mt->totalsize = (offset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
}
|
||||
|
||||
|
||||
/**
|
||||
* Create a new mipmap tree, calculate its layout and allocate memory.
|
||||
*/
|
||||
r300_mipmap_tree* r300_miptree_create(r300ContextPtr rmesa, r300TexObj *t,
|
||||
GLenum target, GLuint firstLevel, GLuint lastLevel,
|
||||
GLuint width0, GLuint height0, GLuint depth0,
|
||||
GLuint bpp, GLuint tilebits, GLuint compressed)
|
||||
{
|
||||
r300_mipmap_tree *mt = CALLOC_STRUCT(_r300_mipmap_tree);
|
||||
|
||||
mt->r300 = rmesa;
|
||||
mt->refcount = 1;
|
||||
mt->t = t;
|
||||
mt->target = target;
|
||||
mt->faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
|
||||
mt->firstLevel = firstLevel;
|
||||
mt->lastLevel = lastLevel;
|
||||
mt->width0 = width0;
|
||||
mt->height0 = height0;
|
||||
mt->depth0 = depth0;
|
||||
mt->bpp = bpp;
|
||||
mt->tilebits = tilebits;
|
||||
mt->compressed = compressed;
|
||||
|
||||
calculate_miptree_layout(mt);
|
||||
|
||||
mt->bo = radeon_bo_open(rmesa->radeon.radeonScreen->bom, 0, mt->totalsize, 1024, 0);
|
||||
|
||||
return mt;
|
||||
}
|
||||
|
||||
/** Take an additional reference on the given miptree. */
void r300_miptree_reference(r300_mipmap_tree *mt)
{
	mt->refcount++;
	/* Catches wrap-around from an already-dead (zero/negative) count. */
	assert(mt->refcount > 0);
}
|
||||
|
||||
/*
 * Drop one reference on the given miptree (NULL is allowed and ignored).
 * When the last reference goes away the buffer object and the tree
 * itself are released.
 */
void r300_miptree_unreference(r300_mipmap_tree *mt)
{
	if (!mt)
		return;

	assert(mt->refcount > 0);

	if (--mt->refcount == 0) {
		radeon_bo_unref(mt->bo);
		free(mt);
	}
}
|
||||
|
||||
|
||||
/*
 * Determine the range of mipmap levels [firstLevel, lastLevel] that the
 * hardware needs to sample the texture object, from BaseLevel/MaxLevel
 * and the MinLod/MaxLod clamps.  On an unknown target the output
 * parameters are left untouched.
 */
static void calculate_first_last_level(struct gl_texture_object *tObj,
	GLuint *pfirstLevel, GLuint *plastLevel)
{
	const struct gl_texture_image * const baseImage =
		tObj->Image[0][tObj->BaseLevel];

	/* These must be signed values. MinLod and MaxLod can be negative numbers,
	 * and having firstLevel and lastLevel as signed prevents the need for
	 * extra sign checks.
	 */
	int firstLevel;
	int lastLevel;

	/* Yes, this looks overly complicated, but it's all needed.
	 */
	switch (tObj->Target) {
	case GL_TEXTURE_1D:
	case GL_TEXTURE_2D:
	case GL_TEXTURE_3D:
	case GL_TEXTURE_CUBE_MAP:
		if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
			/* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
			 */
			firstLevel = lastLevel = tObj->BaseLevel;
		} else {
			/* Round the LOD clamps to the nearest level and keep
			 * them inside [BaseLevel, BaseLevel + MaxLog2] as well
			 * as below MaxLevel. */
			firstLevel = tObj->BaseLevel + (GLint)(tObj->MinLod + 0.5);
			firstLevel = MAX2(firstLevel, tObj->BaseLevel);
			firstLevel = MIN2(firstLevel, tObj->BaseLevel + baseImage->MaxLog2);
			lastLevel = tObj->BaseLevel + (GLint)(tObj->MaxLod + 0.5);
			lastLevel = MAX2(lastLevel, tObj->BaseLevel);
			lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
			lastLevel = MIN2(lastLevel, tObj->MaxLevel);
			lastLevel = MAX2(firstLevel, lastLevel);	/* need at least one level */
		}
		break;
	case GL_TEXTURE_RECTANGLE_NV:
	case GL_TEXTURE_4D_SGIS:
		/* These targets have no mipmaps. */
		firstLevel = lastLevel = 0;
		break;
	default:
		return;
	}

	/* save these values */
	*pfirstLevel = firstLevel;
	*plastLevel = lastLevel;
}
|
||||
|
||||
|
||||
/**
|
||||
* Checks whether the given miptree can hold the given texture image at the
|
||||
* given face and level.
|
||||
*/
|
||||
GLboolean r300_miptree_matches_image(r300_mipmap_tree *mt,
|
||||
struct gl_texture_image *texImage, GLuint face, GLuint level)
|
||||
{
|
||||
r300_mipmap_level *lvl;
|
||||
|
||||
if (face >= mt->faces || level < mt->firstLevel || level > mt->lastLevel)
|
||||
return GL_FALSE;
|
||||
|
||||
if (texImage->TexFormat->TexelBytes != mt->bpp)
|
||||
return GL_FALSE;
|
||||
|
||||
lvl = &mt->levels[level - mt->firstLevel];
|
||||
if (lvl->width != texImage->Width ||
|
||||
lvl->height != texImage->Height ||
|
||||
lvl->depth != texImage->Depth)
|
||||
return GL_FALSE;
|
||||
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Checks whether the given miptree has the right format to store the given texture object.
|
||||
*/
|
||||
GLboolean r300_miptree_matches_texture(r300_mipmap_tree *mt, struct gl_texture_object *texObj)
|
||||
{
|
||||
struct gl_texture_image *firstImage;
|
||||
GLuint compressed;
|
||||
GLuint numfaces = 1;
|
||||
GLuint firstLevel, lastLevel;
|
||||
|
||||
calculate_first_last_level(texObj, &firstLevel, &lastLevel);
|
||||
if (texObj->Target == GL_TEXTURE_CUBE_MAP)
|
||||
numfaces = 6;
|
||||
|
||||
firstImage = texObj->Image[0][firstLevel];
|
||||
compressed = firstImage->IsCompressed ? firstImage->TexFormat->MesaFormat : 0;
|
||||
|
||||
return (mt->firstLevel == firstLevel &&
|
||||
mt->lastLevel == lastLevel &&
|
||||
mt->width0 == firstImage->Width &&
|
||||
mt->height0 == firstImage->Height &&
|
||||
mt->depth0 == firstImage->Depth &&
|
||||
mt->bpp == firstImage->TexFormat->TexelBytes &&
|
||||
mt->compressed == compressed);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Try to allocate a mipmap tree for the given texture that will fit the
|
||||
* given image in the given position.
|
||||
*/
|
||||
void r300_try_alloc_miptree(r300ContextPtr rmesa, r300TexObj *t,
|
||||
struct gl_texture_image *texImage, GLuint face, GLuint level)
|
||||
{
|
||||
GLuint compressed = texImage->IsCompressed ? texImage->TexFormat->MesaFormat : 0;
|
||||
GLuint numfaces = 1;
|
||||
GLuint firstLevel, lastLevel;
|
||||
|
||||
assert(!t->mt);
|
||||
|
||||
calculate_first_last_level(&t->base, &firstLevel, &lastLevel);
|
||||
if (t->base.Target == GL_TEXTURE_CUBE_MAP)
|
||||
numfaces = 6;
|
||||
|
||||
if (level != firstLevel || face >= numfaces)
|
||||
return;
|
||||
|
||||
t->mt = r300_miptree_create(rmesa, t, t->base.Target,
|
||||
firstLevel, lastLevel,
|
||||
texImage->Width, texImage->Height, texImage->Depth,
|
||||
texImage->TexFormat->TexelBytes, t->tile_bits, compressed);
|
||||
}
|
||||
97
src/mesa/drivers/dri/r300/r300_mipmap_tree.h
Normal file
97
src/mesa/drivers/dri/r300/r300_mipmap_tree.h
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
/*
|
||||
* Copyright (C) 2008 Nicolai Haehnle.
|
||||
*
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial
|
||||
* portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
|
||||
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __R300_MIPMAP_TREE_H_
#define __R300_MIPMAP_TREE_H_

#include "r300_context.h"

typedef struct _r300_mipmap_tree r300_mipmap_tree;
typedef struct _r300_mipmap_level r300_mipmap_level;
typedef struct _r300_mipmap_image r300_mipmap_image;

/** A single image (one face of one mipmap level). */
struct _r300_mipmap_image {
	GLuint offset; /**< Offset of this image from the start of mipmap tree buffer, in bytes */
};

/** Layout of one mipmap level (all faces share these dimensions). */
struct _r300_mipmap_level {
	GLuint width;
	GLuint height;
	GLuint depth;
	GLuint size; /**< Size of each image, in bytes */
	GLuint rowstride; /**< in bytes */
	r300_mipmap_image faces[6];
};


/**
 * A mipmap tree contains texture images in the layout that the hardware
 * expects.
 *
 * The meta-data of mipmap trees is immutable, i.e. you cannot change the
 * layout on-the-fly; however, the texture contents (i.e. texels) can be
 * changed.
 */
struct _r300_mipmap_tree {
	r300ContextPtr r300;
	r300TexObj *t;
	struct radeon_bo *bo; /**< buffer object backing the texel data */
	GLuint refcount; /**< manipulated via r300_miptree_reference/unreference */

	GLuint totalsize; /**< total size of the miptree, in bytes */

	GLenum target; /**< GL_TEXTURE_xxx */
	GLuint faces; /**< # of faces: 6 for cubemaps, 1 otherwise */
	GLuint firstLevel; /**< First mip level stored in this mipmap tree */
	GLuint lastLevel; /**< Last mip level stored in this mipmap tree */

	GLuint width0; /**< Width of firstLevel image */
	GLuint height0; /**< Height of firstLevel image */
	GLuint depth0; /**< Depth of firstLevel image */

	GLuint bpp; /**< Bytes per texel */
	GLuint tilebits; /**< R300_TXO_xxx_TILE */
	GLuint compressed; /**< MESA_FORMAT_xxx indicating a compressed format, or 0 if uncompressed */

	/* Indexed relative to firstLevel: levels[0] is firstLevel. */
	r300_mipmap_level levels[RADEON_MAX_TEXTURE_LEVELS];
};

r300_mipmap_tree* r300_miptree_create(r300ContextPtr rmesa, r300TexObj *t,
		GLenum target, GLuint firstLevel, GLuint lastLevel,
		GLuint width0, GLuint height0, GLuint depth0,
		GLuint bpp, GLuint tilebits, GLuint compressed);
void r300_miptree_reference(r300_mipmap_tree *mt);
void r300_miptree_unreference(r300_mipmap_tree *mt);

GLboolean r300_miptree_matches_image(r300_mipmap_tree *mt,
		struct gl_texture_image *texImage, GLuint face, GLuint level);
GLboolean r300_miptree_matches_texture(r300_mipmap_tree *mt, struct gl_texture_object *texObj);
void r300_try_alloc_miptree(r300ContextPtr rmesa, r300TexObj *t,
		struct gl_texture_image *texImage, GLuint face, GLuint level);


#endif /* __R300_MIPMAP_TREE_H_ */
|
||||
|
|
@ -1525,6 +1525,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
# define R500_SEL_FILTER4_TC3 (3 << 18)
|
||||
|
||||
#define R300_TX_OFFSET_0 0x4540
|
||||
#define R300_TX_OFFSET_1 0x4544
|
||||
#define R300_TX_OFFSET_2 0x4548
|
||||
#define R300_TX_OFFSET_3 0x454C
|
||||
#define R300_TX_OFFSET_4 0x4550
|
||||
#define R300_TX_OFFSET_5 0x4554
|
||||
#define R300_TX_OFFSET_6 0x4558
|
||||
#define R300_TX_OFFSET_7 0x455C
|
||||
/* BEGIN: Guess from R200 */
|
||||
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
|
||||
# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
|
||||
|
|
|
|||
|
|
@ -175,89 +175,91 @@ int r300NumVerts(r300ContextPtr rmesa, int num_verts, int prim)
|
|||
static void r300EmitElts(GLcontext * ctx, void *elts, unsigned long n_elts)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
struct r300_dma_region *rvb = &rmesa->state.elt_dma;
|
||||
void *out;
|
||||
|
||||
if (r300IsGartMemory(rmesa, elts, n_elts * 4)) {
|
||||
rvb->address = rmesa->radeon.radeonScreen->gartTextures.map;
|
||||
rvb->start = ((char *)elts) - rvb->address;
|
||||
rvb->aos_offset =
|
||||
rmesa->radeon.radeonScreen->gart_texture_offset +
|
||||
rvb->start;
|
||||
return;
|
||||
} else if (r300IsGartMemory(rmesa, elts, 1)) {
|
||||
WARN_ONCE("Pointer not within GART memory!\n");
|
||||
_mesa_exit(-1);
|
||||
}
|
||||
|
||||
r300AllocDmaRegion(rmesa, rvb, n_elts * 4, 4);
|
||||
rvb->aos_offset = GET_START(rvb);
|
||||
|
||||
out = rvb->address + rvb->start;
|
||||
rmesa->state.elt_dma_bo = radeon_bo_open(rmesa->radeon.radeonScreen->bom,
|
||||
0, n_elts * 4, 4,
|
||||
RADEON_GEM_DOMAIN_GTT);
|
||||
rmesa->state.elt_dma_offset = 0;
|
||||
radeon_bo_map(rmesa->state.elt_dma_bo, 1);
|
||||
out = rmesa->state.elt_dma_bo->ptr + rmesa->state.elt_dma_offset;
|
||||
memcpy(out, elts, n_elts * 4);
|
||||
radeon_bo_unmap(rmesa->state.elt_dma_bo);
|
||||
}
|
||||
|
||||
static void r300FireEB(r300ContextPtr rmesa, unsigned long addr,
|
||||
int vertex_count, int type)
|
||||
static void r300FireEB(r300ContextPtr rmesa, int vertex_count, int type)
|
||||
{
|
||||
int cmd_reserved = 0;
|
||||
int cmd_written = 0;
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
BATCH_LOCALS(rmesa);
|
||||
|
||||
start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_INDX_2, 0), 0);
|
||||
e32(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (vertex_count << 16) | type | R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
|
||||
if (vertex_count > 0) {
|
||||
BEGIN_BATCH(8);
|
||||
OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_INDX_2, 0);
|
||||
OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_INDICES |
|
||||
((vertex_count + 0) << 16) |
|
||||
type |
|
||||
R300_VAP_VF_CNTL__INDEX_SIZE_32bit);
|
||||
|
||||
start_packet3(CP_PACKET3(R300_PACKET3_INDX_BUFFER, 2), 2);
|
||||
e32(R300_EB_UNK1 | (0 << 16) | R300_EB_UNK2);
|
||||
e32(addr);
|
||||
e32(vertex_count);
|
||||
OUT_BATCH_PACKET3(R300_PACKET3_INDX_BUFFER, 2);
|
||||
OUT_BATCH(R300_EB_UNK1 | (0 << 16) | R300_EB_UNK2);
|
||||
OUT_BATCH_RELOC(0, rmesa->state.elt_dma_bo,
|
||||
rmesa->state.elt_dma_offset, 0);
|
||||
OUT_BATCH(vertex_count);
|
||||
END_BATCH();
|
||||
}
|
||||
}
|
||||
|
||||
static void r300EmitAOS(r300ContextPtr rmesa, GLuint nr, GLuint offset)
|
||||
{
|
||||
BATCH_LOCALS(rmesa);
|
||||
uint32_t voffset;
|
||||
int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
|
||||
int i;
|
||||
int cmd_reserved = 0;
|
||||
int cmd_written = 0;
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_VERTS)
|
||||
fprintf(stderr, "%s: nr=%d, ofs=0x%08x\n", __FUNCTION__, nr,
|
||||
offset);
|
||||
|
||||
start_packet3(CP_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, sz - 1), sz - 1);
|
||||
e32(nr);
|
||||
BEGIN_BATCH(sz+2);
|
||||
OUT_BATCH_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, sz - 1);
|
||||
OUT_BATCH(nr);
|
||||
|
||||
for (i = 0; i + 1 < nr; i += 2) {
|
||||
e32((rmesa->state.aos[i].aos_size << 0) |
|
||||
(rmesa->state.aos[i].aos_stride << 8) |
|
||||
(rmesa->state.aos[i + 1].aos_size << 16) |
|
||||
(rmesa->state.aos[i + 1].aos_stride << 24));
|
||||
OUT_BATCH((rmesa->state.aos[i].components << 0) |
|
||||
(rmesa->state.aos[i].stride << 8) |
|
||||
(rmesa->state.aos[i + 1].components << 16) |
|
||||
(rmesa->state.aos[i + 1].stride << 24));
|
||||
|
||||
e32(rmesa->state.aos[i].aos_offset + offset * 4 * rmesa->state.aos[i].aos_stride);
|
||||
e32(rmesa->state.aos[i + 1].aos_offset + offset * 4 * rmesa->state.aos[i + 1].aos_stride);
|
||||
voffset = rmesa->state.aos[i + 0].offset +
|
||||
offset * 4 * rmesa->state.aos[i + 0].stride;
|
||||
OUT_BATCH_RELOC(0, rmesa->state.aos[i].bo, voffset, 0);
|
||||
voffset = rmesa->state.aos[i + 1].offset +
|
||||
offset * 4 * rmesa->state.aos[i + 1].stride;
|
||||
OUT_BATCH_RELOC(0, rmesa->state.aos[i+1].bo, voffset, 0);
|
||||
}
|
||||
|
||||
if (nr & 1) {
|
||||
e32((rmesa->state.aos[nr - 1].aos_size << 0) |
|
||||
(rmesa->state.aos[nr - 1].aos_stride << 8));
|
||||
e32(rmesa->state.aos[nr - 1].aos_offset + offset * 4 * rmesa->state.aos[nr - 1].aos_stride);
|
||||
OUT_BATCH((rmesa->state.aos[nr - 1].components << 0) |
|
||||
(rmesa->state.aos[nr - 1].stride << 8));
|
||||
OUT_BATCH_RELOC(0, rmesa->state.aos[nr - 1].bo,
|
||||
rmesa->state.aos[nr - 1].offset + offset * 4 * rmesa->state.aos[nr - 1].stride, 0);
|
||||
}
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
static void r300FireAOS(r300ContextPtr rmesa, int vertex_count, int type)
|
||||
{
|
||||
int cmd_reserved = 0;
|
||||
int cmd_written = 0;
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
BATCH_LOCALS(rmesa);
|
||||
|
||||
start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0), 0);
|
||||
e32(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (vertex_count << 16) | type);
|
||||
BEGIN_BATCH(3);
|
||||
OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
|
||||
OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (vertex_count << 16) | type);
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
|
||||
int start, int end, int prim)
|
||||
{
|
||||
BATCH_LOCALS(rmesa);
|
||||
int type, num_verts;
|
||||
TNLcontext *tnl = TNL_CONTEXT(ctx);
|
||||
struct vertex_buffer *vb = &tnl->vb;
|
||||
|
|
@ -268,6 +270,12 @@ static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
|
|||
if (type < 0 || num_verts <= 0)
|
||||
return;
|
||||
|
||||
/* Make space for at least 64 dwords.
|
||||
* This is supposed to ensure that we can get all rendering
|
||||
* commands into a single command buffer.
|
||||
*/
|
||||
r300EnsureCmdBufSpace(rmesa, 64, __FUNCTION__);
|
||||
|
||||
if (vb->Elts) {
|
||||
if (num_verts > 65535) {
|
||||
/* not implemented yet */
|
||||
|
|
@ -287,11 +295,12 @@ static void r300RunRenderPrimitive(r300ContextPtr rmesa, GLcontext * ctx,
|
|||
*/
|
||||
r300EmitElts(ctx, vb->Elts, num_verts);
|
||||
r300EmitAOS(rmesa, rmesa->state.aos_count, start);
|
||||
r300FireEB(rmesa, rmesa->state.elt_dma.aos_offset, num_verts, type);
|
||||
r300FireEB(rmesa, num_verts, type);
|
||||
} else {
|
||||
r300EmitAOS(rmesa, rmesa->state.aos_count, start);
|
||||
r300FireAOS(rmesa, num_verts, type);
|
||||
}
|
||||
COMMIT_BATCH();
|
||||
}
|
||||
|
||||
static GLboolean r300RunRender(GLcontext * ctx,
|
||||
|
|
@ -302,7 +311,6 @@ static GLboolean r300RunRender(GLcontext * ctx,
|
|||
TNLcontext *tnl = TNL_CONTEXT(ctx);
|
||||
struct vertex_buffer *vb = &tnl->vb;
|
||||
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_PRIMS)
|
||||
fprintf(stderr, "%s\n", __FUNCTION__);
|
||||
|
||||
|
|
@ -324,10 +332,6 @@ static GLboolean r300RunRender(GLcontext * ctx,
|
|||
|
||||
r300EmitCacheFlush(rmesa);
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
r300UseArrays(ctx);
|
||||
#endif
|
||||
|
||||
r300ReleaseArrays(ctx);
|
||||
|
||||
return GL_FALSE;
|
||||
|
|
|
|||
|
|
@ -55,6 +55,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
|
||||
#include "radeon_ioctl.h"
|
||||
#include "radeon_state.h"
|
||||
#include "radeon_buffer.h"
|
||||
#include "r300_context.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include "r300_state.h"
|
||||
|
|
@ -1144,41 +1145,26 @@ void r300UpdateViewportOffset(GLcontext * ctx)
|
|||
void r300UpdateDrawBuffer(GLcontext * ctx)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
r300ContextPtr r300 = rmesa;
|
||||
struct gl_framebuffer *fb = ctx->DrawBuffer;
|
||||
driRenderbuffer *drb;
|
||||
struct radeon_renderbuffer *rrb;
|
||||
|
||||
if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
|
||||
/* draw to front */
|
||||
drb =
|
||||
(driRenderbuffer *) fb->Attachment[BUFFER_FRONT_LEFT].
|
||||
Renderbuffer;
|
||||
rrb =
|
||||
(void *) fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
|
||||
} else if (fb->_ColorDrawBufferIndexes[0] == BUFFER_BACK_LEFT) {
|
||||
/* draw to back */
|
||||
drb =
|
||||
(driRenderbuffer *) fb->Attachment[BUFFER_BACK_LEFT].
|
||||
Renderbuffer;
|
||||
rrb = (void *) fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
|
||||
} else {
|
||||
/* drawing to multiple buffers, or none */
|
||||
return;
|
||||
}
|
||||
|
||||
assert(drb);
|
||||
assert(drb->flippedPitch);
|
||||
assert(rrb);
|
||||
assert(rrb->pitch);
|
||||
|
||||
R300_STATECHANGE(rmesa, cb);
|
||||
|
||||
r300->hw.cb.cmd[R300_CB_OFFSET] = drb->flippedOffset + //r300->radeon.state.color.drawOffset +
|
||||
r300->radeon.radeonScreen->fbLocation;
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] = drb->flippedPitch; //r300->radeon.state.color.drawPitch;
|
||||
|
||||
if (r300->radeon.radeonScreen->cpp == 4)
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_FORMAT_ARGB8888;
|
||||
else
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_FORMAT_RGB565;
|
||||
|
||||
if (r300->radeon.sarea->tiling_enabled)
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_TILE_ENABLE;
|
||||
#if 0
|
||||
R200_STATECHANGE(rmesa, ctx);
|
||||
|
||||
|
|
@ -1497,14 +1483,9 @@ static void r300SetupTextures(GLcontext * ctx)
|
|||
/* We cannot let disabled tmu offsets pass DRM */
|
||||
for (i = 0; i < mtu; i++) {
|
||||
if (ctx->Texture.Unit[i]._ReallyEnabled) {
|
||||
|
||||
#if 0 /* Enables old behaviour */
|
||||
hw_tmu = i;
|
||||
#endif
|
||||
tmu_mappings[i] = hw_tmu;
|
||||
|
||||
t = r300->state.texture.unit[i].texobj;
|
||||
/* XXX questionable fix for bug 9170: */
|
||||
t = r300_tex_obj(ctx->Texture.Unit[i]._Current);
|
||||
if (!t)
|
||||
continue;
|
||||
|
||||
|
|
@ -1530,21 +1511,20 @@ static void r300SetupTextures(GLcontext * ctx)
|
|||
*/
|
||||
r300->hw.tex.filter_1.cmd[R300_TEX_VALUE_0 + hw_tmu] =
|
||||
t->filter_1 |
|
||||
translate_lod_bias(ctx->Texture.Unit[i].LodBias + t->base.tObj->LodBias);
|
||||
translate_lod_bias(ctx->Texture.Unit[i].LodBias + t->base.LodBias);
|
||||
r300->hw.tex.size.cmd[R300_TEX_VALUE_0 + hw_tmu] =
|
||||
t->size;
|
||||
r300->hw.tex.format.cmd[R300_TEX_VALUE_0 +
|
||||
hw_tmu] = t->format;
|
||||
r300->hw.tex.pitch.cmd[R300_TEX_VALUE_0 + hw_tmu] =
|
||||
t->pitch_reg;
|
||||
r300->hw.tex.offset.cmd[R300_TEX_VALUE_0 +
|
||||
hw_tmu] = t->offset;
|
||||
r300->hw.textures[hw_tmu] = t;
|
||||
|
||||
if (t->offset & R300_TXO_MACRO_TILE) {
|
||||
if (t->tile_bits & R300_TXO_MACRO_TILE) {
|
||||
WARN_ONCE("macro tiling enabled!\n");
|
||||
}
|
||||
|
||||
if (t->offset & R300_TXO_MICRO_TILE) {
|
||||
if (t->tile_bits & R300_TXO_MICRO_TILE) {
|
||||
WARN_ONCE("micro tiling enabled!\n");
|
||||
}
|
||||
|
||||
|
|
@ -2223,8 +2203,6 @@ static void r300ResetHwState(r300ContextPtr r300)
|
|||
|
||||
r300UpdateCulling(ctx);
|
||||
|
||||
r300UpdateTextureState(ctx);
|
||||
|
||||
r300SetBlendState(ctx);
|
||||
r300SetLogicOpState(ctx);
|
||||
|
||||
|
|
@ -2371,20 +2349,6 @@ static void r300ResetHwState(r300ContextPtr r300)
|
|||
|
||||
r300BlendColor(ctx, ctx->Color.BlendColor);
|
||||
|
||||
/* Again, r300ClearBuffer uses this */
|
||||
r300->hw.cb.cmd[R300_CB_OFFSET] =
|
||||
r300->radeon.state.color.drawOffset +
|
||||
r300->radeon.radeonScreen->fbLocation;
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] = r300->radeon.state.color.drawPitch;
|
||||
|
||||
if (r300->radeon.radeonScreen->cpp == 4)
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_FORMAT_ARGB8888;
|
||||
else
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_FORMAT_RGB565;
|
||||
|
||||
if (r300->radeon.sarea->tiling_enabled)
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_TILE_ENABLE;
|
||||
|
||||
r300->hw.rb3d_dither_ctl.cmd[1] = 0;
|
||||
r300->hw.rb3d_dither_ctl.cmd[2] = 0;
|
||||
r300->hw.rb3d_dither_ctl.cmd[3] = 0;
|
||||
|
|
@ -2400,10 +2364,6 @@ static void r300ResetHwState(r300ContextPtr r300)
|
|||
r300->hw.rb3d_discard_src_pixel_lte_threshold.cmd[1] = 0x00000000;
|
||||
r300->hw.rb3d_discard_src_pixel_lte_threshold.cmd[2] = 0xffffffff;
|
||||
|
||||
r300->hw.zb.cmd[R300_ZB_OFFSET] =
|
||||
r300->radeon.radeonScreen->depthOffset +
|
||||
r300->radeon.radeonScreen->fbLocation;
|
||||
r300->hw.zb.cmd[R300_ZB_PITCH] = r300->radeon.radeonScreen->depthPitch;
|
||||
|
||||
if (r300->radeon.sarea->tiling_enabled) {
|
||||
/* XXX: Turn off when clearing buffers ? */
|
||||
|
|
@ -2675,7 +2635,7 @@ void r300UpdateShaderStates(r300ContextPtr rmesa)
|
|||
GLcontext *ctx;
|
||||
ctx = rmesa->radeon.glCtx;
|
||||
|
||||
r300UpdateTextureState(ctx);
|
||||
r300ValidateTextures(ctx);
|
||||
r300SetEarlyZState(ctx);
|
||||
|
||||
GLuint fgdepthsrc = R300_FG_DEPTH_SRC_SCAN;
|
||||
|
|
|
|||
|
|
@ -39,8 +39,6 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
|
||||
#define R300_NEWPRIM( rmesa ) \
|
||||
do { \
|
||||
if ( rmesa->dma.flush ) \
|
||||
rmesa->dma.flush( rmesa ); \
|
||||
} while (0)
|
||||
|
||||
#define R300_STATECHANGE(r300, atom) \
|
||||
|
|
@ -57,13 +55,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
TODO: This has not been implemented yet
|
||||
*/
|
||||
#define R300_FIREVERTICES( r300 ) \
|
||||
do { \
|
||||
\
|
||||
if ( (r300)->cmdbuf.count_used || (r300)->dma.flush ) { \
|
||||
r300Flush( (r300)->radeon.glCtx ); \
|
||||
} \
|
||||
\
|
||||
} while (0)
|
||||
do { \
|
||||
r300Flush( (r300)->radeon.glCtx ); \
|
||||
} while (0)
|
||||
|
||||
// r300_state.c
|
||||
extern int future_hw_tcl_on;
|
||||
|
|
|
|||
|
|
@ -56,12 +56,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "r300_state.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include "r300_emit.h"
|
||||
#include "r300_mem.h"
|
||||
|
||||
static void flush_last_swtcl_prim( r300ContextPtr rmesa );
|
||||
|
||||
|
||||
void r300EmitVertexAOS(r300ContextPtr rmesa, GLuint vertex_size, GLuint offset);
|
||||
void r300EmitVertexAOS(r300ContextPtr rmesa, GLuint vertex_size, struct radeon_bo *bo, GLuint offset);
|
||||
void r300EmitVbufPrim(r300ContextPtr rmesa, GLuint primitive, GLuint vertex_nr);
|
||||
#define EMIT_ATTR( ATTR, STYLE ) \
|
||||
do { \
|
||||
|
|
@ -86,7 +85,6 @@ static void r300SetVertexFormat( GLcontext *ctx )
|
|||
DECLARE_RENDERINPUTS(index_bitset);
|
||||
GLuint InputsRead = 0, OutputsWritten = 0;
|
||||
int vap_fmt_0 = 0;
|
||||
int vap_vte_cntl = 0;
|
||||
int offset = 0;
|
||||
int vte = 0;
|
||||
GLint inputs[VERT_ATTRIB_MAX];
|
||||
|
|
@ -175,7 +173,7 @@ static void r300SetVertexFormat( GLcontext *ctx )
|
|||
inputs[i] = -1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Fixed, apply to vir0 only */
|
||||
if (InputsRead & (1 << VERT_ATTRIB_POS))
|
||||
inputs[VERT_ATTRIB_POS] = 0;
|
||||
|
|
@ -186,16 +184,16 @@ static void r300SetVertexFormat( GLcontext *ctx )
|
|||
for (i = VERT_ATTRIB_TEX0; i <= VERT_ATTRIB_TEX7; i++)
|
||||
if (InputsRead & (1 << i))
|
||||
inputs[i] = 6 + (i - VERT_ATTRIB_TEX0);
|
||||
|
||||
|
||||
for (i = 0, nr = 0; i < VERT_ATTRIB_MAX; i++) {
|
||||
if (InputsRead & (1 << i)) {
|
||||
tab[nr++] = i;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
int ci;
|
||||
|
||||
|
||||
swizzle[i][0] = SWIZZLE_ZERO;
|
||||
swizzle[i][1] = SWIZZLE_ZERO;
|
||||
swizzle[i][2] = SWIZZLE_ZERO;
|
||||
|
|
@ -215,21 +213,21 @@ static void r300SetVertexFormat( GLcontext *ctx )
|
|||
((drm_r300_cmd_header_t *) rmesa->hw.vir[1].cmd)->packet0.count =
|
||||
r300VAPInputRoute1(&rmesa->hw.vir[1].cmd[R300_VIR_CNTL_0], swizzle,
|
||||
nr);
|
||||
|
||||
|
||||
R300_STATECHANGE(rmesa, vic);
|
||||
rmesa->hw.vic.cmd[R300_VIC_CNTL_0] = r300VAPInputCntl0(ctx, InputsRead);
|
||||
rmesa->hw.vic.cmd[R300_VIC_CNTL_1] = r300VAPInputCntl1(ctx, InputsRead);
|
||||
|
||||
|
||||
R300_STATECHANGE(rmesa, vof);
|
||||
rmesa->hw.vof.cmd[R300_VOF_CNTL_0] = r300VAPOutputCntl0(ctx, OutputsWritten);
|
||||
rmesa->hw.vof.cmd[R300_VOF_CNTL_1] = vap_fmt_1;
|
||||
|
||||
|
||||
rmesa->swtcl.vertex_size =
|
||||
_tnl_install_attrs( ctx,
|
||||
rmesa->swtcl.vertex_attrs,
|
||||
rmesa->swtcl.vertex_attrs,
|
||||
rmesa->swtcl.vertex_attr_count,
|
||||
NULL, 0 );
|
||||
|
||||
|
||||
rmesa->swtcl.vertex_size /= 4;
|
||||
|
||||
RENDERINPUTS_COPY( rmesa->tnl_index_bitset, index_bitset );
|
||||
|
|
@ -247,37 +245,22 @@ static void flush_last_swtcl_prim( r300ContextPtr rmesa )
|
|||
{
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL)
|
||||
fprintf(stderr, "%s\n", __FUNCTION__);
|
||||
|
||||
rmesa->dma.flush = NULL;
|
||||
|
||||
if (rmesa->dma.current.buf) {
|
||||
struct r300_dma_region *current = &rmesa->dma.current;
|
||||
GLuint current_offset = GET_START(current);
|
||||
|
||||
assert (current->start +
|
||||
rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
|
||||
current->ptr);
|
||||
|
||||
if (rmesa->dma.current.start != rmesa->dma.current.ptr) {
|
||||
|
||||
r300EnsureCmdBufSpace( rmesa, rmesa->hw.max_state_size + (12*sizeof(int)), __FUNCTION__);
|
||||
|
||||
r300EmitState(rmesa);
|
||||
|
||||
r300EmitVertexAOS( rmesa,
|
||||
rmesa->swtcl.vertex_size,
|
||||
current_offset);
|
||||
|
||||
r300EmitVbufPrim( rmesa,
|
||||
rmesa->swtcl.hw_primitive,
|
||||
rmesa->swtcl.numverts);
|
||||
|
||||
r300EmitCacheFlush(rmesa);
|
||||
}
|
||||
|
||||
rmesa->swtcl.numverts = 0;
|
||||
current->start = current->ptr;
|
||||
}
|
||||
rmesa->swtcl.flush = NULL;
|
||||
radeon_bo_unmap(rmesa->swtcl.bo);
|
||||
r300EnsureCmdBufSpace(rmesa,
|
||||
rmesa->hw.max_state_size + (12*sizeof(int)),
|
||||
__FUNCTION__);
|
||||
r300EmitState(rmesa);
|
||||
r300EmitVertexAOS(rmesa,
|
||||
rmesa->swtcl.vertex_size,
|
||||
rmesa->swtcl.bo,
|
||||
0);
|
||||
r300EmitVbufPrim(rmesa,
|
||||
rmesa->swtcl.hw_primitive,
|
||||
rmesa->swtcl.numverts);
|
||||
r300EmitCacheFlush(rmesa);
|
||||
COMMIT_BATCH();
|
||||
rmesa->swtcl.numverts = 0;
|
||||
}
|
||||
|
||||
/* Alloc space in the current dma region.
|
||||
|
|
@ -287,26 +270,14 @@ r300AllocDmaLowVerts( r300ContextPtr rmesa, int nverts, int vsize )
|
|||
{
|
||||
GLuint bytes = vsize * nverts;
|
||||
|
||||
if ( rmesa->dma.current.ptr + bytes > rmesa->dma.current.end )
|
||||
r300RefillCurrentDmaRegion( rmesa, bytes);
|
||||
|
||||
if (!rmesa->dma.flush) {
|
||||
rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
|
||||
rmesa->dma.flush = flush_last_swtcl_prim;
|
||||
}
|
||||
|
||||
ASSERT( vsize == rmesa->swtcl.vertex_size * 4 );
|
||||
ASSERT( rmesa->dma.flush == flush_last_swtcl_prim );
|
||||
ASSERT( rmesa->dma.current.start +
|
||||
rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
|
||||
rmesa->dma.current.ptr );
|
||||
|
||||
{
|
||||
GLubyte *head = (GLubyte *) (rmesa->dma.current.address + rmesa->dma.current.ptr);
|
||||
rmesa->dma.current.ptr += bytes;
|
||||
rmesa->swtcl.numverts += nverts;
|
||||
return head;
|
||||
}
|
||||
rmesa->swtcl.bo = radeon_bo_open(rmesa->radeon.radeonScreen->bom,
|
||||
0, bytes, 4, RADEON_GEM_DOMAIN_GTT);
|
||||
radeon_bo_map(rmesa->swtcl.bo, 1);
|
||||
if (rmesa->swtcl.flush == NULL) {
|
||||
rmesa->radeon.glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
|
||||
rmesa->swtcl.flush = flush_last_swtcl_prim;
|
||||
}
|
||||
return rmesa->swtcl.bo->ptr;
|
||||
}
|
||||
|
||||
static GLuint reduced_prim[] = {
|
||||
|
|
@ -352,7 +323,7 @@ static void r300RenderPrimitive( GLcontext *ctx, GLenum prim );
|
|||
r300ContextPtr rmesa = R300_CONTEXT(ctx); \
|
||||
const char *r300verts = (char *)rmesa->swtcl.verts;
|
||||
#define VERT(x) (r300Vertex *)(r300verts + ((x) * vertsize * sizeof(int)))
|
||||
#define VERTEX r300Vertex
|
||||
#define VERTEX r300Vertex
|
||||
#define DO_DEBUG_VERTS (1 && (RADEON_DEBUG & DEBUG_VERTS))
|
||||
#define PRINT_VERTEX(x)
|
||||
#undef TAG
|
||||
|
|
@ -572,18 +543,16 @@ static void r300RenderStart(GLcontext *ctx)
|
|||
r300ContextPtr rmesa = R300_CONTEXT( ctx );
|
||||
// fprintf(stderr, "%s\n", __FUNCTION__);
|
||||
|
||||
r300ChooseRenderState(ctx);
|
||||
r300ChooseRenderState(ctx);
|
||||
r300SetVertexFormat(ctx);
|
||||
|
||||
r300UpdateShaders(rmesa);
|
||||
r300UpdateShaderStates(rmesa);
|
||||
|
||||
r300EmitCacheFlush(rmesa);
|
||||
|
||||
if (rmesa->dma.flush != 0 &&
|
||||
rmesa->dma.flush != flush_last_swtcl_prim)
|
||||
rmesa->dma.flush( rmesa );
|
||||
|
||||
if (rmesa->swtcl.flush != NULL) {
|
||||
rmesa->swtcl.flush(rmesa);
|
||||
}
|
||||
}
|
||||
|
||||
static void r300RenderFinish(GLcontext *ctx)
|
||||
|
|
@ -593,7 +562,7 @@ static void r300RenderFinish(GLcontext *ctx)
|
|||
static void r300RasterPrimitive( GLcontext *ctx, GLuint hwprim )
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
|
||||
|
||||
if (rmesa->swtcl.hw_primitive != hwprim) {
|
||||
R300_NEWPRIM( rmesa );
|
||||
rmesa->swtcl.hw_primitive = hwprim;
|
||||
|
|
@ -611,7 +580,7 @@ static void r300RenderPrimitive(GLcontext *ctx, GLenum prim)
|
|||
|
||||
r300RasterPrimitive( ctx, reduced_prim[prim] );
|
||||
// fprintf(stderr, "%s\n", __FUNCTION__);
|
||||
|
||||
|
||||
}
|
||||
|
||||
static void r300ResetLineStipple(GLcontext *ctx)
|
||||
|
|
@ -625,12 +594,12 @@ void r300InitSwtcl(GLcontext *ctx)
|
|||
TNLcontext *tnl = TNL_CONTEXT(ctx);
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
static int firsttime = 1;
|
||||
|
||||
|
||||
if (firsttime) {
|
||||
init_rast_tab();
|
||||
firsttime = 0;
|
||||
}
|
||||
|
||||
|
||||
tnl->Driver.Render.Start = r300RenderStart;
|
||||
tnl->Driver.Render.Finish = r300RenderFinish;
|
||||
tnl->Driver.Render.PrimitiveNotify = r300RenderPrimitive;
|
||||
|
|
@ -638,15 +607,15 @@ void r300InitSwtcl(GLcontext *ctx)
|
|||
tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
|
||||
tnl->Driver.Render.CopyPV = _tnl_copy_pv;
|
||||
tnl->Driver.Render.Interp = _tnl_interp;
|
||||
|
||||
|
||||
/* FIXME: what are these numbers? */
|
||||
_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
|
||||
_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
|
||||
48 * sizeof(GLfloat) );
|
||||
|
||||
|
||||
rmesa->swtcl.verts = (GLubyte *)tnl->clipspace.vertex_buf;
|
||||
rmesa->swtcl.RenderIndex = ~0;
|
||||
rmesa->swtcl.render_primitive = GL_TRIANGLES;
|
||||
rmesa->swtcl.hw_primitive = 0;
|
||||
rmesa->swtcl.hw_primitive = 0;
|
||||
|
||||
_tnl_invalidate_vertex_state( ctx, ~0 );
|
||||
_tnl_invalidate_vertices( ctx, ~0 );
|
||||
|
|
@ -655,9 +624,9 @@ void r300InitSwtcl(GLcontext *ctx)
|
|||
_tnl_need_projected_coords( ctx, GL_FALSE );
|
||||
r300ChooseRenderState(ctx);
|
||||
|
||||
_mesa_validate_all_lighting_tables( ctx );
|
||||
_mesa_validate_all_lighting_tables( ctx );
|
||||
|
||||
tnl->Driver.NotifyMaterialChange =
|
||||
tnl->Driver.NotifyMaterialChange =
|
||||
_mesa_validate_all_lighting_tables;
|
||||
}
|
||||
|
||||
|
|
@ -665,33 +634,32 @@ void r300DestroySwtcl(GLcontext *ctx)
|
|||
{
|
||||
}
|
||||
|
||||
void r300EmitVertexAOS(r300ContextPtr rmesa, GLuint vertex_size, GLuint offset)
|
||||
void r300EmitVertexAOS(r300ContextPtr rmesa, GLuint vertex_size, struct radeon_bo *bo, GLuint offset)
|
||||
{
|
||||
int cmd_reserved = 0;
|
||||
int cmd_written = 0;
|
||||
BATCH_LOCALS(rmesa);
|
||||
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
if (RADEON_DEBUG & DEBUG_VERTS)
|
||||
fprintf(stderr, "%s: vertex_size %d, offset 0x%x \n",
|
||||
__FUNCTION__, vertex_size, offset);
|
||||
fprintf(stderr, "%s: vertex_size %d, offset 0x%x \n",
|
||||
__FUNCTION__, vertex_size, offset);
|
||||
|
||||
start_packet3(CP_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, 2), 2);
|
||||
e32(1);
|
||||
e32(vertex_size | (vertex_size << 8));
|
||||
e32(offset);
|
||||
BEGIN_BATCH(5);
|
||||
OUT_BATCH_PACKET3(R300_PACKET3_3D_LOAD_VBPNTR, 2);
|
||||
OUT_BATCH(1);
|
||||
OUT_BATCH(vertex_size | (vertex_size << 8));
|
||||
OUT_BATCH_RELOC(0, bo, offset, 0);
|
||||
END_BATCH();
|
||||
}
|
||||
|
||||
void r300EmitVbufPrim(r300ContextPtr rmesa, GLuint primitive, GLuint vertex_nr)
|
||||
{
|
||||
|
||||
int cmd_reserved = 0;
|
||||
int cmd_written = 0;
|
||||
BATCH_LOCALS(rmesa);
|
||||
int type, num_verts;
|
||||
drm_radeon_cmd_header_t *cmd = NULL;
|
||||
|
||||
type = r300PrimitiveType(rmesa, primitive);
|
||||
num_verts = r300NumVerts(rmesa, vertex_nr, primitive);
|
||||
|
||||
start_packet3(CP_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0), 0);
|
||||
e32(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (num_verts << 16) | type);
|
||||
|
||||
BEGIN_BATCH(3);
|
||||
OUT_BATCH_PACKET3(R300_PACKET3_3D_DRAW_VBUF_2, 0);
|
||||
OUT_BATCH(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST | (num_verts << 16) | type);
|
||||
END_BATCH();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "main/context.h"
|
||||
#include "main/enums.h"
|
||||
#include "main/image.h"
|
||||
#include "main/mipmap.h"
|
||||
#include "main/simple_list.h"
|
||||
#include "main/texformat.h"
|
||||
#include "main/texstore.h"
|
||||
|
|
@ -49,6 +50,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "r300_context.h"
|
||||
#include "r300_state.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include "r300_mipmap_tree.h"
|
||||
#include "r300_tex.h"
|
||||
|
||||
#include "xmlpool.h"
|
||||
|
|
@ -79,7 +81,7 @@ static unsigned int translate_wrap_mode(GLenum wrapmode)
|
|||
*/
|
||||
static void r300UpdateTexWrap(r300TexObjPtr t)
|
||||
{
|
||||
struct gl_texture_object *tObj = t->base.tObj;
|
||||
struct gl_texture_object *tObj = &t->base;
|
||||
|
||||
t->filter &=
|
||||
~(R300_TX_WRAP_S_MASK | R300_TX_WRAP_T_MASK | R300_TX_WRAP_R_MASK);
|
||||
|
|
@ -119,6 +121,9 @@ static GLuint aniso_filter(GLfloat anisotropy)
|
|||
*/
|
||||
static void r300SetTexFilter(r300TexObjPtr t, GLenum minf, GLenum magf, GLfloat anisotropy)
|
||||
{
|
||||
/* Force revalidation to account for switches from/to mipmapping. */
|
||||
t->validated = GL_FALSE;
|
||||
|
||||
t->filter &= ~(R300_TX_MIN_FILTER_MASK | R300_TX_MIN_FILTER_MIP_MASK | R300_TX_MAG_FILTER_MASK | R300_TX_MAX_ANISO_MASK);
|
||||
t->filter_1 &= ~R300_EDGE_ANISO_EDGE_ONLY;
|
||||
|
||||
|
|
@ -176,39 +181,6 @@ static void r300SetTexBorderColor(r300TexObjPtr t, GLubyte c[4])
|
|||
t->pp_border_color = PACK_COLOR_8888(c[3], c[0], c[1], c[2]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate space for and load the mesa images into the texture memory block.
|
||||
* This will happen before drawing with a new texture, or drawing with a
|
||||
* texture after it was swapped out or teximaged again.
|
||||
*/
|
||||
|
||||
static r300TexObjPtr r300AllocTexObj(struct gl_texture_object *texObj)
|
||||
{
|
||||
r300TexObjPtr t;
|
||||
|
||||
t = CALLOC_STRUCT(r300_tex_obj);
|
||||
texObj->DriverData = t;
|
||||
if (t != NULL) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE) {
|
||||
fprintf(stderr, "%s( %p, %p )\n", __FUNCTION__,
|
||||
(void *)texObj, (void *)t);
|
||||
}
|
||||
|
||||
/* Initialize non-image-dependent parts of the state:
|
||||
*/
|
||||
t->base.tObj = texObj;
|
||||
t->border_fallback = GL_FALSE;
|
||||
|
||||
make_empty_list(&t->base);
|
||||
|
||||
r300UpdateTexWrap(t);
|
||||
r300SetTexFilter(t, texObj->MinFilter, texObj->MagFilter, texObj->MaxAnisotropy);
|
||||
r300SetTexBorderColor(t, texObj->_BorderChan);
|
||||
}
|
||||
|
||||
return t;
|
||||
}
|
||||
|
||||
/* try to find a format which will only need a memcopy */
|
||||
static const struct gl_texture_format *r300Choose8888TexFormat(GLenum srcFormat,
|
||||
GLenum srcType)
|
||||
|
|
@ -434,97 +406,218 @@ static const struct gl_texture_format *r300ChooseTextureFormat(GLcontext * ctx,
|
|||
return NULL; /* never get here */
|
||||
}
|
||||
|
||||
static GLboolean
|
||||
r300ValidateClientStorage(GLcontext * ctx, GLenum target,
|
||||
GLint internalFormat,
|
||||
GLint srcWidth, GLint srcHeight,
|
||||
GLenum format, GLenum type, const void *pixels,
|
||||
const struct gl_pixelstore_attrib *packing,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
|
||||
/**
|
||||
* Allocate an empty texture image object.
|
||||
*/
|
||||
static struct gl_texture_image *r300NewTextureImage(GLcontext *ctx)
|
||||
{
|
||||
return CALLOC(sizeof(r300_texture_image));
|
||||
}
|
||||
|
||||
/**
|
||||
* Free memory associated with this texture image.
|
||||
*/
|
||||
static void r300FreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
|
||||
{
|
||||
r300_texture_image* image = get_r300_texture_image(timage);
|
||||
|
||||
if (image->mt) {
|
||||
r300_miptree_unreference(image->mt);
|
||||
image->mt = 0;
|
||||
assert(!image->base.Data);
|
||||
} else {
|
||||
_mesa_free_texture_image_data(ctx, timage);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Set Data pointer and additional data for mapped texture image */
|
||||
static void teximage_set_map_data(r300_texture_image *image)
|
||||
{
|
||||
r300_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
|
||||
image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
|
||||
image->base.RowStride = lvl->rowstride / image->mt->bpp;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Map a single texture image for glTexImage and friends.
|
||||
*/
|
||||
static void r300_teximage_map(r300_texture_image *image, GLboolean write_enable)
|
||||
{
|
||||
if (image->mt) {
|
||||
assert(!image->base.Data);
|
||||
|
||||
radeon_bo_map(image->mt->bo, write_enable);
|
||||
teximage_set_map_data(image);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void r300_teximage_unmap(r300_texture_image *image)
|
||||
{
|
||||
if (image->mt) {
|
||||
assert(image->base.Data);
|
||||
|
||||
image->base.Data = 0;
|
||||
radeon_bo_unmap(image->mt->bo);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Map a validated texture for reading during software rendering.
|
||||
*/
|
||||
static void r300MapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
|
||||
{
|
||||
r300TexObj* t = r300_tex_obj(texObj);
|
||||
int face, level;
|
||||
|
||||
assert(texObj->_Complete);
|
||||
assert(t->mt);
|
||||
|
||||
radeon_bo_map(t->mt->bo, GL_FALSE);
|
||||
for(face = 0; face < t->mt->faces; ++face) {
|
||||
for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
|
||||
teximage_set_map_data(get_r300_texture_image(texObj->Image[face][level]));
|
||||
}
|
||||
}
|
||||
|
||||
static void r300UnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
|
||||
{
|
||||
r300TexObj* t = r300_tex_obj(texObj);
|
||||
int face, level;
|
||||
|
||||
assert(texObj->_Complete);
|
||||
assert(t->mt);
|
||||
|
||||
for(face = 0; face < t->mt->faces; ++face) {
|
||||
for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
|
||||
texObj->Image[face][level]->Data = 0;
|
||||
}
|
||||
radeon_bo_unmap(t->mt->bo);
|
||||
}
|
||||
|
||||
/**
|
||||
* All glTexImage calls go through this function.
|
||||
*/
|
||||
static void r300_teximage(
|
||||
GLcontext *ctx, int dims,
|
||||
GLint face, GLint level,
|
||||
GLint internalFormat,
|
||||
GLint width, GLint height, GLint depth,
|
||||
GLsizei imageSize,
|
||||
GLenum format, GLenum type, const GLvoid * pixels,
|
||||
const struct gl_pixelstore_attrib *packing,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage,
|
||||
int compressed)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
r300TexObj* t = r300_tex_obj(texObj);
|
||||
r300_texture_image* image = get_r300_texture_image(texImage);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "intformat %s format %s type %s\n",
|
||||
_mesa_lookup_enum_by_nr(internalFormat),
|
||||
_mesa_lookup_enum_by_nr(format),
|
||||
_mesa_lookup_enum_by_nr(type));
|
||||
R300_FIREVERTICES(rmesa);
|
||||
|
||||
if (!ctx->Unpack.ClientStorage)
|
||||
return 0;
|
||||
t->validated = GL_FALSE;
|
||||
|
||||
if (ctx->_ImageTransferState ||
|
||||
texImage->IsCompressed || texObj->GenerateMipmap)
|
||||
return 0;
|
||||
/* Choose and fill in the texture format for this image */
|
||||
texImage->TexFormat = r300ChooseTextureFormat(ctx, internalFormat, format, type);
|
||||
_mesa_set_fetch_functions(texImage, dims);
|
||||
|
||||
/* This list is incomplete, may be different on ppc???
|
||||
*/
|
||||
switch (internalFormat) {
|
||||
case GL_RGBA:
|
||||
if (format == GL_BGRA && type == GL_UNSIGNED_INT_8_8_8_8_REV) {
|
||||
texImage->TexFormat = _dri_texformat_argb8888;
|
||||
} else
|
||||
return 0;
|
||||
break;
|
||||
if (texImage->TexFormat->TexelBytes == 0) {
|
||||
texImage->IsCompressed = GL_TRUE;
|
||||
texImage->CompressedSize =
|
||||
ctx->Driver.CompressedTextureSize(ctx, texImage->Width,
|
||||
texImage->Height, texImage->Depth,
|
||||
texImage->TexFormat->MesaFormat);
|
||||
} else {
|
||||
texImage->IsCompressed = GL_FALSE;
|
||||
texImage->CompressedSize = 0;
|
||||
}
|
||||
|
||||
case GL_RGB:
|
||||
if (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5) {
|
||||
texImage->TexFormat = _dri_texformat_rgb565;
|
||||
} else
|
||||
return 0;
|
||||
break;
|
||||
/* Allocate memory for image */
|
||||
r300FreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */
|
||||
|
||||
case GL_YCBCR_MESA:
|
||||
if (format == GL_YCBCR_MESA &&
|
||||
type == GL_UNSIGNED_SHORT_8_8_REV_APPLE) {
|
||||
texImage->TexFormat = &_mesa_texformat_ycbcr_rev;
|
||||
} else if (format == GL_YCBCR_MESA &&
|
||||
(type == GL_UNSIGNED_SHORT_8_8_APPLE ||
|
||||
type == GL_UNSIGNED_BYTE)) {
|
||||
texImage->TexFormat = &_mesa_texformat_ycbcr;
|
||||
} else
|
||||
return 0;
|
||||
break;
|
||||
if (!t->mt)
|
||||
r300_try_alloc_miptree(rmesa, t, texImage, face, level);
|
||||
if (t->mt && r300_miptree_matches_image(t->mt, texImage, face, level)) {
|
||||
image->mt = t->mt;
|
||||
image->mtlevel = level - t->mt->firstLevel;
|
||||
image->mtface = face;
|
||||
r300_miptree_reference(t->mt);
|
||||
} else {
|
||||
int size;
|
||||
if (texImage->IsCompressed) {
|
||||
size = texImage->CompressedSize;
|
||||
} else {
|
||||
size = texImage->Width * texImage->Height * texImage->Depth * texImage->TexFormat->TexelBytes;
|
||||
}
|
||||
texImage->Data = _mesa_alloc_texmemory(size);
|
||||
}
|
||||
|
||||
/* Upload texture image; note that the spec allows pixels to be NULL */
|
||||
if (compressed) {
|
||||
pixels = _mesa_validate_pbo_compressed_teximage(
|
||||
ctx, imageSize, pixels, packing, "glCompressedTexImage");
|
||||
} else {
|
||||
pixels = _mesa_validate_pbo_teximage(
|
||||
ctx, dims, width, height, depth,
|
||||
format, type, pixels, packing, "glTexImage");
|
||||
}
|
||||
|
||||
if (pixels) {
|
||||
r300_teximage_map(image, GL_TRUE);
|
||||
|
||||
if (compressed) {
|
||||
memcpy(texImage->Data, pixels, imageSize);
|
||||
} else {
|
||||
GLuint dstRowStride;
|
||||
if (image->mt) {
|
||||
r300_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
|
||||
dstRowStride = lvl->rowstride;
|
||||
} else {
|
||||
dstRowStride = texImage->Width * texImage->TexFormat->TexelBytes;
|
||||
}
|
||||
if (!texImage->TexFormat->StoreImage(ctx, dims,
|
||||
texImage->_BaseFormat,
|
||||
texImage->TexFormat,
|
||||
texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
|
||||
dstRowStride,
|
||||
texImage->ImageOffsets,
|
||||
width, height, depth,
|
||||
format, type, pixels, packing))
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
|
||||
}
|
||||
|
||||
r300_teximage_unmap(image);
|
||||
}
|
||||
|
||||
_mesa_unmap_teximage_pbo(ctx, packing);
|
||||
|
||||
/* SGIS_generate_mipmap */
|
||||
if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
|
||||
ctx->Driver.GenerateMipmap(ctx, texObj->Target, texObj);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static GLuint face_for_target(GLenum target)
|
||||
{
|
||||
switch (target) {
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
|
||||
return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Could deal with these packing issues, but currently don't:
|
||||
*/
|
||||
if (packing->SkipPixels ||
|
||||
packing->SkipRows || packing->SwapBytes || packing->LsbFirst) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
GLint srcRowStride = _mesa_image_row_stride(packing, srcWidth,
|
||||
format, type);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: srcRowStride %d/%x\n",
|
||||
__FUNCTION__, srcRowStride, srcRowStride);
|
||||
|
||||
/* Could check this later in upload, pitch restrictions could be
|
||||
* relaxed, but would need to store the image pitch somewhere,
|
||||
* as packing details might change before image is uploaded:
|
||||
*/
|
||||
if (!r300IsGartMemory(rmesa, pixels, srcHeight * srcRowStride)
|
||||
|| (srcRowStride & 63))
|
||||
return 0;
|
||||
|
||||
/* Have validated that _mesa_transfer_teximage would be a straight
|
||||
* memcpy at this point. NOTE: future calls to TexSubImage will
|
||||
* overwrite the client data. This is explicitly mentioned in the
|
||||
* extension spec.
|
||||
*/
|
||||
texImage->Data = (void *)pixels;
|
||||
texImage->IsClientData = GL_TRUE;
|
||||
texImage->RowStride = srcRowStride / texImage->TexFormat->TexelBytes;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
static void r300TexImage1D(GLcontext * ctx, GLenum target, GLint level,
|
||||
GLint internalFormat,
|
||||
GLint width, GLint border,
|
||||
|
|
@ -533,53 +626,8 @@ static void r300TexImage1D(GLcontext * ctx, GLenum target, GLint level,
|
|||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
|
||||
if (t) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage1D");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Note, this will call ChooseTextureFormat */
|
||||
_mesa_store_teximage1d(ctx, target, level, internalFormat,
|
||||
width, border, format, type, pixels,
|
||||
&ctx->Unpack, texObj, texImage);
|
||||
|
||||
t->dirty_images[0] |= (1 << level);
|
||||
}
|
||||
|
||||
static void r300TexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
|
||||
GLint xoffset,
|
||||
GLsizei width,
|
||||
GLenum format, GLenum type,
|
||||
const GLvoid * pixels,
|
||||
const struct gl_pixelstore_attrib *packing,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
|
||||
assert(t); /* this _should_ be true */
|
||||
if (t) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage1D");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
_mesa_store_texsubimage1d(ctx, target, level, xoffset, width,
|
||||
format, type, pixels, packing, texObj,
|
||||
texImage);
|
||||
|
||||
t->dirty_images[0] |= (1 << level);
|
||||
r300_teximage(ctx, 1, 0, level, internalFormat, width, 1, 1,
|
||||
0, format, type, pixels, packing, texObj, texImage, 0);
|
||||
}
|
||||
|
||||
static void r300TexImage2D(GLcontext * ctx, GLenum target, GLint level,
|
||||
|
|
@ -590,108 +638,10 @@ static void r300TexImage2D(GLcontext * ctx, GLenum target, GLint level,
|
|||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
GLuint face;
|
||||
GLuint face = face_for_target(target);
|
||||
|
||||
/* which cube face or ordinary 2D image */
|
||||
switch (target) {
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
|
||||
face =
|
||||
(GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
|
||||
ASSERT(face < 6);
|
||||
break;
|
||||
default:
|
||||
face = 0;
|
||||
}
|
||||
|
||||
if (t != NULL) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage2D");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
texImage->IsClientData = GL_FALSE;
|
||||
|
||||
if (r300ValidateClientStorage(ctx, target,
|
||||
internalFormat,
|
||||
width, height,
|
||||
format, type, pixels,
|
||||
packing, texObj, texImage)) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: Using client storage\n",
|
||||
__FUNCTION__);
|
||||
} else {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: Using normal storage\n",
|
||||
__FUNCTION__);
|
||||
|
||||
/* Normal path: copy (to cached memory) and eventually upload
|
||||
* via another copy to GART memory and then a blit... Could
|
||||
* eliminate one copy by going straight to (permanent) GART.
|
||||
*
|
||||
* Note, this will call r300ChooseTextureFormat.
|
||||
*/
|
||||
_mesa_store_teximage2d(ctx, target, level, internalFormat,
|
||||
width, height, border, format, type,
|
||||
pixels, &ctx->Unpack, texObj, texImage);
|
||||
|
||||
t->dirty_images[face] |= (1 << level);
|
||||
}
|
||||
}
|
||||
|
||||
static void r300TexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
|
||||
GLint xoffset, GLint yoffset,
|
||||
GLsizei width, GLsizei height,
|
||||
GLenum format, GLenum type,
|
||||
const GLvoid * pixels,
|
||||
const struct gl_pixelstore_attrib *packing,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
GLuint face;
|
||||
|
||||
/* which cube face or ordinary 2D image */
|
||||
switch (target) {
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
|
||||
face =
|
||||
(GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
|
||||
ASSERT(face < 6);
|
||||
break;
|
||||
default:
|
||||
face = 0;
|
||||
}
|
||||
|
||||
assert(t); /* this _should_ be true */
|
||||
if (t) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage2D");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
_mesa_store_texsubimage2d(ctx, target, level, xoffset, yoffset, width,
|
||||
height, format, type, pixels, packing, texObj,
|
||||
texImage);
|
||||
|
||||
t->dirty_images[face] |= (1 << level);
|
||||
r300_teximage(ctx, 2, face, level, internalFormat, width, height, 1,
|
||||
0, format, type, pixels, packing, texObj, texImage, 0);
|
||||
}
|
||||
|
||||
static void r300CompressedTexImage2D(GLcontext * ctx, GLenum target,
|
||||
|
|
@ -701,114 +651,10 @@ static void r300CompressedTexImage2D(GLcontext * ctx, GLenum target,
|
|||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
GLuint face;
|
||||
GLuint face = face_for_target(target);
|
||||
|
||||
/* which cube face or ordinary 2D image */
|
||||
switch (target) {
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
|
||||
face =
|
||||
(GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
|
||||
ASSERT(face < 6);
|
||||
break;
|
||||
default:
|
||||
face = 0;
|
||||
}
|
||||
|
||||
if (t != NULL) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY,
|
||||
"glCompressedTexImage2D");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
texImage->IsClientData = GL_FALSE;
|
||||
|
||||
/* can't call this, different parameters. Would never evaluate to true anyway currently */
|
||||
#if 0
|
||||
if (r300ValidateClientStorage(ctx, target,
|
||||
internalFormat,
|
||||
width, height,
|
||||
format, type, pixels,
|
||||
packing, texObj, texImage)) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: Using client storage\n",
|
||||
__FUNCTION__);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: Using normal storage\n",
|
||||
__FUNCTION__);
|
||||
|
||||
/* Normal path: copy (to cached memory) and eventually upload
|
||||
* via another copy to GART memory and then a blit... Could
|
||||
* eliminate one copy by going straight to (permanent) GART.
|
||||
*
|
||||
* Note, this will call r300ChooseTextureFormat.
|
||||
*/
|
||||
_mesa_store_compressed_teximage2d(ctx, target, level,
|
||||
internalFormat, width, height,
|
||||
border, imageSize, data,
|
||||
texObj, texImage);
|
||||
|
||||
t->dirty_images[face] |= (1 << level);
|
||||
}
|
||||
}
|
||||
|
||||
static void r300CompressedTexSubImage2D(GLcontext * ctx, GLenum target,
|
||||
GLint level, GLint xoffset,
|
||||
GLint yoffset, GLsizei width,
|
||||
GLsizei height, GLenum format,
|
||||
GLsizei imageSize, const GLvoid * data,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
GLuint face;
|
||||
|
||||
/* which cube face or ordinary 2D image */
|
||||
switch (target) {
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
|
||||
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
|
||||
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
|
||||
face =
|
||||
(GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
|
||||
ASSERT(face < 6);
|
||||
break;
|
||||
default:
|
||||
face = 0;
|
||||
}
|
||||
|
||||
assert(t); /* this _should_ be true */
|
||||
if (t) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY,
|
||||
"glCompressedTexSubImage3D");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
_mesa_store_compressed_texsubimage2d(ctx, target, level, xoffset,
|
||||
yoffset, width, height, format,
|
||||
imageSize, data, texObj, texImage);
|
||||
|
||||
t->dirty_images[face] |= (1 << level);
|
||||
r300_teximage(ctx, 2, face, level, internalFormat, width, height, 1,
|
||||
imageSize, 0, 0, data, 0, texObj, texImage, 1);
|
||||
}
|
||||
|
||||
static void r300TexImage3D(GLcontext * ctx, GLenum target, GLint level,
|
||||
|
|
@ -820,51 +666,100 @@ static void r300TexImage3D(GLcontext * ctx, GLenum target, GLint level,
|
|||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
r300_teximage(ctx, 3, 0, level, internalFormat, width, height, depth,
|
||||
0, format, type, pixels, packing, texObj, texImage, 0);
|
||||
}
|
||||
|
||||
if (t) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage3D");
|
||||
return;
|
||||
/**
|
||||
* Update a subregion of the given texture image.
|
||||
*/
|
||||
static void r300_texsubimage(GLcontext* ctx, int dims, int level,
|
||||
GLint xoffset, GLint yoffset, GLint zoffset,
|
||||
GLsizei width, GLsizei height, GLsizei depth,
|
||||
GLenum format, GLenum type,
|
||||
const GLvoid * pixels,
|
||||
const struct gl_pixelstore_attrib *packing,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage,
|
||||
int compressed)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
r300_texture_image* image = get_r300_texture_image(texImage);
|
||||
|
||||
R300_FIREVERTICES(rmesa);
|
||||
|
||||
pixels = _mesa_validate_pbo_teximage(ctx, dims,
|
||||
width, height, depth, format, type, pixels, packing, "glTexSubImage1D");
|
||||
|
||||
if (pixels) {
|
||||
GLint dstRowStride;
|
||||
r300_teximage_map(image, GL_TRUE);
|
||||
|
||||
if (image->mt) {
|
||||
r300_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
|
||||
dstRowStride = lvl->rowstride;
|
||||
} else {
|
||||
dstRowStride = texImage->Width * texImage->TexFormat->TexelBytes;
|
||||
}
|
||||
|
||||
if (!texImage->TexFormat->StoreImage(ctx, dims, texImage->_BaseFormat,
|
||||
texImage->TexFormat, texImage->Data,
|
||||
xoffset, yoffset, zoffset,
|
||||
dstRowStride,
|
||||
texImage->ImageOffsets,
|
||||
width, height, depth,
|
||||
format, type, pixels, packing))
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
|
||||
|
||||
r300_teximage_unmap(image);
|
||||
}
|
||||
|
||||
texImage->IsClientData = GL_FALSE;
|
||||
_mesa_unmap_teximage_pbo(ctx, packing);
|
||||
|
||||
#if 0
|
||||
if (r300ValidateClientStorage(ctx, target,
|
||||
internalFormat,
|
||||
width, height,
|
||||
format, type, pixels,
|
||||
packing, texObj, texImage)) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: Using client storage\n",
|
||||
__FUNCTION__);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: Using normal storage\n",
|
||||
__FUNCTION__);
|
||||
|
||||
/* Normal path: copy (to cached memory) and eventually upload
|
||||
* via another copy to GART memory and then a blit... Could
|
||||
* eliminate one copy by going straight to (permanent) GART.
|
||||
*
|
||||
* Note, this will call r300ChooseTextureFormat.
|
||||
*/
|
||||
_mesa_store_teximage3d(ctx, target, level, internalFormat,
|
||||
width, height, depth, border,
|
||||
format, type, pixels,
|
||||
&ctx->Unpack, texObj, texImage);
|
||||
|
||||
t->dirty_images[0] |= (1 << level);
|
||||
/* GL_SGIS_generate_mipmap */
|
||||
if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
|
||||
ctx->Driver.GenerateMipmap(ctx, texObj->Target, texObj);
|
||||
}
|
||||
}
|
||||
|
||||
static void r300TexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
|
||||
GLint xoffset,
|
||||
GLsizei width,
|
||||
GLenum format, GLenum type,
|
||||
const GLvoid * pixels,
|
||||
const struct gl_pixelstore_attrib *packing,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
r300_texsubimage(ctx, 1, level, xoffset, 0, 0, width, 1, 1,
|
||||
format, type, pixels, packing, texObj, texImage, 0);
|
||||
}
|
||||
|
||||
static void r300TexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
|
||||
GLint xoffset, GLint yoffset,
|
||||
GLsizei width, GLsizei height,
|
||||
GLenum format, GLenum type,
|
||||
const GLvoid * pixels,
|
||||
const struct gl_pixelstore_attrib *packing,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
r300_texsubimage(ctx, 2, level, xoffset, yoffset, 0, width, height, 1,
|
||||
format, type, pixels, packing, texObj, texImage, 0);
|
||||
}
|
||||
|
||||
static void r300CompressedTexSubImage2D(GLcontext * ctx, GLenum target,
|
||||
GLint level, GLint xoffset,
|
||||
GLint yoffset, GLsizei width,
|
||||
GLsizei height, GLenum format,
|
||||
GLsizei imageSize, const GLvoid * data,
|
||||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
r300_texsubimage(ctx, 2, level, xoffset, yoffset, 0, width, height, 1,
|
||||
format, 0, data, 0, texObj, texImage, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
r300TexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
|
||||
GLint xoffset, GLint yoffset, GLint zoffset,
|
||||
|
|
@ -875,30 +770,29 @@ r300TexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
|
|||
struct gl_texture_object *texObj,
|
||||
struct gl_texture_image *texImage)
|
||||
{
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
|
||||
/* fprintf(stderr, "%s\n", __FUNCTION__); */
|
||||
|
||||
assert(t); /* this _should_ be true */
|
||||
if (t) {
|
||||
driSwapOutTextureObject(t);
|
||||
} else {
|
||||
t = (driTextureObject *) r300AllocTexObj(texObj);
|
||||
if (!t) {
|
||||
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage3D");
|
||||
return;
|
||||
}
|
||||
texObj->DriverData = t;
|
||||
}
|
||||
|
||||
_mesa_store_texsubimage3d(ctx, target, level, xoffset, yoffset, zoffset,
|
||||
width, height, depth,
|
||||
format, type, pixels, packing, texObj,
|
||||
texImage);
|
||||
|
||||
t->dirty_images[0] |= (1 << level);
|
||||
r300_texsubimage(ctx, 3, level, xoffset, yoffset, zoffset, width, height, depth,
|
||||
format, type, pixels, packing, texObj, texImage, 0);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Wraps Mesa's implementation to ensure that the base level image is mapped.
|
||||
*
|
||||
* This relies on internal details of _mesa_generate_mipmap, in particular
|
||||
* the fact that the memory for recreated texture images is always freed.
|
||||
*/
|
||||
static void r300_generate_mipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
|
||||
{
|
||||
GLuint face = face_for_target(target);
|
||||
r300_texture_image *baseimage = get_r300_texture_image(texObj->Image[face][texObj->BaseLevel]);
|
||||
|
||||
r300_teximage_map(baseimage, GL_FALSE);
|
||||
_mesa_generate_mipmap(ctx, target, texObj);
|
||||
r300_teximage_unmap(baseimage);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Changes variables and flags for a state update, which will happen at the
|
||||
* next UpdateTextureState
|
||||
|
|
@ -908,7 +802,7 @@ static void r300TexParameter(GLcontext * ctx, GLenum target,
|
|||
struct gl_texture_object *texObj,
|
||||
GLenum pname, const GLfloat * params)
|
||||
{
|
||||
r300TexObjPtr t = (r300TexObjPtr) texObj->DriverData;
|
||||
r300TexObj* t = r300_tex_obj(texObj);
|
||||
|
||||
if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
|
||||
fprintf(stderr, "%s( %s )\n", __FUNCTION__,
|
||||
|
|
@ -941,7 +835,11 @@ static void r300TexParameter(GLcontext * ctx, GLenum target,
|
|||
* we just have to rely on loading the right subset of mipmap levels
|
||||
* to simulate a clamped LOD.
|
||||
*/
|
||||
driSwapOutTextureObject((driTextureObject *) t);
|
||||
if (t->mt) {
|
||||
r300_miptree_unreference(t->mt);
|
||||
t->mt = 0;
|
||||
t->validated = GL_FALSE;
|
||||
}
|
||||
break;
|
||||
|
||||
case GL_DEPTH_TEXTURE_MODE:
|
||||
|
|
@ -964,27 +862,10 @@ static void r300TexParameter(GLcontext * ctx, GLenum target,
|
|||
}
|
||||
}
|
||||
|
||||
static void r300BindTexture(GLcontext * ctx, GLenum target,
|
||||
struct gl_texture_object *texObj)
|
||||
{
|
||||
if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
|
||||
fprintf(stderr, "%s( %p ) unit=%d\n", __FUNCTION__,
|
||||
(void *)texObj, ctx->Texture.CurrentUnit);
|
||||
}
|
||||
|
||||
if ((target == GL_TEXTURE_1D)
|
||||
|| (target == GL_TEXTURE_2D)
|
||||
|| (target == GL_TEXTURE_3D)
|
||||
|| (target == GL_TEXTURE_CUBE_MAP)
|
||||
|| (target == GL_TEXTURE_RECTANGLE_NV)) {
|
||||
assert(texObj->DriverData != NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static void r300DeleteTexture(GLcontext * ctx, struct gl_texture_object *texObj)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
driTextureObject *t = (driTextureObject *) texObj->DriverData;
|
||||
r300TexObj* t = r300_tex_obj(texObj);
|
||||
|
||||
if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
|
||||
fprintf(stderr, "%s( %p (target = %s) )\n", __FUNCTION__,
|
||||
|
|
@ -992,14 +873,19 @@ static void r300DeleteTexture(GLcontext * ctx, struct gl_texture_object *texObj)
|
|||
_mesa_lookup_enum_by_nr(texObj->Target));
|
||||
}
|
||||
|
||||
if (t != NULL) {
|
||||
if (rmesa) {
|
||||
R300_FIREVERTICES(rmesa);
|
||||
}
|
||||
if (rmesa) {
|
||||
int i;
|
||||
R300_FIREVERTICES(rmesa);
|
||||
|
||||
driDestroyTextureObject(t);
|
||||
for(i = 0; i < R300_MAX_TEXTURE_UNITS; ++i)
|
||||
if (rmesa->hw.textures[i] == t)
|
||||
rmesa->hw.textures[i] = 0;
|
||||
}
|
||||
|
||||
if (t->mt) {
|
||||
r300_miptree_unreference(t->mt);
|
||||
t->mt = 0;
|
||||
}
|
||||
/* Free mipmap images and the texture object itself */
|
||||
_mesa_delete_texture_object(ctx, texObj);
|
||||
}
|
||||
|
||||
|
|
@ -1008,8 +894,6 @@ static void r300DeleteTexture(GLcontext * ctx, struct gl_texture_object *texObj)
|
|||
* Called via ctx->Driver.NewTextureObject.
|
||||
* Note: this function will be called during context creation to
|
||||
* allocate the default texture objects.
|
||||
* Note: we could use containment here to 'derive' the driver-specific
|
||||
* texture object from the core mesa gl_texture_object. Not done at this time.
|
||||
* Fixup MaxAnisotropy according to user preference.
|
||||
*/
|
||||
static struct gl_texture_object *r300NewTextureObject(GLcontext * ctx,
|
||||
|
|
@ -1017,14 +901,23 @@ static struct gl_texture_object *r300NewTextureObject(GLcontext * ctx,
|
|||
GLenum target)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
struct gl_texture_object *obj;
|
||||
obj = _mesa_new_texture_object(ctx, name, target);
|
||||
if (!obj)
|
||||
return NULL;
|
||||
obj->MaxAnisotropy = rmesa->initialMaxAnisotropy;
|
||||
r300TexObj* t = CALLOC_STRUCT(r300_tex_obj);
|
||||
|
||||
r300AllocTexObj(obj);
|
||||
return obj;
|
||||
|
||||
if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
|
||||
fprintf(stderr, "%s( %p (target = %s) )\n", __FUNCTION__,
|
||||
t, _mesa_lookup_enum_by_nr(target));
|
||||
}
|
||||
|
||||
_mesa_initialize_texture_object(&t->base, name, target);
|
||||
t->base.MaxAnisotropy = rmesa->initialMaxAnisotropy;
|
||||
|
||||
/* Initialize hardware state */
|
||||
r300UpdateTexWrap(t);
|
||||
r300SetTexFilter(t, t->base.MinFilter, t->base.MagFilter, t->base.MaxAnisotropy);
|
||||
r300SetTexBorderColor(t, t->base._BorderChan);
|
||||
|
||||
return &t->base;
|
||||
}
|
||||
|
||||
void r300InitTextureFuncs(struct dd_function_table *functions)
|
||||
|
|
@ -1032,6 +925,11 @@ void r300InitTextureFuncs(struct dd_function_table *functions)
|
|||
/* Note: we only plug in the functions we implement in the driver
|
||||
* since _mesa_init_driver_functions() was already called.
|
||||
*/
|
||||
functions->NewTextureImage = r300NewTextureImage;
|
||||
functions->FreeTexImageData = r300FreeTexImageData;
|
||||
functions->MapTexture = r300MapTexture;
|
||||
functions->UnmapTexture = r300UnmapTexture;
|
||||
|
||||
functions->ChooseTextureFormat = r300ChooseTextureFormat;
|
||||
functions->TexImage1D = r300TexImage1D;
|
||||
functions->TexImage2D = r300TexImage2D;
|
||||
|
|
@ -1040,7 +938,6 @@ void r300InitTextureFuncs(struct dd_function_table *functions)
|
|||
functions->TexSubImage2D = r300TexSubImage2D;
|
||||
functions->TexSubImage3D = r300TexSubImage3D;
|
||||
functions->NewTextureObject = r300NewTextureObject;
|
||||
functions->BindTexture = r300BindTexture;
|
||||
functions->DeleteTexture = r300DeleteTexture;
|
||||
functions->IsTextureResident = driIsTextureResident;
|
||||
|
||||
|
|
@ -1049,5 +946,7 @@ void r300InitTextureFuncs(struct dd_function_table *functions)
|
|||
functions->CompressedTexImage2D = r300CompressedTexImage2D;
|
||||
functions->CompressedTexSubImage2D = r300CompressedTexSubImage2D;
|
||||
|
||||
functions->GenerateMipmap = r300_generate_mipmap;
|
||||
|
||||
driInitTextureFormats();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -41,12 +41,7 @@ extern void r300SetTexOffset(__DRIcontext *pDRICtx, GLint texname,
|
|||
unsigned long long offset, GLint depth,
|
||||
GLuint pitch);
|
||||
|
||||
extern void r300UpdateTextureState(GLcontext * ctx);
|
||||
|
||||
extern int r300UploadTexImages(r300ContextPtr rmesa, r300TexObjPtr t,
|
||||
GLuint face);
|
||||
|
||||
extern void r300DestroyTexObj(r300ContextPtr rmesa, r300TexObjPtr t);
|
||||
extern void r300ValidateTextures(GLcontext * ctx);
|
||||
|
||||
extern void r300InitTextureFuncs(struct dd_function_table *functions);
|
||||
|
||||
|
|
|
|||
|
|
@ -48,520 +48,11 @@ SOFTWARE.
|
|||
#include "r300_context.h"
|
||||
#include "r300_state.h"
|
||||
#include "r300_cmdbuf.h"
|
||||
#include "r300_emit.h"
|
||||
#include "r300_mipmap_tree.h"
|
||||
#include "radeon_ioctl.h"
|
||||
#include "r300_tex.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include <unistd.h> /* for usleep() */
|
||||
|
||||
#ifdef USER_BUFFERS
|
||||
#include "r300_mem.h"
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Destroy any device-dependent state associated with the texture. This may
|
||||
* include NULLing out hardware state that points to the texture.
|
||||
*/
|
||||
void r300DestroyTexObj(r300ContextPtr rmesa, r300TexObjPtr t)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE) {
|
||||
fprintf(stderr, "%s( %p, %p )\n", __FUNCTION__,
|
||||
(void *)t, (void *)t->base.tObj);
|
||||
}
|
||||
|
||||
for (i = 0; i < rmesa->radeon.glCtx->Const.MaxTextureUnits; i++) {
|
||||
if (rmesa->state.texture.unit[i].texobj == t) {
|
||||
rmesa->state.texture.unit[i].texobj = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------
|
||||
* Texture image conversions
|
||||
*/
|
||||
|
||||
static void r300UploadGARTClientSubImage(r300ContextPtr rmesa,
|
||||
r300TexObjPtr t,
|
||||
struct gl_texture_image *texImage,
|
||||
GLint hwlevel,
|
||||
GLint x, GLint y,
|
||||
GLint width, GLint height)
|
||||
{
|
||||
const struct gl_texture_format *texFormat = texImage->TexFormat;
|
||||
GLuint srcPitch, dstPitch;
|
||||
int blit_format;
|
||||
int srcOffset;
|
||||
|
||||
/*
|
||||
* XXX it appears that we always upload the full image, not a subimage.
|
||||
* I.e. x==0, y==0, width=texWidth, height=texWidth. If this is ever
|
||||
* changed, the src pitch will have to change.
|
||||
*/
|
||||
switch (texFormat->TexelBytes) {
|
||||
case 1:
|
||||
blit_format = R300_CP_COLOR_FORMAT_CI8;
|
||||
srcPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
dstPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
break;
|
||||
case 2:
|
||||
blit_format = R300_CP_COLOR_FORMAT_RGB565;
|
||||
srcPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
dstPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
break;
|
||||
case 4:
|
||||
blit_format = R300_CP_COLOR_FORMAT_ARGB8888;
|
||||
srcPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
dstPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
break;
|
||||
case 8:
|
||||
case 16:
|
||||
blit_format = R300_CP_COLOR_FORMAT_CI8;
|
||||
srcPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
dstPitch = t->image[0][0].width * texFormat->TexelBytes;
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
t->image[0][hwlevel].data = texImage->Data;
|
||||
srcOffset = r300GartOffsetFromVirtual(rmesa, texImage->Data);
|
||||
|
||||
assert(srcOffset != ~0);
|
||||
|
||||
/* Don't currently need to cope with small pitches?
|
||||
*/
|
||||
width = texImage->Width;
|
||||
height = texImage->Height;
|
||||
|
||||
if (texFormat->TexelBytes > 4) {
|
||||
width *= texFormat->TexelBytes;
|
||||
}
|
||||
|
||||
r300EmitWait(rmesa, R300_WAIT_3D);
|
||||
|
||||
r300EmitBlit(rmesa, blit_format,
|
||||
srcPitch,
|
||||
srcOffset,
|
||||
dstPitch,
|
||||
t->bufAddr,
|
||||
x,
|
||||
y,
|
||||
t->image[0][hwlevel].x + x,
|
||||
t->image[0][hwlevel].y + y, width, height);
|
||||
|
||||
r300EmitWait(rmesa, R300_WAIT_2D);
|
||||
}
|
||||
|
||||
static void r300UploadRectSubImage(r300ContextPtr rmesa,
|
||||
r300TexObjPtr t,
|
||||
struct gl_texture_image *texImage,
|
||||
GLint x, GLint y, GLint width, GLint height)
|
||||
{
|
||||
const struct gl_texture_format *texFormat = texImage->TexFormat;
|
||||
int blit_format, dstPitch, done;
|
||||
|
||||
switch (texFormat->TexelBytes) {
|
||||
case 1:
|
||||
blit_format = R300_CP_COLOR_FORMAT_CI8;
|
||||
break;
|
||||
case 2:
|
||||
blit_format = R300_CP_COLOR_FORMAT_RGB565;
|
||||
break;
|
||||
case 4:
|
||||
blit_format = R300_CP_COLOR_FORMAT_ARGB8888;
|
||||
break;
|
||||
case 8:
|
||||
case 16:
|
||||
blit_format = R300_CP_COLOR_FORMAT_CI8;
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
t->image[0][0].data = texImage->Data;
|
||||
|
||||
/* Currently don't need to cope with small pitches.
|
||||
*/
|
||||
width = texImage->Width;
|
||||
height = texImage->Height;
|
||||
dstPitch = t->pitch;
|
||||
|
||||
if (texFormat->TexelBytes > 4) {
|
||||
width *= texFormat->TexelBytes;
|
||||
}
|
||||
|
||||
if (rmesa->prefer_gart_client_texturing && texImage->IsClientData) {
|
||||
/* In this case, could also use GART texturing. This is
|
||||
* currently disabled, but has been tested & works.
|
||||
*/
|
||||
t->offset = r300GartOffsetFromVirtual(rmesa, texImage->Data);
|
||||
t->pitch = texImage->RowStride * texFormat->TexelBytes - 32;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr,
|
||||
"Using GART texturing for rectangular client texture\n");
|
||||
|
||||
/* Release FB memory allocated for this image:
|
||||
*/
|
||||
/* FIXME This may not be correct as driSwapOutTextureObject sets
|
||||
* FIXME dirty_images. It may be fine, though.
|
||||
*/
|
||||
if (t->base.memBlock) {
|
||||
driSwapOutTextureObject((driTextureObject *) t);
|
||||
}
|
||||
} else if (texImage->IsClientData) {
|
||||
/* Data already in GART memory, with usable pitch.
|
||||
*/
|
||||
GLuint srcPitch;
|
||||
srcPitch = texImage->RowStride * texFormat->TexelBytes;
|
||||
r300EmitBlit(rmesa,
|
||||
blit_format,
|
||||
srcPitch,
|
||||
r300GartOffsetFromVirtual(rmesa, texImage->Data),
|
||||
dstPitch, t->bufAddr, 0, 0, 0, 0, width, height);
|
||||
} else {
|
||||
/* Data not in GART memory, or bad pitch.
|
||||
*/
|
||||
for (done = 0; done < height;) {
|
||||
struct r300_dma_region region;
|
||||
int lines =
|
||||
MIN2(height - done, RADEON_BUFFER_SIZE / dstPitch);
|
||||
int src_pitch;
|
||||
char *tex;
|
||||
|
||||
src_pitch = texImage->RowStride * texFormat->TexelBytes;
|
||||
|
||||
tex = (char *)texImage->Data + done * src_pitch;
|
||||
|
||||
memset(®ion, 0, sizeof(region));
|
||||
r300AllocDmaRegion(rmesa, ®ion, lines * dstPitch,
|
||||
1024);
|
||||
|
||||
/* Copy texdata to dma:
|
||||
*/
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr,
|
||||
"%s: src_pitch %d dst_pitch %d\n",
|
||||
__FUNCTION__, src_pitch, dstPitch);
|
||||
|
||||
if (src_pitch == dstPitch) {
|
||||
memcpy(region.address + region.start, tex,
|
||||
lines * src_pitch);
|
||||
} else {
|
||||
char *buf = region.address + region.start;
|
||||
int i;
|
||||
for (i = 0; i < lines; i++) {
|
||||
memcpy(buf, tex, src_pitch);
|
||||
buf += dstPitch;
|
||||
tex += src_pitch;
|
||||
}
|
||||
}
|
||||
|
||||
r300EmitWait(rmesa, R300_WAIT_3D);
|
||||
|
||||
/* Blit to framebuffer
|
||||
*/
|
||||
r300EmitBlit(rmesa,
|
||||
blit_format,
|
||||
dstPitch, GET_START(®ion),
|
||||
dstPitch | (t->tile_bits >> 16),
|
||||
t->bufAddr, 0, 0, 0, done, width, lines);
|
||||
|
||||
r300EmitWait(rmesa, R300_WAIT_2D);
|
||||
#ifdef USER_BUFFERS
|
||||
r300_mem_use(rmesa, region.buf->id);
|
||||
#endif
|
||||
|
||||
r300ReleaseDmaRegion(rmesa, ®ion, __FUNCTION__);
|
||||
done += lines;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Upload the texture image associated with texture \a t at the specified
|
||||
* level at the address relative to \a start.
|
||||
*/
|
||||
static void r300UploadSubImage(r300ContextPtr rmesa, r300TexObjPtr t,
|
||||
GLint hwlevel,
|
||||
GLint x, GLint y, GLint width, GLint height,
|
||||
GLuint face)
|
||||
{
|
||||
struct gl_texture_image *texImage = NULL;
|
||||
GLuint offset;
|
||||
GLint imageWidth, imageHeight;
|
||||
GLint ret;
|
||||
drm_radeon_texture_t tex;
|
||||
drm_radeon_tex_image_t tmp;
|
||||
const int level = hwlevel + t->base.firstLevel;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE) {
|
||||
fprintf(stderr,
|
||||
"%s( %p, %p ) level/width/height/face = %d/%d/%d/%u\n",
|
||||
__FUNCTION__, (void *)t, (void *)t->base.tObj, level,
|
||||
width, height, face);
|
||||
}
|
||||
|
||||
ASSERT(face < 6);
|
||||
|
||||
/* Ensure we have a valid texture to upload */
|
||||
if ((hwlevel < 0) || (hwlevel >= RADEON_MAX_TEXTURE_LEVELS)) {
|
||||
_mesa_problem(NULL, "bad texture level in %s", __FUNCTION__);
|
||||
return;
|
||||
}
|
||||
|
||||
texImage = t->base.tObj->Image[face][level];
|
||||
|
||||
if (!texImage) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: texImage %d is NULL!\n",
|
||||
__FUNCTION__, level);
|
||||
return;
|
||||
}
|
||||
if (!texImage->Data) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: image data is NULL!\n",
|
||||
__FUNCTION__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (t->base.tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
|
||||
assert(level == 0);
|
||||
assert(hwlevel == 0);
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: image data is rectangular\n",
|
||||
__FUNCTION__);
|
||||
r300UploadRectSubImage(rmesa, t, texImage, x, y, width, height);
|
||||
return;
|
||||
} else if (texImage->IsClientData) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr,
|
||||
"%s: image data is in GART client storage\n",
|
||||
__FUNCTION__);
|
||||
r300UploadGARTClientSubImage(rmesa, t, texImage, hwlevel, x, y,
|
||||
width, height);
|
||||
return;
|
||||
} else if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: image data is in normal memory\n",
|
||||
__FUNCTION__);
|
||||
|
||||
imageWidth = texImage->Width;
|
||||
imageHeight = texImage->Height;
|
||||
|
||||
offset = t->bufAddr;
|
||||
|
||||
if (RADEON_DEBUG & (DEBUG_TEXTURE | DEBUG_IOCTL)) {
|
||||
GLint imageX = 0;
|
||||
GLint imageY = 0;
|
||||
GLint blitX = t->image[face][hwlevel].x;
|
||||
GLint blitY = t->image[face][hwlevel].y;
|
||||
GLint blitWidth = t->image[face][hwlevel].width;
|
||||
GLint blitHeight = t->image[face][hwlevel].height;
|
||||
fprintf(stderr, " upload image: %d,%d at %d,%d\n",
|
||||
imageWidth, imageHeight, imageX, imageY);
|
||||
fprintf(stderr, " upload blit: %d,%d at %d,%d\n",
|
||||
blitWidth, blitHeight, blitX, blitY);
|
||||
fprintf(stderr, " blit ofs: 0x%07x level: %d/%d\n",
|
||||
(GLuint) offset, hwlevel, level);
|
||||
}
|
||||
|
||||
t->image[face][hwlevel].data = texImage->Data;
|
||||
|
||||
/* Init the DRM_RADEON_TEXTURE command / drm_radeon_texture_t struct.
|
||||
* NOTE: we're always use a 1KB-wide blit and I8 texture format.
|
||||
* We used to use 1, 2 and 4-byte texels and used to use the texture
|
||||
* width to dictate the blit width - but that won't work for compressed
|
||||
* textures. (Brian)
|
||||
* NOTE: can't do that with texture tiling. (sroland)
|
||||
*/
|
||||
tex.offset = offset;
|
||||
tex.image = &tmp;
|
||||
/* copy (x,y,width,height,data) */
|
||||
memcpy(&tmp, &t->image[face][hwlevel], sizeof(tmp));
|
||||
|
||||
if (texImage->TexFormat->TexelBytes > 4) {
|
||||
const int log2TexelBytes =
|
||||
(3 + (texImage->TexFormat->TexelBytes >> 4));
|
||||
tex.format = RADEON_TXFORMAT_I8; /* any 1-byte texel format */
|
||||
tex.pitch =
|
||||
MAX2((texImage->Width * texImage->TexFormat->TexelBytes) /
|
||||
64, 1);
|
||||
tex.height = imageHeight;
|
||||
tex.width = imageWidth << log2TexelBytes;
|
||||
tex.offset += (tmp.x << log2TexelBytes) & ~1023;
|
||||
tmp.x = tmp.x % (1024 >> log2TexelBytes);
|
||||
tmp.width = tmp.width << log2TexelBytes;
|
||||
} else if (texImage->TexFormat->TexelBytes) {
|
||||
/* use multi-byte upload scheme */
|
||||
tex.height = imageHeight;
|
||||
tex.width = imageWidth;
|
||||
switch (texImage->TexFormat->TexelBytes) {
|
||||
case 1:
|
||||
tex.format = RADEON_TXFORMAT_I8;
|
||||
break;
|
||||
case 2:
|
||||
tex.format = RADEON_TXFORMAT_AI88;
|
||||
break;
|
||||
case 4:
|
||||
tex.format = RADEON_TXFORMAT_ARGB8888;
|
||||
break;
|
||||
}
|
||||
tex.pitch =
|
||||
MAX2((texImage->Width * texImage->TexFormat->TexelBytes) /
|
||||
64, 1);
|
||||
tex.offset += tmp.x & ~1023;
|
||||
tmp.x = tmp.x % 1024;
|
||||
|
||||
if (t->tile_bits & R300_TXO_MICRO_TILE) {
|
||||
/* need something like "tiled coordinates" ? */
|
||||
tmp.y = tmp.x / (tex.pitch * 128) * 2;
|
||||
tmp.x =
|
||||
tmp.x % (tex.pitch * 128) / 2 /
|
||||
texImage->TexFormat->TexelBytes;
|
||||
tex.pitch |= RADEON_DST_TILE_MICRO >> 22;
|
||||
} else {
|
||||
tmp.x = tmp.x >> (texImage->TexFormat->TexelBytes >> 1);
|
||||
}
|
||||
#if 1
|
||||
if ((t->tile_bits & R300_TXO_MACRO_TILE) &&
|
||||
(texImage->Width * texImage->TexFormat->TexelBytes >= 256)
|
||||
&& ((!(t->tile_bits & R300_TXO_MICRO_TILE)
|
||||
&& (texImage->Height >= 8))
|
||||
|| (texImage->Height >= 16))) {
|
||||
/* weird: R200 disables macro tiling if mip width is smaller than 256 bytes,
|
||||
OR if height is smaller than 8 automatically, but if micro tiling is active
|
||||
the limit is height 16 instead ? */
|
||||
tex.pitch |= RADEON_DST_TILE_MACRO >> 22;
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
/* In case of for instance 8x8 texture (2x2 dxt blocks),
|
||||
padding after the first two blocks is needed (only
|
||||
with dxt1 since 2 dxt3/dxt5 blocks already use 32 Byte). */
|
||||
/* set tex.height to 1/4 since 1 "macropixel" (dxt-block)
|
||||
has 4 real pixels. Needed so the kernel module reads
|
||||
the right amount of data. */
|
||||
tex.format = RADEON_TXFORMAT_I8; /* any 1-byte texel format */
|
||||
tex.pitch = (R300_BLIT_WIDTH_BYTES / 64);
|
||||
tex.height = (imageHeight + 3) / 4;
|
||||
tex.width = (imageWidth + 3) / 4;
|
||||
if ((t->format & R300_TX_FORMAT_DXT1) == R300_TX_FORMAT_DXT1) {
|
||||
tex.width *= 8;
|
||||
} else {
|
||||
tex.width *= 16;
|
||||
}
|
||||
}
|
||||
|
||||
LOCK_HARDWARE(&rmesa->radeon);
|
||||
do {
|
||||
ret =
|
||||
drmCommandWriteRead(rmesa->radeon.dri.fd,
|
||||
DRM_RADEON_TEXTURE, &tex,
|
||||
sizeof(drm_radeon_texture_t));
|
||||
if (ret) {
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL)
|
||||
fprintf(stderr,
|
||||
"DRM_RADEON_TEXTURE: again!\n");
|
||||
usleep(1);
|
||||
}
|
||||
} while (ret == -EAGAIN);
|
||||
|
||||
UNLOCK_HARDWARE(&rmesa->radeon);
|
||||
|
||||
if (ret) {
|
||||
fprintf(stderr, "DRM_RADEON_TEXTURE: return = %d\n", ret);
|
||||
fprintf(stderr, " offset=0x%08x\n", offset);
|
||||
fprintf(stderr, " image width=%d height=%d\n",
|
||||
imageWidth, imageHeight);
|
||||
fprintf(stderr, " blit width=%d height=%d data=%p\n",
|
||||
t->image[face][hwlevel].width,
|
||||
t->image[face][hwlevel].height,
|
||||
t->image[face][hwlevel].data);
|
||||
_mesa_exit(-1);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Upload the texture images associated with texture \a t. This might
|
||||
* require the allocation of texture memory.
|
||||
*
|
||||
* \param rmesa Context pointer
|
||||
* \param t Texture to be uploaded
|
||||
* \param face Cube map face to be uploaded. Zero for non-cube maps.
|
||||
*/
|
||||
|
||||
int r300UploadTexImages(r300ContextPtr rmesa, r300TexObjPtr t, GLuint face)
|
||||
{
|
||||
const int numLevels = t->base.lastLevel - t->base.firstLevel + 1;
|
||||
|
||||
if (t->image_override)
|
||||
return 0;
|
||||
|
||||
if (RADEON_DEBUG & (DEBUG_TEXTURE | DEBUG_IOCTL)) {
|
||||
fprintf(stderr, "%s( %p, %p ) sz=%d lvls=%d-%d\n", __FUNCTION__,
|
||||
(void *)rmesa->radeon.glCtx, (void *)t->base.tObj,
|
||||
t->base.totalSize, t->base.firstLevel,
|
||||
t->base.lastLevel);
|
||||
}
|
||||
|
||||
if (t->base.totalSize == 0)
|
||||
return 0;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_SYNC) {
|
||||
fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
|
||||
radeonFinish(rmesa->radeon.glCtx);
|
||||
}
|
||||
|
||||
LOCK_HARDWARE(&rmesa->radeon);
|
||||
|
||||
if (t->base.memBlock == NULL) {
|
||||
int heap;
|
||||
|
||||
heap = driAllocateTexture(rmesa->texture_heaps, rmesa->nr_heaps,
|
||||
(driTextureObject *) t);
|
||||
if (heap == -1) {
|
||||
UNLOCK_HARDWARE(&rmesa->radeon);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Set the base offset of the texture image */
|
||||
t->bufAddr = rmesa->radeon.radeonScreen->texOffset[heap]
|
||||
+ t->base.memBlock->ofs;
|
||||
t->offset = t->bufAddr;
|
||||
|
||||
if (!(t->base.tObj->Image[0][0]->IsClientData)) {
|
||||
/* hope it's safe to add that here... */
|
||||
t->offset |= t->tile_bits;
|
||||
}
|
||||
}
|
||||
|
||||
/* Let the world know we've used this memory recently.
|
||||
*/
|
||||
driUpdateTextureLRU((driTextureObject *) t);
|
||||
UNLOCK_HARDWARE(&rmesa->radeon);
|
||||
|
||||
/* Upload any images that are new */
|
||||
if (t->base.dirty_images[face]) {
|
||||
int i;
|
||||
for (i = 0; i < numLevels; i++) {
|
||||
if ((t->base.
|
||||
dirty_images[face] & (1 <<
|
||||
(i + t->base.firstLevel))) !=
|
||||
0) {
|
||||
r300UploadSubImage(rmesa, t, i, 0, 0,
|
||||
t->image[face][i].width,
|
||||
t->image[face][i].height,
|
||||
face);
|
||||
}
|
||||
}
|
||||
t->base.dirty_images[face] = 0;
|
||||
}
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_SYNC) {
|
||||
fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
|
||||
radeonFinish(rmesa->radeon.glCtx);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -48,6 +48,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "r300_state.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include "radeon_ioctl.h"
|
||||
#include "r300_mipmap_tree.h"
|
||||
#include "r300_tex.h"
|
||||
#include "r300_reg.h"
|
||||
|
||||
|
|
@ -148,8 +149,7 @@ void r300SetDepthTexMode(struct gl_texture_object *tObj)
|
|||
if (!tObj)
|
||||
return;
|
||||
|
||||
t = (r300TexObjPtr) tObj->DriverData;
|
||||
|
||||
t = r300_tex_obj(tObj);
|
||||
|
||||
switch (tObj->Image[0][tObj->BaseLevel]->TexFormat->MesaFormat) {
|
||||
case MESA_FORMAT_Z16:
|
||||
|
|
@ -190,399 +190,228 @@ void r300SetDepthTexMode(struct gl_texture_object *tObj)
|
|||
|
||||
|
||||
/**
|
||||
* Compute sizes and fill in offset and blit information for the given
|
||||
* image (determined by \p face and \p level).
|
||||
*
|
||||
* \param curOffset points to the offset at which the image is to be stored
|
||||
* and is updated by this function according to the size of the image.
|
||||
*/
|
||||
static void compute_tex_image_offset(
|
||||
struct gl_texture_object *tObj,
|
||||
GLuint face,
|
||||
GLint level,
|
||||
GLint* curOffset)
|
||||
{
|
||||
r300TexObjPtr t = (r300TexObjPtr) tObj->DriverData;
|
||||
const struct gl_texture_image* texImage;
|
||||
GLuint blitWidth = R300_BLIT_WIDTH_BYTES;
|
||||
GLuint texelBytes;
|
||||
GLuint size;
|
||||
|
||||
texImage = tObj->Image[0][level + t->base.firstLevel];
|
||||
if (!texImage)
|
||||
return;
|
||||
|
||||
texelBytes = texImage->TexFormat->TexelBytes;
|
||||
|
||||
/* find image size in bytes */
|
||||
if (texImage->IsCompressed) {
|
||||
if ((t->format & R300_TX_FORMAT_DXT1) ==
|
||||
R300_TX_FORMAT_DXT1) {
|
||||
// fprintf(stderr,"DXT 1 %d %08X\n", texImage->Width, t->format);
|
||||
if ((texImage->Width + 3) < 8) /* width one block */
|
||||
size = texImage->CompressedSize * 4;
|
||||
else if ((texImage->Width + 3) < 16)
|
||||
size = texImage->CompressedSize * 2;
|
||||
else
|
||||
size = texImage->CompressedSize;
|
||||
} else {
|
||||
/* DXT3/5, 16 bytes per block */
|
||||
WARN_ONCE
|
||||
("DXT 3/5 suffers from multitexturing problems!\n");
|
||||
// fprintf(stderr,"DXT 3/5 %d\n", texImage->Width);
|
||||
if ((texImage->Width + 3) < 8)
|
||||
size = texImage->CompressedSize * 2;
|
||||
else
|
||||
size = texImage->CompressedSize;
|
||||
}
|
||||
} else if (tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
|
||||
size =
|
||||
((texImage->Width * texelBytes +
|
||||
63) & ~63) * texImage->Height;
|
||||
blitWidth = 64 / texelBytes;
|
||||
} else if (t->tile_bits & R300_TXO_MICRO_TILE) {
|
||||
/* tile pattern is 16 bytes x2. mipmaps stay 32 byte aligned,
|
||||
though the actual offset may be different (if texture is less than
|
||||
32 bytes width) to the untiled case */
|
||||
int w = (texImage->Width * texelBytes * 2 + 31) & ~31;
|
||||
size =
|
||||
(w * ((texImage->Height + 1) / 2)) *
|
||||
texImage->Depth;
|
||||
blitWidth = MAX2(texImage->Width, 64 / texelBytes);
|
||||
} else {
|
||||
int w = (texImage->Width * texelBytes + 31) & ~31;
|
||||
size = w * texImage->Height * texImage->Depth;
|
||||
blitWidth = MAX2(texImage->Width, 64 / texelBytes);
|
||||
}
|
||||
assert(size > 0);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "w=%d h=%d d=%d tb=%d intFormat=%d\n",
|
||||
texImage->Width, texImage->Height,
|
||||
texImage->Depth,
|
||||
texImage->TexFormat->TexelBytes,
|
||||
texImage->InternalFormat);
|
||||
|
||||
/* All images are aligned to a 32-byte offset */
|
||||
*curOffset = (*curOffset + 0x1f) & ~0x1f;
|
||||
|
||||
if (texelBytes) {
|
||||
/* fix x and y coords up later together with offset */
|
||||
t->image[face][level].x = *curOffset;
|
||||
t->image[face][level].y = 0;
|
||||
t->image[face][level].width =
|
||||
MIN2(size / texelBytes, blitWidth);
|
||||
t->image[face][level].height =
|
||||
(size / texelBytes) / t->image[face][level].width;
|
||||
} else {
|
||||
t->image[face][level].x = *curOffset % R300_BLIT_WIDTH_BYTES;
|
||||
t->image[face][level].y = *curOffset / R300_BLIT_WIDTH_BYTES;
|
||||
t->image[face][level].width =
|
||||
MIN2(size, R300_BLIT_WIDTH_BYTES);
|
||||
t->image[face][level].height = size / t->image[face][level].width;
|
||||
}
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr,
|
||||
"level %d, face %d: %dx%d x=%d y=%d w=%d h=%d size=%d at %d\n",
|
||||
level, face, texImage->Width, texImage->Height,
|
||||
t->image[face][level].x, t->image[face][level].y,
|
||||
t->image[face][level].width, t->image[face][level].height,
|
||||
size, *curOffset);
|
||||
|
||||
*curOffset += size;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* This function computes the number of bytes of storage needed for
|
||||
* the given texture object (all mipmap levels, all cube faces).
|
||||
* The \c image[face][level].x/y/width/height parameters for upload/blitting
|
||||
* are computed here. \c filter, \c format, etc. will be set here
|
||||
* too.
|
||||
* Compute the cached hardware register values for the given texture object.
|
||||
*
|
||||
* \param rmesa Context pointer
|
||||
* \param tObj GL texture object whose images are to be posted to
|
||||
* hardware state.
|
||||
* \param t the r300 texture object
|
||||
*/
|
||||
static void r300SetTexImages(r300ContextPtr rmesa,
|
||||
struct gl_texture_object *tObj)
|
||||
static void setup_hardware_state(r300ContextPtr rmesa, r300TexObj *t)
|
||||
{
|
||||
r300TexObjPtr t = (r300TexObjPtr) tObj->DriverData;
|
||||
const struct gl_texture_image *baseImage =
|
||||
tObj->Image[0][tObj->BaseLevel];
|
||||
GLint curOffset;
|
||||
GLint i, texelBytes;
|
||||
GLint numLevels;
|
||||
GLint log2Width, log2Height, log2Depth;
|
||||
const struct gl_texture_image *firstImage =
|
||||
t->base.Image[0][t->mt->firstLevel];
|
||||
|
||||
/* Set the hardware texture format
|
||||
*/
|
||||
if (!t->image_override
|
||||
&& VALID_FORMAT(baseImage->TexFormat->MesaFormat)) {
|
||||
if (baseImage->TexFormat->BaseFormat == GL_DEPTH_COMPONENT) {
|
||||
r300SetDepthTexMode(tObj);
|
||||
&& VALID_FORMAT(firstImage->TexFormat->MesaFormat)) {
|
||||
if (firstImage->TexFormat->BaseFormat == GL_DEPTH_COMPONENT) {
|
||||
r300SetDepthTexMode(&t->base);
|
||||
} else {
|
||||
t->format = tx_table[baseImage->TexFormat->MesaFormat].format;
|
||||
t->format = tx_table[firstImage->TexFormat->MesaFormat].format;
|
||||
}
|
||||
|
||||
t->filter |= tx_table[baseImage->TexFormat->MesaFormat].filter;
|
||||
t->filter |= tx_table[firstImage->TexFormat->MesaFormat].filter;
|
||||
} else if (!t->image_override) {
|
||||
_mesa_problem(NULL, "unexpected texture format in %s",
|
||||
__FUNCTION__);
|
||||
return;
|
||||
}
|
||||
|
||||
texelBytes = baseImage->TexFormat->TexelBytes;
|
||||
|
||||
/* Compute which mipmap levels we really want to send to the hardware.
|
||||
*/
|
||||
driCalculateTextureFirstLastLevel((driTextureObject *) t);
|
||||
log2Width = tObj->Image[0][t->base.firstLevel]->WidthLog2;
|
||||
log2Height = tObj->Image[0][t->base.firstLevel]->HeightLog2;
|
||||
log2Depth = tObj->Image[0][t->base.firstLevel]->DepthLog2;
|
||||
|
||||
numLevels = t->base.lastLevel - t->base.firstLevel + 1;
|
||||
|
||||
assert(numLevels <= RADEON_MAX_TEXTURE_LEVELS);
|
||||
|
||||
/* Calculate mipmap offsets and dimensions for blitting (uploading)
|
||||
* The idea is that we lay out the mipmap levels within a block of
|
||||
* memory organized as a rectangle of width BLIT_WIDTH_BYTES.
|
||||
*/
|
||||
t->tile_bits = 0;
|
||||
|
||||
/* figure out if this texture is suitable for tiling. */
|
||||
#if 0 /* Disabled for now */
|
||||
if (texelBytes) {
|
||||
if ((tObj->Target != GL_TEXTURE_RECTANGLE_NV) &&
|
||||
/* texrect might be able to use micro tiling too in theory? */
|
||||
(baseImage->Height > 1)) {
|
||||
|
||||
/* allow 32 (bytes) x 1 mip (which will use two times the space
|
||||
the non-tiled version would use) max if base texture is large enough */
|
||||
if ((numLevels == 1) ||
|
||||
(((baseImage->Width * texelBytes /
|
||||
baseImage->Height) <= 32)
|
||||
&& (baseImage->Width * texelBytes > 64))
|
||||
||
|
||||
((baseImage->Width * texelBytes /
|
||||
baseImage->Height) <= 16)) {
|
||||
t->tile_bits |= R300_TXO_MICRO_TILE;
|
||||
}
|
||||
}
|
||||
|
||||
if (tObj->Target != GL_TEXTURE_RECTANGLE_NV) {
|
||||
/* we can set macro tiling even for small textures, they will be untiled anyway */
|
||||
t->tile_bits |= R300_TXO_MACRO_TILE;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
curOffset = 0;
|
||||
|
||||
if (tObj->Target == GL_TEXTURE_CUBE_MAP) {
|
||||
ASSERT(log2Width == log2Height);
|
||||
if (t->base.Target == GL_TEXTURE_CUBE_MAP)
|
||||
t->format |= R300_TX_FORMAT_CUBIC_MAP;
|
||||
if (t->base.Target == GL_TEXTURE_3D)
|
||||
t->format |= R300_TX_FORMAT_3D;
|
||||
|
||||
for(i = 0; i < numLevels; i++) {
|
||||
GLuint face;
|
||||
for(face = 0; face < 6; face++)
|
||||
compute_tex_image_offset(tObj, face, i, &curOffset);
|
||||
}
|
||||
} else {
|
||||
if (tObj->Target == GL_TEXTURE_3D)
|
||||
t->format |= R300_TX_FORMAT_3D;
|
||||
t->size = (((firstImage->Width - 1) << R300_TX_WIDTHMASK_SHIFT)
|
||||
| ((firstImage->Height - 1) << R300_TX_HEIGHTMASK_SHIFT))
|
||||
| ((t->mt->lastLevel - t->mt->firstLevel) << R300_TX_MAX_MIP_LEVEL_SHIFT);
|
||||
|
||||
for (i = 0; i < numLevels; i++)
|
||||
compute_tex_image_offset(tObj, 0, i, &curOffset);
|
||||
}
|
||||
|
||||
/* Align the total size of texture memory block.
|
||||
*/
|
||||
t->base.totalSize =
|
||||
(curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
|
||||
|
||||
t->size =
|
||||
(((tObj->Image[0][t->base.firstLevel]->Width -
|
||||
1) << R300_TX_WIDTHMASK_SHIFT)
|
||||
| ((tObj->Image[0][t->base.firstLevel]->Height - 1) <<
|
||||
R300_TX_HEIGHTMASK_SHIFT)
|
||||
| ((tObj->Image[0][t->base.firstLevel]->DepthLog2) <<
|
||||
R300_TX_DEPTHMASK_SHIFT))
|
||||
| ((numLevels - 1) << R300_TX_MAX_MIP_LEVEL_SHIFT);
|
||||
|
||||
t->pitch = 0;
|
||||
|
||||
/* Only need to round to nearest 32 for textures, but the blitter
|
||||
* requires 64-byte aligned pitches, and we may/may not need the
|
||||
* blitter. NPOT only!
|
||||
*/
|
||||
if (baseImage->IsCompressed) {
|
||||
t->pitch |=
|
||||
(tObj->Image[0][t->base.firstLevel]->Width + 63) & ~(63);
|
||||
} else if (tObj->Target == GL_TEXTURE_RECTANGLE_NV) {
|
||||
unsigned int align = (64 / texelBytes) - 1;
|
||||
t->pitch |= ((tObj->Image[0][t->base.firstLevel]->Width *
|
||||
texelBytes) + 63) & ~(63);
|
||||
if (t->base.Target == GL_TEXTURE_RECTANGLE_NV) {
|
||||
unsigned int align = (64 / t->mt->bpp) - 1;
|
||||
t->size |= R300_TX_SIZE_TXPITCH_EN;
|
||||
if (!t->image_override)
|
||||
t->pitch_reg =
|
||||
(((tObj->Image[0][t->base.firstLevel]->Width) +
|
||||
align) & ~align) - 1;
|
||||
} else {
|
||||
t->pitch |=
|
||||
((tObj->Image[0][t->base.firstLevel]->Width *
|
||||
texelBytes) + 63) & ~(63);
|
||||
t->pitch_reg = ((firstImage->Width + align) & ~align) - 1;
|
||||
}
|
||||
|
||||
if (rmesa->radeon.radeonScreen->chip_family >= CHIP_FAMILY_RV515) {
|
||||
if (tObj->Image[0][t->base.firstLevel]->Width > 2048)
|
||||
if (firstImage->Width > 2048)
|
||||
t->pitch_reg |= R500_TXWIDTH_BIT11;
|
||||
if (tObj->Image[0][t->base.firstLevel]->Height > 2048)
|
||||
if (firstImage->Height > 2048)
|
||||
t->pitch_reg |= R500_TXHEIGHT_BIT11;
|
||||
}
|
||||
}
|
||||
|
||||
/* ================================================================
|
||||
* Texture unit state management
|
||||
|
||||
static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
|
||||
GLuint numrows, GLuint rowsize)
|
||||
{
|
||||
assert(rowsize <= dststride);
|
||||
assert(rowsize <= srcstride);
|
||||
|
||||
if (rowsize == srcstride && rowsize == dststride) {
|
||||
memcpy(dst, src, numrows*rowsize);
|
||||
} else {
|
||||
GLuint i;
|
||||
for(i = 0; i < numrows; ++i) {
|
||||
memcpy(dst, src, rowsize);
|
||||
dst += dststride;
|
||||
src += srcstride;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Ensure that the given image is stored in the given miptree from now on.
|
||||
*/
|
||||
|
||||
static GLboolean r300EnableTexture2D(GLcontext * ctx, int unit)
|
||||
static void migrate_image_to_miptree(r300_mipmap_tree *mt, r300_texture_image *image, int face, int level)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
|
||||
struct gl_texture_object *tObj = texUnit->_Current;
|
||||
r300TexObjPtr t = (r300TexObjPtr) tObj->DriverData;
|
||||
r300_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel];
|
||||
unsigned char *dest;
|
||||
|
||||
ASSERT(tObj->Target == GL_TEXTURE_2D || tObj->Target == GL_TEXTURE_1D);
|
||||
assert(image->mt != mt);
|
||||
assert(dstlvl->width == image->base.Width);
|
||||
assert(dstlvl->height == image->base.Height);
|
||||
assert(dstlvl->depth == image->base.Depth);
|
||||
|
||||
if (t->base.dirty_images[0]) {
|
||||
R300_FIREVERTICES(rmesa);
|
||||
radeon_bo_map(mt->bo, GL_TRUE);
|
||||
dest = mt->bo->ptr + dstlvl->faces[face].offset;
|
||||
|
||||
r300SetTexImages(rmesa, tObj);
|
||||
r300UploadTexImages(rmesa, (r300TexObjPtr) tObj->DriverData, 0);
|
||||
if (!t->base.memBlock && !t->image_override)
|
||||
return GL_FALSE;
|
||||
if (image->mt) {
|
||||
/* Format etc. should match, so we really just need a memcpy().
|
||||
* In fact, that memcpy() could be done by the hardware in many
|
||||
* cases, provided that we have a proper memory manager.
|
||||
*/
|
||||
r300_mipmap_level *srclvl = &image->mt->levels[image->mtlevel];
|
||||
|
||||
assert(srclvl->size == dstlvl->size);
|
||||
assert(srclvl->rowstride == dstlvl->rowstride);
|
||||
|
||||
radeon_bo_map(image->mt->bo, GL_FALSE);
|
||||
memcpy(dest,
|
||||
image->mt->bo->ptr + srclvl->faces[face].offset,
|
||||
dstlvl->size);
|
||||
radeon_bo_unmap(image->mt->bo);
|
||||
|
||||
r300_miptree_unreference(image->mt);
|
||||
} else {
|
||||
uint srcrowstride = image->base.Width * image->base.TexFormat->TexelBytes;
|
||||
|
||||
if (mt->tilebits)
|
||||
WARN_ONCE("%s: tiling not supported yet", __FUNCTION__);
|
||||
|
||||
copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
|
||||
image->base.Height * image->base.Depth, srcrowstride);
|
||||
|
||||
_mesa_free_texmemory(image->base.Data);
|
||||
image->base.Data = 0;
|
||||
}
|
||||
|
||||
return GL_TRUE;
|
||||
radeon_bo_unmap(mt->bo);
|
||||
|
||||
image->mt = mt;
|
||||
image->mtface = face;
|
||||
image->mtlevel = level;
|
||||
r300_miptree_reference(image->mt);
|
||||
}
|
||||
|
||||
static GLboolean r300EnableTexture3D(GLcontext * ctx, int unit)
|
||||
|
||||
/**
|
||||
* Ensure the given texture is ready for rendering.
|
||||
*
|
||||
* Mostly this means populating the texture object's mipmap tree.
|
||||
*/
|
||||
static GLboolean r300_validate_texture(GLcontext * ctx, struct gl_texture_object *texObj)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
|
||||
struct gl_texture_object *tObj = texUnit->_Current;
|
||||
r300TexObjPtr t = (r300TexObjPtr) tObj->DriverData;
|
||||
r300TexObj *t = r300_tex_obj(texObj);
|
||||
r300_texture_image *baseimage = get_r300_texture_image(texObj->Image[0][texObj->BaseLevel]);
|
||||
int face, level;
|
||||
|
||||
ASSERT(tObj->Target == GL_TEXTURE_3D);
|
||||
if (t->validated)
|
||||
return GL_TRUE;
|
||||
|
||||
/* r300 does not support mipmaps for 3D textures. */
|
||||
if ((tObj->MinFilter != GL_NEAREST) && (tObj->MinFilter != GL_LINEAR)) {
|
||||
return GL_FALSE;
|
||||
}
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);
|
||||
|
||||
if (t->base.dirty_images[0]) {
|
||||
R300_FIREVERTICES(rmesa);
|
||||
r300SetTexImages(rmesa, tObj);
|
||||
r300UploadTexImages(rmesa, (r300TexObjPtr) tObj->DriverData, 0);
|
||||
if (!t->base.memBlock)
|
||||
return GL_FALSE;
|
||||
}
|
||||
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
static GLboolean r300EnableTextureCube(GLcontext * ctx, int unit)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
|
||||
struct gl_texture_object *tObj = texUnit->_Current;
|
||||
r300TexObjPtr t = (r300TexObjPtr) tObj->DriverData;
|
||||
GLuint face;
|
||||
|
||||
ASSERT(tObj->Target == GL_TEXTURE_CUBE_MAP);
|
||||
|
||||
if (t->base.dirty_images[0] || t->base.dirty_images[1] ||
|
||||
t->base.dirty_images[2] || t->base.dirty_images[3] ||
|
||||
t->base.dirty_images[4] || t->base.dirty_images[5]) {
|
||||
/* flush */
|
||||
R300_FIREVERTICES(rmesa);
|
||||
/* layout memory space, once for all faces */
|
||||
r300SetTexImages(rmesa, tObj);
|
||||
}
|
||||
|
||||
/* upload (per face) */
|
||||
for (face = 0; face < 6; face++) {
|
||||
if (t->base.dirty_images[face]) {
|
||||
r300UploadTexImages(rmesa,
|
||||
(r300TexObjPtr) tObj->DriverData,
|
||||
face);
|
||||
}
|
||||
}
|
||||
|
||||
if (!t->base.memBlock) {
|
||||
/* texmem alloc failed, use s/w fallback */
|
||||
return GL_FALSE;
|
||||
}
|
||||
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
static GLboolean r300EnableTextureRect(GLcontext * ctx, int unit)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
|
||||
struct gl_texture_object *tObj = texUnit->_Current;
|
||||
r300TexObjPtr t = (r300TexObjPtr) tObj->DriverData;
|
||||
|
||||
ASSERT(tObj->Target == GL_TEXTURE_RECTANGLE_NV);
|
||||
|
||||
if (t->base.dirty_images[0]) {
|
||||
R300_FIREVERTICES(rmesa);
|
||||
|
||||
r300SetTexImages(rmesa, tObj);
|
||||
r300UploadTexImages(rmesa, (r300TexObjPtr) tObj->DriverData, 0);
|
||||
if (!t->base.memBlock && !t->image_override &&
|
||||
!rmesa->prefer_gart_client_texturing)
|
||||
return GL_FALSE;
|
||||
}
|
||||
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
static GLboolean r300UpdateTexture(GLcontext * ctx, int unit)
|
||||
{
|
||||
r300ContextPtr rmesa = R300_CONTEXT(ctx);
|
||||
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
|
||||
struct gl_texture_object *tObj = texUnit->_Current;
|
||||
r300TexObjPtr t = (r300TexObjPtr) tObj->DriverData;
|
||||
|
||||
/* Fallback if there's a texture border */
|
||||
if (tObj->Image[0][tObj->BaseLevel]->Border > 0)
|
||||
if (baseimage->base.Border > 0)
|
||||
return GL_FALSE;
|
||||
|
||||
/* Update state if this is a different texture object to last
|
||||
* time.
|
||||
/* Ensure a matching miptree exists.
|
||||
*
|
||||
* Differing mipmap trees can result when the app uses TexImage to
|
||||
* change texture dimensions.
|
||||
*
|
||||
* Prefer to use base image's miptree if it
|
||||
* exists, since that most likely contains more valid data (remember
|
||||
* that the base level is usually significantly larger than the rest
|
||||
* of the miptree, so cubemaps are the only possible exception).
|
||||
*/
|
||||
if (rmesa->state.texture.unit[unit].texobj != t) {
|
||||
if (rmesa->state.texture.unit[unit].texobj != NULL) {
|
||||
/* The old texture is no longer bound to this texture unit.
|
||||
* Mark it as such.
|
||||
*/
|
||||
|
||||
rmesa->state.texture.unit[unit].texobj->base.bound &=
|
||||
~(1 << unit);
|
||||
}
|
||||
|
||||
rmesa->state.texture.unit[unit].texobj = t;
|
||||
t->base.bound |= (1 << unit);
|
||||
driUpdateTextureLRU((driTextureObject *) t); /* XXX: should be locked! */
|
||||
if (baseimage->mt &&
|
||||
baseimage->mt != t->mt &&
|
||||
r300_miptree_matches_texture(baseimage->mt, &t->base)) {
|
||||
r300_miptree_unreference(t->mt);
|
||||
t->mt = baseimage->mt;
|
||||
r300_miptree_reference(t->mt);
|
||||
} else if (t->mt && !r300_miptree_matches_texture(t->mt, &t->base)) {
|
||||
r300_miptree_unreference(t->mt);
|
||||
t->mt = 0;
|
||||
}
|
||||
|
||||
return !t->border_fallback;
|
||||
if (!t->mt) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, " Allocate new miptree\n");
|
||||
r300_try_alloc_miptree(rmesa, t, &baseimage->base, 0, texObj->BaseLevel);
|
||||
if (!t->mt) {
|
||||
_mesa_problem(ctx, "r300_validate_texture failed to alloc miptree");
|
||||
return GL_FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
/* Ensure all images are stored in the single main miptree */
|
||||
for(face = 0; face < t->mt->faces; ++face) {
|
||||
for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
|
||||
r300_texture_image *image = get_r300_texture_image(texObj->Image[face][level]);
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, " face %i, level %i... ", face, level);
|
||||
if (t->mt == image->mt) {
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "OK\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_TEXTURE)
|
||||
fprintf(stderr, "migrating\n");
|
||||
migrate_image_to_miptree(t->mt, image, face, level);
|
||||
}
|
||||
}
|
||||
|
||||
/* Configure the hardware registers (more precisely, the cached version
|
||||
* of the hardware registers). */
|
||||
setup_hardware_state(rmesa, t);
|
||||
|
||||
t->validated = GL_TRUE;
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Ensure all enabled and complete textures are uploaded.
|
||||
*/
|
||||
void r300ValidateTextures(GLcontext * ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
|
||||
if (!ctx->Texture.Unit[i]._ReallyEnabled)
|
||||
continue;
|
||||
|
||||
if (!r300_validate_texture(ctx, ctx->Texture.Unit[i]._Current)) {
|
||||
_mesa_warning(ctx,
|
||||
"failed to validate texture for unit %d.\n",
|
||||
i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void r300SetTexOffset(__DRIcontext * pDRICtx, GLint texname,
|
||||
|
|
@ -591,20 +420,18 @@ void r300SetTexOffset(__DRIcontext * pDRICtx, GLint texname,
|
|||
r300ContextPtr rmesa = pDRICtx->driverPrivate;
|
||||
struct gl_texture_object *tObj =
|
||||
_mesa_lookup_texture(rmesa->radeon.glCtx, texname);
|
||||
r300TexObjPtr t;
|
||||
r300TexObjPtr t = r300_tex_obj(tObj);
|
||||
uint32_t pitch_val;
|
||||
|
||||
if (!tObj)
|
||||
return;
|
||||
|
||||
t = (r300TexObjPtr) tObj->DriverData;
|
||||
|
||||
t->image_override = GL_TRUE;
|
||||
|
||||
if (!offset)
|
||||
return;
|
||||
|
||||
t->offset = offset;
|
||||
t->override_offset = offset;
|
||||
t->pitch_reg &= (1 << 13) -1;
|
||||
pitch_val = pitch;
|
||||
|
||||
|
|
@ -630,39 +457,3 @@ void r300SetTexOffset(__DRIcontext * pDRICtx, GLint texname,
|
|||
|
||||
t->pitch_reg |= pitch_val;
|
||||
}
|
||||
|
||||
static GLboolean r300UpdateTextureUnit(GLcontext * ctx, int unit)
|
||||
{
|
||||
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
|
||||
|
||||
if (texUnit->_ReallyEnabled & (TEXTURE_RECT_BIT)) {
|
||||
return (r300EnableTextureRect(ctx, unit) &&
|
||||
r300UpdateTexture(ctx, unit));
|
||||
} else if (texUnit->_ReallyEnabled & (TEXTURE_1D_BIT | TEXTURE_2D_BIT)) {
|
||||
return (r300EnableTexture2D(ctx, unit) &&
|
||||
r300UpdateTexture(ctx, unit));
|
||||
} else if (texUnit->_ReallyEnabled & (TEXTURE_3D_BIT)) {
|
||||
return (r300EnableTexture3D(ctx, unit) &&
|
||||
r300UpdateTexture(ctx, unit));
|
||||
} else if (texUnit->_ReallyEnabled & (TEXTURE_CUBE_BIT)) {
|
||||
return (r300EnableTextureCube(ctx, unit) &&
|
||||
r300UpdateTexture(ctx, unit));
|
||||
} else if (texUnit->_ReallyEnabled) {
|
||||
return GL_FALSE;
|
||||
} else {
|
||||
return GL_TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
void r300UpdateTextureState(GLcontext * ctx)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 8; i++) {
|
||||
if (!r300UpdateTextureUnit(ctx, i)) {
|
||||
_mesa_warning(ctx,
|
||||
"failed to update texture state for unit %d.\n",
|
||||
i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "drivers/common/driverfuncs.h"
|
||||
#include "swrast/swrast.h"
|
||||
|
||||
#include "radeon_buffer.h"
|
||||
#include "radeon_screen.h"
|
||||
#include "radeon_ioctl.h"
|
||||
#include "radeon_macros.h"
|
||||
|
|
@ -57,6 +58,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "utils.h"
|
||||
#include "vblank.h"
|
||||
#include "xmlpool.h" /* for symbolic values of enum-type options */
|
||||
#include "drirenderbuffer.h"
|
||||
|
||||
#define DRIVER_DATE "20060815"
|
||||
|
||||
|
|
@ -258,6 +260,54 @@ void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
radeon_make_renderbuffer_current(radeonContextPtr radeon,
|
||||
GLframebuffer *draw)
|
||||
{
|
||||
int size = 4096*4096*4;
|
||||
/* if radeon->fake */
|
||||
struct radeon_renderbuffer *rb;
|
||||
|
||||
if ((rb = (void *)draw->Attachment[BUFFER_FRONT_LEFT].Renderbuffer)) {
|
||||
|
||||
if (!rb->bo) {
|
||||
rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
||||
radeon->radeonScreen->frontOffset +
|
||||
radeon->radeonScreen->fbLocation,
|
||||
size,
|
||||
4096,
|
||||
0);
|
||||
}
|
||||
rb->cpp = radeon->radeonScreen->cpp;
|
||||
rb->pitch = radeon->radeonScreen->frontPitch;
|
||||
}
|
||||
if ((rb = (void *)draw->Attachment[BUFFER_BACK_LEFT].Renderbuffer)) {
|
||||
if (!rb->bo) {
|
||||
rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
||||
radeon->radeonScreen->backOffset +
|
||||
radeon->radeonScreen->fbLocation,
|
||||
size,
|
||||
4096,
|
||||
0);
|
||||
}
|
||||
rb->cpp = radeon->radeonScreen->cpp;
|
||||
rb->pitch = radeon->radeonScreen->backPitch;
|
||||
}
|
||||
if ((rb = (void *)draw->Attachment[BUFFER_DEPTH].Renderbuffer)) {
|
||||
if (!rb->bo) {
|
||||
rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
||||
radeon->radeonScreen->depthOffset +
|
||||
radeon->radeonScreen->fbLocation,
|
||||
size,
|
||||
4096,
|
||||
0);
|
||||
}
|
||||
rb->cpp = radeon->radeonScreen->cpp;
|
||||
rb->pitch = radeon->radeonScreen->depthPitch;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Force the context `c' to be the current context and associate with it
|
||||
* buffer `b'.
|
||||
*/
|
||||
|
|
@ -265,51 +315,57 @@ GLboolean radeonMakeCurrent(__DRIcontextPrivate * driContextPriv,
|
|||
__DRIdrawablePrivate * driDrawPriv,
|
||||
__DRIdrawablePrivate * driReadPriv)
|
||||
{
|
||||
if (driContextPriv) {
|
||||
radeonContextPtr radeon =
|
||||
(radeonContextPtr) driContextPriv->driverPrivate;
|
||||
radeonContextPtr radeon;
|
||||
GLframebuffer *dfb, *rfb;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_DRI)
|
||||
fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
|
||||
radeon->glCtx);
|
||||
|
||||
if (radeon->dri.drawable != driDrawPriv) {
|
||||
if (driDrawPriv->swap_interval == (unsigned)-1) {
|
||||
driDrawPriv->vblFlags =
|
||||
(radeon->radeonScreen->irq != 0)
|
||||
? driGetDefaultVBlankFlags(&radeon->
|
||||
optionCache)
|
||||
: VBLANK_FLAG_NO_IRQ;
|
||||
|
||||
driDrawableInitVBlank(driDrawPriv);
|
||||
}
|
||||
}
|
||||
|
||||
radeon->dri.readable = driReadPriv;
|
||||
|
||||
if (radeon->dri.drawable != driDrawPriv ||
|
||||
radeon->lastStamp != driDrawPriv->lastStamp) {
|
||||
radeon->dri.drawable = driDrawPriv;
|
||||
|
||||
radeonSetCliprects(radeon);
|
||||
r300UpdateViewportOffset(radeon->glCtx);
|
||||
}
|
||||
|
||||
_mesa_make_current(radeon->glCtx,
|
||||
(GLframebuffer *) driDrawPriv->
|
||||
driverPrivate,
|
||||
(GLframebuffer *) driReadPriv->
|
||||
driverPrivate);
|
||||
|
||||
_mesa_update_state(radeon->glCtx);
|
||||
|
||||
radeonUpdatePageFlipping(radeon);
|
||||
} else {
|
||||
if (!driContextPriv) {
|
||||
if (RADEON_DEBUG & DEBUG_DRI)
|
||||
fprintf(stderr, "%s ctx is null\n", __FUNCTION__);
|
||||
_mesa_make_current(0, 0, 0);
|
||||
_mesa_make_current(NULL, NULL, NULL);
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
radeon = (radeonContextPtr) driContextPriv->driverPrivate;
|
||||
dfb = driDrawPriv->driverPrivate;
|
||||
rfb = driReadPriv->driverPrivate;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_DRI)
|
||||
fprintf(stderr, "%s ctx %p\n", __FUNCTION__, radeon->glCtx);
|
||||
|
||||
driUpdateFramebufferSize(radeon->glCtx, driDrawPriv);
|
||||
if (driReadPriv != driDrawPriv)
|
||||
driUpdateFramebufferSize(radeon->glCtx, driReadPriv);
|
||||
|
||||
radeon_make_renderbuffer_current(radeon, dfb);
|
||||
|
||||
_mesa_make_current(radeon->glCtx, dfb, rfb);
|
||||
|
||||
if (radeon->dri.drawable != driDrawPriv) {
|
||||
if (driDrawPriv->swap_interval == (unsigned)-1) {
|
||||
driDrawPriv->vblFlags =
|
||||
(radeon->radeonScreen->irq != 0)
|
||||
? driGetDefaultVBlankFlags(&radeon->
|
||||
optionCache)
|
||||
: VBLANK_FLAG_NO_IRQ;
|
||||
|
||||
driDrawableInitVBlank(driDrawPriv);
|
||||
}
|
||||
}
|
||||
|
||||
radeon->dri.readable = driReadPriv;
|
||||
|
||||
if (radeon->dri.drawable != driDrawPriv ||
|
||||
radeon->lastStamp != driDrawPriv->lastStamp) {
|
||||
radeon->dri.drawable = driDrawPriv;
|
||||
|
||||
radeonSetCliprects(radeon);
|
||||
r300UpdateViewportOffset(radeon->glCtx);
|
||||
}
|
||||
|
||||
_mesa_update_state(radeon->glCtx);
|
||||
|
||||
radeonUpdatePageFlipping(radeon);
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_DRI)
|
||||
fprintf(stderr, "End %s\n", __FUNCTION__);
|
||||
return GL_TRUE;
|
||||
|
|
|
|||
|
|
@ -132,12 +132,13 @@ struct radeon_scissor_state {
|
|||
|
||||
struct radeon_colorbuffer_state {
|
||||
GLuint clear;
|
||||
GLint drawOffset, drawPitch;
|
||||
struct radeon_renderbuffer *rrb;
|
||||
};
|
||||
|
||||
struct radeon_state {
|
||||
struct radeon_colorbuffer_state color;
|
||||
struct radeon_scissor_state scissor;
|
||||
struct radeon_renderbuffer *depth_buffer;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -42,6 +42,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "swrast/swrast.h"
|
||||
#include "r300_context.h"
|
||||
#include "radeon_ioctl.h"
|
||||
#include "radeon_buffer.h"
|
||||
#include "r300_ioctl.h"
|
||||
#include "r300_state.h"
|
||||
#include "radeon_reg.h"
|
||||
|
|
@ -171,7 +172,7 @@ void radeonCopyBuffer(__DRIdrawablePrivate * dPriv,
|
|||
assert(dPriv->driContextPriv->driverPrivate);
|
||||
|
||||
radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
|
||||
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL) {
|
||||
fprintf(stderr, "\n%s( %p )\n\n", __FUNCTION__,
|
||||
(void *)radeon->glCtx);
|
||||
|
|
@ -261,6 +262,8 @@ void radeonPageFlip(__DRIdrawablePrivate * dPriv)
|
|||
GLint ret;
|
||||
GLboolean missed_target;
|
||||
__DRIscreenPrivate *psp = dPriv->driScreenPriv;
|
||||
GLframebuffer *fb = dPriv->driverPrivate;
|
||||
struct radeon_renderbuffer *rrb;
|
||||
|
||||
assert(dPriv);
|
||||
assert(dPriv->driContextPriv);
|
||||
|
|
@ -268,6 +271,8 @@ void radeonPageFlip(__DRIdrawablePrivate * dPriv)
|
|||
|
||||
radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
|
||||
|
||||
rrb = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
|
||||
|
||||
if (RADEON_DEBUG & DEBUG_IOCTL) {
|
||||
fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
|
||||
radeon->sarea->pfCurrentPage);
|
||||
|
|
@ -315,32 +320,10 @@ void radeonPageFlip(__DRIdrawablePrivate * dPriv)
|
|||
radeon->swap_count++;
|
||||
(void)(*psp->systemTime->getUST) (&radeon->swap_ust);
|
||||
|
||||
driFlipRenderbuffers(radeon->glCtx->WinSysDrawBuffer,
|
||||
driFlipRenderbuffers(radeon->glCtx->WinSysDrawBuffer,
|
||||
radeon->sarea->pfCurrentPage);
|
||||
|
||||
if (radeon->sarea->pfCurrentPage == 1) {
|
||||
radeon->state.color.drawOffset = radeon->radeonScreen->frontOffset;
|
||||
radeon->state.color.drawPitch = radeon->radeonScreen->frontPitch;
|
||||
} else {
|
||||
radeon->state.color.drawOffset = radeon->radeonScreen->backOffset;
|
||||
radeon->state.color.drawPitch = radeon->radeonScreen->backPitch;
|
||||
}
|
||||
|
||||
if (IS_R300_CLASS(radeon->radeonScreen)) {
|
||||
r300ContextPtr r300 = (r300ContextPtr)radeon;
|
||||
R300_STATECHANGE(r300, cb);
|
||||
r300->hw.cb.cmd[R300_CB_OFFSET] = r300->radeon.state.color.drawOffset +
|
||||
r300->radeon.radeonScreen->fbLocation;
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] = r300->radeon.state.color.drawPitch;
|
||||
|
||||
if (r300->radeon.radeonScreen->cpp == 4)
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_FORMAT_ARGB8888;
|
||||
else
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_FORMAT_RGB565;
|
||||
|
||||
if (r300->radeon.sarea->tiling_enabled)
|
||||
r300->hw.cb.cmd[R300_CB_PITCH] |= R300_COLOR_TILE_ENABLE;
|
||||
}
|
||||
radeon->state.color.rrb = rrb;
|
||||
}
|
||||
|
||||
void radeonWaitForIdleLocked(radeonContextPtr radeon)
|
||||
|
|
@ -391,6 +374,7 @@ void radeonFinish(GLcontext * ctx)
|
|||
radeonEmitIrqLocked(radeon);
|
||||
UNLOCK_HARDWARE(radeon);
|
||||
radeonWaitIrq(radeon);
|
||||
} else
|
||||
} else {
|
||||
radeonWaitForIdle(radeon);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -59,11 +59,11 @@ int prevLockLine = 0;
|
|||
void radeonUpdatePageFlipping(radeonContextPtr rmesa)
|
||||
{
|
||||
int use_back;
|
||||
__DRIdrawablePrivate *const drawable = rmesa->dri.drawable;
|
||||
GLframebuffer *fb = drawable->driverPrivate;
|
||||
|
||||
rmesa->doPageFlip = rmesa->sarea->pfState;
|
||||
if (rmesa->glCtx->WinSysDrawBuffer) {
|
||||
driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
|
||||
rmesa->sarea->pfCurrentPage);
|
||||
r300UpdateDrawBuffer(rmesa->glCtx);
|
||||
}
|
||||
|
||||
|
|
@ -72,16 +72,12 @@ void radeonUpdatePageFlipping(radeonContextPtr rmesa)
|
|||
BUFFER_BACK_LEFT) : 1;
|
||||
use_back ^= (rmesa->sarea->pfCurrentPage == 1);
|
||||
|
||||
if (use_back) {
|
||||
rmesa->state.color.drawOffset =
|
||||
rmesa->radeonScreen->backOffset;
|
||||
rmesa->state.color.drawPitch = rmesa->radeonScreen->backPitch;
|
||||
} else {
|
||||
rmesa->state.color.drawOffset =
|
||||
rmesa->radeonScreen->frontOffset;
|
||||
rmesa->state.color.drawPitch =
|
||||
rmesa->radeonScreen->frontPitch;
|
||||
}
|
||||
if (use_back)
|
||||
rmesa->state.color.rrb = (void *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
|
||||
else
|
||||
rmesa->state.color.rrb = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
|
||||
|
||||
rmesa->state.depth_buffer = (void *)fb->Attachment[BUFFER_DEPTH].Renderbuffer;
|
||||
}
|
||||
|
||||
/* Update the hardware state. This is called if another context has
|
||||
|
|
@ -98,7 +94,6 @@ void radeonGetLock(radeonContextPtr rmesa, GLuint flags)
|
|||
__DRIdrawablePrivate *const readable = rmesa->dri.readable;
|
||||
__DRIscreenPrivate *sPriv = rmesa->dri.screen;
|
||||
drm_radeon_sarea_t *sarea = rmesa->sarea;
|
||||
r300ContextPtr r300 = (r300ContextPtr) rmesa;
|
||||
|
||||
assert(drawable != NULL);
|
||||
|
||||
|
|
@ -125,12 +120,8 @@ void radeonGetLock(radeonContextPtr rmesa, GLuint flags)
|
|||
}
|
||||
|
||||
if (sarea->ctx_owner != rmesa->dri.hwContext) {
|
||||
int i;
|
||||
|
||||
sarea->ctx_owner = rmesa->dri.hwContext;
|
||||
for (i = 0; i < r300->nr_heaps; i++) {
|
||||
DRI_AGE_TEXTURES(r300->texture_heaps[i]);
|
||||
}
|
||||
radeon_bo_legacy_texture_age(rmesa->radeonScreen->bom);
|
||||
}
|
||||
|
||||
rmesa->lost_context = GL_TRUE;
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "r300_ioctl.h"
|
||||
#include "radeon_span.h"
|
||||
|
||||
#include "drirenderbuffer.h"
|
||||
#include "radeon_buffer.h"
|
||||
|
||||
#define DBG 0
|
||||
|
||||
|
|
@ -58,21 +58,21 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
* information.
|
||||
*/
|
||||
#define LOCAL_VARS \
|
||||
driRenderbuffer *drb = (driRenderbuffer *) rb; \
|
||||
const __DRIdrawablePrivate *dPriv = drb->dPriv; \
|
||||
struct radeon_renderbuffer *rrb = (void *) rb; \
|
||||
const __DRIdrawablePrivate *dPriv = rrb->dPriv; \
|
||||
const GLuint bottom = dPriv->h - 1; \
|
||||
GLubyte *buf = (GLubyte *) drb->flippedData \
|
||||
+ (dPriv->y * drb->flippedPitch + dPriv->x) * drb->cpp; \
|
||||
GLuint p; \
|
||||
(void) p;
|
||||
GLubyte *buf = (GLubyte *) ((char*)rrb->bo->ptr) \
|
||||
+ (dPriv->y * rrb->pitch + dPriv->x) * rrb->cpp; \
|
||||
GLuint p; \
|
||||
(void)p;
|
||||
|
||||
#define LOCAL_DEPTH_VARS \
|
||||
driRenderbuffer *drb = (driRenderbuffer *) rb; \
|
||||
const __DRIdrawablePrivate *dPriv = drb->dPriv; \
|
||||
struct radeon_renderbuffer *rrb = (void *) rb; \
|
||||
const __DRIdrawablePrivate *dPriv = rrb->dPriv; \
|
||||
const GLuint bottom = dPriv->h - 1; \
|
||||
GLuint xo = dPriv->x; \
|
||||
GLuint yo = dPriv->y; \
|
||||
GLubyte *buf = (GLubyte *) drb->Base.Data;
|
||||
GLubyte *buf = (GLubyte *) rrb->bo->ptr;
|
||||
|
||||
#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
|
||||
|
||||
|
|
@ -93,7 +93,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
|
||||
#define TAG(x) radeon##x##_RGB565
|
||||
#define TAG2(x,y) radeon##x##_RGB565##y
|
||||
#define GET_PTR(X,Y) (buf + ((Y) * drb->flippedPitch + (X)) * 2)
|
||||
#define GET_PTR(X,Y) (buf + ((Y) * rrb->pitch + (X)) * 2)
|
||||
#include "spantmp2.h"
|
||||
|
||||
/* 32 bit, ARGB8888 color spanline and pixel functions
|
||||
|
|
@ -103,7 +103,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
|
||||
#define TAG(x) radeon##x##_ARGB8888
|
||||
#define TAG2(x,y) radeon##x##_ARGB8888##y
|
||||
#define GET_PTR(X,Y) (buf + ((Y) * drb->flippedPitch + (X)) * 4)
|
||||
#define GET_PTR(X,Y) (buf + ((Y) * rrb->pitch + (X)) * 4)
|
||||
#include "spantmp2.h"
|
||||
|
||||
/* ================================================================
|
||||
|
|
@ -120,10 +120,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
* too...
|
||||
*/
|
||||
|
||||
static GLuint radeon_mba_z32(const driRenderbuffer * drb, GLint x, GLint y)
|
||||
static GLuint radeon_mba_z32(const struct radeon_renderbuffer * rrb,
|
||||
GLint x, GLint y)
|
||||
{
|
||||
GLuint pitch = drb->pitch;
|
||||
if (drb->depthHasSurface) {
|
||||
GLuint pitch = rrb->pitch;
|
||||
if (rrb->depthHasSurface) {
|
||||
return 4 * (x + y * pitch);
|
||||
} else {
|
||||
GLuint ba, address = 0; /* a[0..1] = 0 */
|
||||
|
|
@ -148,10 +149,10 @@ static GLuint radeon_mba_z32(const driRenderbuffer * drb, GLint x, GLint y)
|
|||
}
|
||||
|
||||
static INLINE GLuint
|
||||
radeon_mba_z16(const driRenderbuffer * drb, GLint x, GLint y)
|
||||
radeon_mba_z16(const struct radeon_renderbuffer *rrb, GLint x, GLint y)
|
||||
{
|
||||
GLuint pitch = drb->pitch;
|
||||
if (drb->depthHasSurface) {
|
||||
GLuint pitch = rrb->pitch;
|
||||
if (rrb->depthHasSurface) {
|
||||
return 2 * (x + y * pitch);
|
||||
} else {
|
||||
GLuint ba, address = 0; /* a[0] = 0 */
|
||||
|
|
@ -175,10 +176,10 @@ radeon_mba_z16(const driRenderbuffer * drb, GLint x, GLint y)
|
|||
#define VALUE_TYPE GLushort
|
||||
|
||||
#define WRITE_DEPTH( _x, _y, d ) \
|
||||
*(GLushort *)(buf + radeon_mba_z16( drb, _x + xo, _y + yo )) = d;
|
||||
*(GLushort *)(buf + radeon_mba_z16( rrb, _x + xo, _y + yo )) = d;
|
||||
|
||||
#define READ_DEPTH( d, _x, _y ) \
|
||||
d = *(GLushort *)(buf + radeon_mba_z16( drb, _x + xo, _y + yo ));
|
||||
d = *(GLushort *)(buf + radeon_mba_z16( rrb, _x + xo, _y + yo ));
|
||||
|
||||
#define TAG(x) radeon##x##_z16
|
||||
#include "depthtmp.h"
|
||||
|
|
@ -193,7 +194,7 @@ radeon_mba_z16(const driRenderbuffer * drb, GLint x, GLint y)
|
|||
#ifdef COMPILE_R300
|
||||
#define WRITE_DEPTH( _x, _y, d ) \
|
||||
do { \
|
||||
GLuint offset = radeon_mba_z32( drb, _x + xo, _y + yo ); \
|
||||
GLuint offset = radeon_mba_z32( rrb, _x + xo, _y + yo ); \
|
||||
GLuint tmp = *(GLuint *)(buf + offset); \
|
||||
tmp &= 0x000000ff; \
|
||||
tmp |= ((d << 8) & 0xffffff00); \
|
||||
|
|
@ -202,7 +203,7 @@ do { \
|
|||
#else
|
||||
#define WRITE_DEPTH( _x, _y, d ) \
|
||||
do { \
|
||||
GLuint offset = radeon_mba_z32( drb, _x + xo, _y + yo ); \
|
||||
GLuint offset = radeon_mba_z32( rrb, _x + xo, _y + yo ); \
|
||||
GLuint tmp = *(GLuint *)(buf + offset); \
|
||||
tmp &= 0xff000000; \
|
||||
tmp |= ((d) & 0x00ffffff); \
|
||||
|
|
@ -213,12 +214,12 @@ do { \
|
|||
#ifdef COMPILE_R300
|
||||
#define READ_DEPTH( d, _x, _y ) \
|
||||
do { \
|
||||
d = (*(GLuint *)(buf + radeon_mba_z32( drb, _x + xo, \
|
||||
d = (*(GLuint *)(buf + radeon_mba_z32( rrb, _x + xo, \
|
||||
_y + yo )) & 0xffffff00) >> 8; \
|
||||
}while(0)
|
||||
#else
|
||||
#define READ_DEPTH( d, _x, _y ) \
|
||||
d = *(GLuint *)(buf + radeon_mba_z32( drb, _x + xo, \
|
||||
d = *(GLuint *)(buf + radeon_mba_z32( rrb, _x + xo, \
|
||||
_y + yo )) & 0x00ffffff;
|
||||
#endif
|
||||
|
||||
|
|
@ -234,7 +235,7 @@ do { \
|
|||
#ifdef COMPILE_R300
|
||||
#define WRITE_STENCIL( _x, _y, d ) \
|
||||
do { \
|
||||
GLuint offset = radeon_mba_z32( drb, _x + xo, _y + yo ); \
|
||||
GLuint offset = radeon_mba_z32( rrb, _x + xo, _y + yo ); \
|
||||
GLuint tmp = *(GLuint *)(buf + offset); \
|
||||
tmp &= 0xffffff00; \
|
||||
tmp |= (d) & 0xff; \
|
||||
|
|
@ -243,7 +244,7 @@ do { \
|
|||
#else
|
||||
#define WRITE_STENCIL( _x, _y, d ) \
|
||||
do { \
|
||||
GLuint offset = radeon_mba_z32( drb, _x + xo, _y + yo ); \
|
||||
GLuint offset = radeon_mba_z32( rrb, _x + xo, _y + yo ); \
|
||||
GLuint tmp = *(GLuint *)(buf + offset); \
|
||||
tmp &= 0x00ffffff; \
|
||||
tmp |= (((d) & 0xff) << 24); \
|
||||
|
|
@ -254,14 +255,14 @@ do { \
|
|||
#ifdef COMPILE_R300
|
||||
#define READ_STENCIL( d, _x, _y ) \
|
||||
do { \
|
||||
GLuint offset = radeon_mba_z32( drb, _x + xo, _y + yo ); \
|
||||
GLuint offset = radeon_mba_z32( rrb, _x + xo, _y + yo ); \
|
||||
GLuint tmp = *(GLuint *)(buf + offset); \
|
||||
d = tmp & 0x000000ff; \
|
||||
} while (0)
|
||||
#else
|
||||
#define READ_STENCIL( d, _x, _y ) \
|
||||
do { \
|
||||
GLuint offset = radeon_mba_z32( drb, _x + xo, _y + yo ); \
|
||||
GLuint offset = radeon_mba_z32( rrb, _x + xo, _y + yo ); \
|
||||
GLuint tmp = *(GLuint *)(buf + offset); \
|
||||
d = (tmp & 0xff000000) >> 24; \
|
||||
} while (0)
|
||||
|
|
@ -270,6 +271,24 @@ do { \
|
|||
#define TAG(x) radeon##x##_z24_s8
|
||||
#include "stenciltmp.h"
|
||||
|
||||
static void map_buffer(struct gl_renderbuffer *rb, GLboolean write)
|
||||
{
|
||||
struct radeon_renderbuffer *rrb = (void*)rb;
|
||||
|
||||
if (rrb->bo) {
|
||||
radeon_bo_map(rrb->bo, write);
|
||||
}
|
||||
}
|
||||
|
||||
static void unmap_buffer(struct gl_renderbuffer *rb)
|
||||
{
|
||||
struct radeon_renderbuffer *rrb = (void*)rb;
|
||||
|
||||
if (rrb->bo) {
|
||||
radeon_bo_unmap(rrb->bo);
|
||||
}
|
||||
}
|
||||
|
||||
/* Move locking out to get reasonable span performance (10x better
|
||||
* than doing this in HW_LOCK above). WaitForIdle() is the main
|
||||
* culprit.
|
||||
|
|
@ -278,45 +297,63 @@ do { \
|
|||
static void radeonSpanRenderStart(GLcontext * ctx)
|
||||
{
|
||||
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
|
||||
int i;
|
||||
#ifdef COMPILE_R300
|
||||
r300ContextPtr r300 = (r300ContextPtr) rmesa;
|
||||
R300_FIREVERTICES(r300);
|
||||
#else
|
||||
RADEON_FIREVERTICES(rmesa);
|
||||
#endif
|
||||
|
||||
for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
|
||||
if (ctx->Texture.Unit[i]._ReallyEnabled)
|
||||
ctx->Driver.MapTexture(ctx, ctx->Texture.Unit[i]._Current);
|
||||
}
|
||||
|
||||
/* color draw buffers */
|
||||
for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
|
||||
map_buffer(ctx->DrawBuffer->_ColorDrawBuffers[i], GL_TRUE);
|
||||
}
|
||||
|
||||
map_buffer(ctx->ReadBuffer->_ColorReadBuffer, GL_FALSE);
|
||||
|
||||
if (ctx->DrawBuffer->_DepthBuffer) {
|
||||
map_buffer(ctx->DrawBuffer->_DepthBuffer->Wrapped, GL_TRUE);
|
||||
}
|
||||
if (ctx->DrawBuffer->_StencilBuffer)
|
||||
map_buffer(ctx->DrawBuffer->_StencilBuffer->Wrapped, GL_TRUE);
|
||||
|
||||
/* The locking and wait for idle should really only be needed in classic mode.
|
||||
* In a future memory manager based implementation, this should become
|
||||
* unnecessary due to the fact that mapping our buffers, textures, etc.
|
||||
* should implicitly wait for any previous rendering commands that must
|
||||
* be waited on. */
|
||||
LOCK_HARDWARE(rmesa);
|
||||
radeonWaitForIdleLocked(rmesa);
|
||||
|
||||
/* Read the first pixel in the frame buffer. This should
|
||||
* be a noop, right? In fact without this conform fails as reading
|
||||
* from the framebuffer sometimes produces old results -- the
|
||||
* on-card read cache gets mixed up and doesn't notice that the
|
||||
* framebuffer has been updated.
|
||||
*
|
||||
* Note that we should probably be reading some otherwise unused
|
||||
* region of VRAM, otherwise we might get incorrect results when
|
||||
* reading pixels from the top left of the screen.
|
||||
*
|
||||
* I found this problem on an R420 with glean's texCube test.
|
||||
* Note that the R200 span code also *writes* the first pixel in the
|
||||
* framebuffer, but I've found this to be unnecessary.
|
||||
* -- Nicolai Hähnle, June 2008
|
||||
*/
|
||||
{
|
||||
int p;
|
||||
driRenderbuffer *drb =
|
||||
(driRenderbuffer *) ctx->WinSysDrawBuffer->_ColorDrawBuffers[0];
|
||||
volatile int *buf =
|
||||
(volatile int *)(rmesa->dri.screen->pFB + drb->offset);
|
||||
p = *buf;
|
||||
}
|
||||
}
|
||||
|
||||
static void radeonSpanRenderFinish(GLcontext * ctx)
|
||||
{
|
||||
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
|
||||
int i;
|
||||
_swrast_flush(ctx);
|
||||
UNLOCK_HARDWARE(rmesa);
|
||||
|
||||
for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
|
||||
if (ctx->Texture.Unit[i]._ReallyEnabled)
|
||||
ctx->Driver.UnmapTexture(ctx, ctx->Texture.Unit[i]._Current);
|
||||
}
|
||||
|
||||
/* color draw buffers */
|
||||
for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++)
|
||||
unmap_buffer(ctx->DrawBuffer->_ColorDrawBuffers[i]);
|
||||
|
||||
unmap_buffer(ctx->ReadBuffer->_ColorReadBuffer);
|
||||
|
||||
if (ctx->DrawBuffer->_DepthBuffer)
|
||||
unmap_buffer(ctx->DrawBuffer->_DepthBuffer->Wrapped);
|
||||
if (ctx->DrawBuffer->_StencilBuffer)
|
||||
unmap_buffer(ctx->DrawBuffer->_StencilBuffer->Wrapped);
|
||||
}
|
||||
|
||||
void radeonInitSpanFuncs(GLcontext * ctx)
|
||||
|
|
@ -330,20 +367,17 @@ void radeonInitSpanFuncs(GLcontext * ctx)
|
|||
/**
|
||||
* Plug in the Get/Put routines for the given driRenderbuffer.
|
||||
*/
|
||||
void radeonSetSpanFunctions(driRenderbuffer * drb, const GLvisual * vis)
|
||||
void radeonSetSpanFunctions(struct radeon_renderbuffer *rrb)
|
||||
{
|
||||
if (drb->Base.InternalFormat == GL_RGBA) {
|
||||
if (vis->redBits == 5 && vis->greenBits == 6
|
||||
&& vis->blueBits == 5) {
|
||||
radeonInitPointers_RGB565(&drb->Base);
|
||||
} else {
|
||||
radeonInitPointers_ARGB8888(&drb->Base);
|
||||
}
|
||||
} else if (drb->Base.InternalFormat == GL_DEPTH_COMPONENT16) {
|
||||
radeonInitDepthPointers_z16(&drb->Base);
|
||||
} else if (drb->Base.InternalFormat == GL_DEPTH_COMPONENT24) {
|
||||
radeonInitDepthPointers_z24_s8(&drb->Base);
|
||||
} else if (drb->Base.InternalFormat == GL_STENCIL_INDEX8_EXT) {
|
||||
radeonInitStencilPointers_z24_s8(&drb->Base);
|
||||
if (rrb->base.InternalFormat == GL_RGB5) {
|
||||
radeonInitPointers_RGB565(&rrb->base);
|
||||
} else if (rrb->base.InternalFormat == GL_RGBA8) {
|
||||
radeonInitPointers_ARGB8888(&rrb->base);
|
||||
} else if (rrb->base.InternalFormat == GL_DEPTH_COMPONENT16) {
|
||||
radeonInitDepthPointers_z16(&rrb->base);
|
||||
} else if (rrb->base.InternalFormat == GL_DEPTH_COMPONENT24) {
|
||||
radeonInitDepthPointers_z24_s8(&rrb->base);
|
||||
} else if (rrb->base.InternalFormat == GL_STENCIL_INDEX8_EXT) {
|
||||
radeonInitStencilPointers_z24_s8(&rrb->base);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -223,14 +223,6 @@ void radeonEnable(GLcontext* ctx, GLenum cap, GLboolean state)
|
|||
void radeonInitState(radeonContextPtr radeon)
|
||||
{
|
||||
radeon->Fallback = 0;
|
||||
|
||||
if (radeon->glCtx->Visual.doubleBufferMode && radeon->sarea->pfCurrentPage == 0) {
|
||||
radeon->state.color.drawOffset = radeon->radeonScreen->backOffset;
|
||||
radeon->state.color.drawPitch = radeon->radeonScreen->backPitch;
|
||||
} else {
|
||||
radeon->state.color.drawOffset = radeon->radeonScreen->frontOffset;
|
||||
radeon->state.color.drawPitch = radeon->radeonScreen->frontPitch;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
718
src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
Normal file
718
src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
Normal file
|
|
@ -0,0 +1,718 @@
|
|||
/*
|
||||
* Copyright © 2008 Nicolai Haehnle
|
||||
* Copyright © 2008 Dave Airlie
|
||||
* Copyright © 2008 Jérôme Glisse
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*/
|
||||
/*
|
||||
* Authors:
|
||||
* Aapo Tahkola <aet@rasterburn.org>
|
||||
* Nicolai Haehnle <prefect_@gmx.net>
|
||||
* Dave Airlie
|
||||
* Jérôme Glisse <glisse@freedesktop.org>
|
||||
*/
|
||||
#include <stdio.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include "xf86drm.h"
|
||||
#include "drm.h"
|
||||
#include "radeon_drm.h"
|
||||
#include "radeon_bo.h"
|
||||
#include "radeon_bo_legacy.h"
|
||||
#include "radeon_ioctl.h"
|
||||
#include "texmem.h"
|
||||
|
||||
struct bo_legacy {
|
||||
struct radeon_bo base;
|
||||
driTextureObject tobj_base;
|
||||
int map_count;
|
||||
uint32_t pending;
|
||||
int is_pending;
|
||||
int validated;
|
||||
int static_bo;
|
||||
int got_dri_texture_obj;
|
||||
int dirty;
|
||||
uint32_t offset;
|
||||
driTextureObject dri_texture_obj;
|
||||
void *ptr;
|
||||
struct bo_legacy *next, *prev;
|
||||
struct bo_legacy *pnext, *pprev;
|
||||
};
|
||||
|
||||
struct bo_manager_legacy {
|
||||
struct radeon_bo_manager base;
|
||||
unsigned nhandle;
|
||||
unsigned nfree_handles;
|
||||
unsigned cfree_handles;
|
||||
uint32_t current_age;
|
||||
struct bo_legacy bos;
|
||||
struct bo_legacy pending_bos;
|
||||
uint32_t fb_location;
|
||||
uint32_t texture_offset;
|
||||
unsigned dma_alloc_size;
|
||||
unsigned cpendings;
|
||||
driTextureObject texture_swapped;
|
||||
driTexHeap *texture_heap;
|
||||
struct radeon_screen *screen;
|
||||
unsigned *free_handles;
|
||||
};
|
||||
|
||||
static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
|
||||
{
|
||||
struct bo_legacy *bo_legacy;
|
||||
|
||||
bo_legacy = (struct bo_legacy*)((char*)t)-sizeof(struct radeon_bo);
|
||||
bo_legacy->got_dri_texture_obj = 0;
|
||||
bo_legacy->validated = 0;
|
||||
}
|
||||
|
||||
static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
|
||||
{
|
||||
uint32_t tmp;
|
||||
|
||||
*handle = 0;
|
||||
if (bom->nhandle == 0xFFFFFFFF) {
|
||||
return -EINVAL;
|
||||
}
|
||||
if (bom->cfree_handles > 0) {
|
||||
tmp = bom->free_handles[--bom->cfree_handles];
|
||||
while (!bom->free_handles[bom->cfree_handles - 1]) {
|
||||
bom->cfree_handles--;
|
||||
if (bom->cfree_handles <= 0) {
|
||||
bom->cfree_handles = 0;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bom->cfree_handles = 0;
|
||||
tmp = bom->nhandle++;
|
||||
}
|
||||
assert(tmp);
|
||||
*handle = tmp;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
|
||||
{
|
||||
uint32_t *handles;
|
||||
|
||||
if (!handle) {
|
||||
return 0;
|
||||
}
|
||||
if (handle == (bom->nhandle - 1)) {
|
||||
int i;
|
||||
|
||||
bom->nhandle--;
|
||||
for (i = bom->cfree_handles - 1; i >= 0; i--) {
|
||||
if (bom->free_handles[i] == (bom->nhandle - 1)) {
|
||||
bom->nhandle--;
|
||||
bom->free_handles[i] = 0;
|
||||
}
|
||||
}
|
||||
while (!bom->free_handles[bom->cfree_handles - 1]) {
|
||||
bom->cfree_handles--;
|
||||
if (bom->cfree_handles <= 0) {
|
||||
bom->cfree_handles = 0;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
if (bom->cfree_handles < bom->nfree_handles) {
|
||||
bom->free_handles[bom->cfree_handles++] = handle;
|
||||
return 0;
|
||||
}
|
||||
bom->nfree_handles += 0x100;
|
||||
handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
|
||||
if (handles == NULL) {
|
||||
bom->nfree_handles -= 0x100;
|
||||
return -ENOMEM;
|
||||
}
|
||||
bom->free_handles = handles;
|
||||
bom->free_handles[bom->cfree_handles++] = handle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void legacy_get_current_age(struct bo_manager_legacy *boml)
|
||||
{
|
||||
drm_radeon_getparam_t gp;
|
||||
int r;
|
||||
|
||||
gp.param = RADEON_PARAM_LAST_CLEAR;
|
||||
gp.value = (int *)&boml->current_age;
|
||||
r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
|
||||
&gp, sizeof(gp));
|
||||
if (r) {
|
||||
fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
static int legacy_is_pending(struct radeon_bo *bo)
|
||||
{
|
||||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
|
||||
if (bo_legacy->is_pending <= 0) {
|
||||
bo_legacy->is_pending = 0;
|
||||
return 0;
|
||||
}
|
||||
if (boml->current_age >= bo_legacy->pending) {
|
||||
if (boml->pending_bos.pprev == bo_legacy) {
|
||||
boml->pending_bos.pprev = bo_legacy->pprev;
|
||||
}
|
||||
bo_legacy->pprev->pnext = bo_legacy->pnext;
|
||||
if (bo_legacy->pnext) {
|
||||
bo_legacy->pnext->pprev = bo_legacy->pprev;
|
||||
}
|
||||
while (bo_legacy->is_pending--) {
|
||||
radeon_bo_unref(bo);
|
||||
}
|
||||
bo_legacy->is_pending = 0;
|
||||
boml->cpendings--;
|
||||
return 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int legacy_wait_pending(struct radeon_bo *bo)
|
||||
{
|
||||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
|
||||
if (!bo_legacy->is_pending) {
|
||||
return 0;
|
||||
}
|
||||
/* FIXME: lockup and userspace busy looping that's all the folks */
|
||||
legacy_get_current_age(boml);
|
||||
while (legacy_is_pending(bo)) {
|
||||
usleep(10);
|
||||
legacy_get_current_age(boml);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void legacy_track_pending(struct bo_manager_legacy *boml)
|
||||
{
|
||||
struct bo_legacy *bo_legacy;
|
||||
struct bo_legacy *next;
|
||||
|
||||
legacy_get_current_age(boml);
|
||||
bo_legacy = boml->pending_bos.pnext;
|
||||
while (bo_legacy) {
|
||||
next = bo_legacy->pnext;
|
||||
if (legacy_is_pending(&(bo_legacy->base))) {
|
||||
}
|
||||
bo_legacy = next;
|
||||
}
|
||||
}
|
||||
|
||||
static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
|
||||
uint32_t size,
|
||||
uint32_t alignment,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct bo_legacy *bo_legacy;
|
||||
|
||||
bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
|
||||
if (bo_legacy == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
|
||||
bo_legacy->base.handle = 0;
|
||||
bo_legacy->base.size = size;
|
||||
bo_legacy->base.alignment = alignment;
|
||||
bo_legacy->base.flags = flags;
|
||||
bo_legacy->base.ptr = NULL;
|
||||
bo_legacy->map_count = 0;
|
||||
bo_legacy->next = NULL;
|
||||
bo_legacy->prev = NULL;
|
||||
bo_legacy->got_dri_texture_obj = 0;
|
||||
bo_legacy->pnext = NULL;
|
||||
bo_legacy->pprev = NULL;
|
||||
bo_legacy->next = boml->bos.next;
|
||||
bo_legacy->prev = &boml->bos;
|
||||
boml->bos.next = bo_legacy;
|
||||
if (bo_legacy->next) {
|
||||
bo_legacy->next->prev = bo_legacy;
|
||||
}
|
||||
return bo_legacy;
|
||||
}
|
||||
|
||||
static int bo_dma_alloc(struct radeon_bo *bo)
|
||||
{
|
||||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
drm_radeon_mem_alloc_t alloc;
|
||||
unsigned size;
|
||||
int base_offset;
|
||||
int r;
|
||||
|
||||
/* align size on 4Kb */
|
||||
size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
|
||||
alloc.region = RADEON_MEM_REGION_GART;
|
||||
alloc.alignment = bo_legacy->base.alignment;
|
||||
alloc.size = size;
|
||||
alloc.region_offset = &base_offset;
|
||||
r = drmCommandWriteRead(bo->bom->fd,
|
||||
DRM_RADEON_ALLOC,
|
||||
&alloc,
|
||||
sizeof(alloc));
|
||||
if (r) {
|
||||
/* ptr is set to NULL if dma allocation failed */
|
||||
bo_legacy->ptr = NULL;
|
||||
exit(0);
|
||||
return r;
|
||||
}
|
||||
bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
|
||||
bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
|
||||
bo->size = size;
|
||||
boml->dma_alloc_size += size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bo_dma_free(struct radeon_bo *bo)
|
||||
{
|
||||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
drm_radeon_mem_free_t memfree;
|
||||
int r;
|
||||
|
||||
if (bo_legacy->ptr == NULL) {
|
||||
/* ptr is set to NULL if dma allocation failed */
|
||||
return 0;
|
||||
}
|
||||
legacy_get_current_age(boml);
|
||||
memfree.region = RADEON_MEM_REGION_GART;
|
||||
memfree.region_offset = bo_legacy->offset;
|
||||
memfree.region_offset -= boml->screen->gart_texture_offset;
|
||||
r = drmCommandWrite(boml->base.fd,
|
||||
DRM_RADEON_FREE,
|
||||
&memfree,
|
||||
sizeof(memfree));
|
||||
if (r) {
|
||||
fprintf(stderr, "Failed to free bo[%p] at %08x\n",
|
||||
&bo_legacy->base, memfree.region_offset);
|
||||
fprintf(stderr, "ret = %s\n", strerror(-r));
|
||||
return r;
|
||||
}
|
||||
boml->dma_alloc_size -= bo_legacy->base.size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bo_free(struct bo_legacy *bo_legacy)
|
||||
{
|
||||
struct bo_manager_legacy *boml;
|
||||
|
||||
if (bo_legacy == NULL) {
|
||||
return;
|
||||
}
|
||||
boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
|
||||
bo_legacy->prev->next = bo_legacy->next;
|
||||
if (bo_legacy->next) {
|
||||
bo_legacy->next->prev = bo_legacy->prev;
|
||||
}
|
||||
if (!bo_legacy->static_bo) {
|
||||
legacy_free_handle(boml, bo_legacy->base.handle);
|
||||
if (bo_legacy->base.flags & RADEON_GEM_DOMAIN_GTT) {
|
||||
/* dma buffers */
|
||||
bo_dma_free(&bo_legacy->base);
|
||||
} else {
|
||||
/* free backing store */
|
||||
free(bo_legacy->ptr);
|
||||
}
|
||||
}
|
||||
memset(bo_legacy, 0 , sizeof(struct bo_legacy));
|
||||
free(bo_legacy);
|
||||
}
|
||||
|
||||
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
|
||||
uint32_t handle,
|
||||
uint32_t size,
|
||||
uint32_t alignment,
|
||||
uint32_t flags)
|
||||
{
|
||||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
|
||||
struct bo_legacy *bo_legacy;
|
||||
int r;
|
||||
|
||||
if (handle) {
|
||||
bo_legacy = boml->bos.next;
|
||||
while (bo_legacy) {
|
||||
if (bo_legacy->base.handle == handle) {
|
||||
radeon_bo_ref(&(bo_legacy->base));
|
||||
return (struct radeon_bo*)bo_legacy;
|
||||
}
|
||||
bo_legacy = bo_legacy->next;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bo_legacy = bo_allocate(boml, size, alignment, flags);
|
||||
bo_legacy->static_bo = 0;
|
||||
r = legacy_new_handle(boml, &bo_legacy->base.handle);
|
||||
if (r) {
|
||||
bo_free(bo_legacy);
|
||||
return NULL;
|
||||
}
|
||||
if (bo_legacy->base.flags & RADEON_GEM_DOMAIN_GTT) {
|
||||
legacy_track_pending(boml);
|
||||
/* dma buffers */
|
||||
r = bo_dma_alloc(&(bo_legacy->base));
|
||||
if (r) {
|
||||
fprintf(stderr, "Ran out of GART memory (for %d)!\n", size);
|
||||
fprintf(stderr, "Please consider adjusting GARTSize option.\n");
|
||||
bo_free(bo_legacy);
|
||||
exit(-1);
|
||||
return NULL;
|
||||
}
|
||||
} else {
|
||||
bo_legacy->ptr = malloc(bo_legacy->base.size);
|
||||
if (bo_legacy->ptr == NULL) {
|
||||
bo_free(bo_legacy);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
radeon_bo_ref(&(bo_legacy->base));
|
||||
return (struct radeon_bo*)bo_legacy;
|
||||
}
|
||||
|
||||
static void bo_ref(struct radeon_bo *bo)
|
||||
{
|
||||
}
|
||||
|
||||
static void bo_unref(struct radeon_bo *bo)
|
||||
{
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
|
||||
if (bo->cref <= 0) {
|
||||
bo_legacy->prev->next = bo_legacy->next;
|
||||
if (bo_legacy->next) {
|
||||
bo_legacy->next->prev = bo_legacy->prev;
|
||||
}
|
||||
if (!bo_legacy->is_pending) {
|
||||
bo_free(bo_legacy);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int bo_map(struct radeon_bo *bo, int write)
|
||||
{
|
||||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
|
||||
legacy_wait_pending(bo);
|
||||
bo_legacy->validated = 0;
|
||||
bo_legacy->dirty = 1;
|
||||
bo_legacy->map_count++;
|
||||
bo->ptr = bo_legacy->ptr;
|
||||
/* Read the first pixel in the frame buffer. This should
|
||||
* be a noop, right? In fact without this conform fails as reading
|
||||
* from the framebuffer sometimes produces old results -- the
|
||||
* on-card read cache gets mixed up and doesn't notice that the
|
||||
* framebuffer has been updated.
|
||||
*
|
||||
* Note that we should probably be reading some otherwise unused
|
||||
* region of VRAM, otherwise we might get incorrect results when
|
||||
* reading pixels from the top left of the screen.
|
||||
*
|
||||
* I found this problem on an R420 with glean's texCube test.
|
||||
* Note that the R200 span code also *writes* the first pixel in the
|
||||
* framebuffer, but I've found this to be unnecessary.
|
||||
* -- Nicolai Hähnle, June 2008
|
||||
*/
|
||||
{
|
||||
int p;
|
||||
volatile int *buf = (int*)boml->screen->driScreen->pFB;
|
||||
p = *buf;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bo_unmap(struct radeon_bo *bo)
|
||||
{
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
|
||||
if (--bo_legacy->map_count > 0) {
|
||||
return 0;
|
||||
}
|
||||
bo->ptr = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct radeon_bo_funcs bo_legacy_funcs = {
|
||||
bo_open,
|
||||
bo_ref,
|
||||
bo_unref,
|
||||
bo_map,
|
||||
bo_unmap
|
||||
};
|
||||
|
||||
/* Ensure a buffer object has a backing allocation in the VRAM texture
 * heap and that its CPU-side contents have been uploaded there.
 *
 * On first validation a texture-heap block is allocated and the BO's
 * VRAM offset is computed from it. Whenever the BO is dirty, its
 * contents are uploaded with a DRM_RADEON_TEXTURE blit.
 *
 * soffset/eoffset are currently unused here; the caller
 * (radeon_bo_legacy_validate) reads bo_legacy->offset afterwards.
 * Returns 0 on success, -1 on allocation failure (after deliberately
 * crashing, see below). */
static int bo_vram_validate(struct radeon_bo *bo,
                            uint32_t *soffset,
                            uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;

    if (!bo_legacy->got_dri_texture_obj) {
        /* First validation: carve a block out of the shared texture heap. */
        make_empty_list(&bo_legacy->dri_texture_obj);
        bo_legacy->dri_texture_obj.totalSize = bo->size;
        r = driAllocateTexture(&boml->texture_heap, 1,
                               &bo_legacy->dri_texture_obj);
        if (r) {
            /* Deliberate NULL write: force an immediate crash so the
             * allocation failure is caught in a debugger rather than
             * producing silent rendering corruption. */
            uint8_t *segfault=NULL;
            fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
            *segfault=1;
            return -1;
        }
        /* VRAM offset = start of texture region + block offset within it. */
        bo_legacy->offset = boml->texture_offset +
                            bo_legacy->dri_texture_obj.memBlock->ofs;
        bo_legacy->got_dri_texture_obj = 1;
        bo_legacy->dirty = 1;
    }
    if (bo_legacy->dirty) {
        /* Copy to VRAM using a blit.
         * All memory is 4K aligned. We're using 1024 pixels wide blits.
         */
        drm_radeon_texture_t tex;
        drm_radeon_tex_image_t tmp;
        int ret;

        tex.offset = bo_legacy->offset;
        tex.image = &tmp;
        /* blit destination must be 1KiB aligned */
        assert(!(tex.offset & 1023));

        tmp.x = 0;
        tmp.y = 0;
        if (bo->size < 4096) {
            /* small BO: one row of 32-bit pixels (4 bytes each) */
            tmp.width = (bo->size + 3) / 4;
            tmp.height = 1;
        } else {
            /* 1024 pixels * 4 bytes = 4KiB per row */
            tmp.width = 1024;
            tmp.height = (bo->size + 4095) / 4096;
        }
        tmp.data = bo_legacy->ptr;
        tex.format = RADEON_TXFORMAT_ARGB8888;
        tex.width = tmp.width;
        tex.height = tmp.height;
        /* NOTE(review): pitch appears to be in 64-byte units
         * (width*4/64 == width/16) — confirm against the DRM texture
         * upload ioctl. */
        tex.pitch = MAX2(tmp.width / 16, 1);
        do {
            /* The texture upload ioctl can ask us to retry (-EAGAIN)
             * while the ring is busy; back off briefly and try again. */
            ret = drmCommandWriteRead(bo->bom->fd,
                                      DRM_RADEON_TEXTURE,
                                      &tex,
                                      sizeof(drm_radeon_texture_t));
            if (ret) {
                if (RADEON_DEBUG & DEBUG_IOCTL)
                    fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                usleep(1);
            }
        } while (ret == -EAGAIN);
        bo_legacy->dirty = 0;
    }
    return 0;
}
|
||||
|
||||
int radeon_bo_legacy_validate(struct radeon_bo *bo,
|
||||
uint32_t *soffset,
|
||||
uint32_t *eoffset)
|
||||
{
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
int r;
|
||||
|
||||
if (bo_legacy->map_count) {
|
||||
fprintf(stderr, "bo(%p, %d) is mapped (%d) can't valide it.\n",
|
||||
bo, bo->size, bo_legacy->map_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (bo_legacy->static_bo || bo_legacy->validated) {
|
||||
*soffset = bo_legacy->offset;
|
||||
*eoffset = bo_legacy->offset + bo->size;
|
||||
return 0;
|
||||
}
|
||||
if (!(bo->flags & RADEON_GEM_DOMAIN_GTT)) {
|
||||
r = bo_vram_validate(bo, soffset, eoffset);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
}
|
||||
*soffset = bo_legacy->offset;
|
||||
*eoffset = bo_legacy->offset + bo->size;
|
||||
bo_legacy->validated = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Mark a buffer object as referenced by a command buffer that has been
 * submitted but not yet retired by the hardware ("pending" age).
 *
 * Takes a reference on the BO for each pending use. The BO is linked
 * onto the manager's pending list only on the first pending use
 * (is_pending 0 -> 1); subsequent uses just bump the count. The list is
 * an intrusive tail-append: pprev of the list head points at the last
 * element. */
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending += 1;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        /* already on the list */
        return;
    }
    /* append at the tail of the pending list */
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}
|
||||
|
||||
void radeon_bo_manager_legacy_shutdown(struct radeon_bo_manager *bom)
|
||||
{
|
||||
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
|
||||
struct bo_legacy *bo_legacy;
|
||||
|
||||
if (bom == NULL) {
|
||||
return;
|
||||
}
|
||||
bo_legacy = boml->bos.next;
|
||||
while (bo_legacy) {
|
||||
struct bo_legacy *next;
|
||||
|
||||
next = bo_legacy->next;
|
||||
bo_free(bo_legacy);
|
||||
bo_legacy = next;
|
||||
}
|
||||
free(boml->free_handles);
|
||||
free(boml);
|
||||
}
|
||||
|
||||
/* Create the legacy buffer-object manager for a screen.
 *
 * Sets up the shared VRAM texture heap, the handle allocator, and the
 * BO/pending lists, then creates three oversized "static" BOs pinned at
 * the fixed front/back/depth framebuffer locations so that the rest of
 * the driver can treat those surfaces as ordinary buffer objects.
 *
 * Returns the manager, or NULL on allocation failure (partially built
 * state is released via radeon_bo_manager_legacy_shutdown). */
struct radeon_bo_manager *radeon_bo_manager_legacy(struct radeon_screen *scrn)
{
    struct bo_manager_legacy *bom;
    struct bo_legacy *bo;
    unsigned size;

    bom = (struct bo_manager_legacy*)
          calloc(1, sizeof(struct bo_manager_legacy));
    if (bom == NULL) {
        return NULL;
    }

    /* Shared texture heap backing all VRAM BOs; 12 = log2 of the 4KiB
     * allocation granularity.
     * NOTE(review): the driCreateTextureHeap return value is not
     * checked — confirm it cannot fail here. */
    bom->texture_heap = driCreateTextureHeap(0,
                                             bom,
                                             scrn->texSize[0],
                                             12,
                                             RADEON_NR_TEX_REGIONS,
                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
                                             &scrn->sarea->tex_age[0],
                                             &bom->texture_swapped,
                                             sizeof(struct bo_legacy),
                                             &bo_legacy_tobj_destroy);
    bom->texture_offset = scrn->texOffset[0];

    bom->base.funcs = &bo_legacy_funcs;
    bom->base.fd = scrn->driScreen->fd;
    bom->bos.next = NULL;
    bom->bos.prev = NULL;
    /* empty pending list: head's pprev points at itself (tail == head) */
    bom->pending_bos.pprev = &bom->pending_bos;
    bom->pending_bos.pnext = NULL;
    bom->screen = scrn;
    bom->fb_location = scrn->fbLocation;
    /* handle 0 is reserved; start handing out from 1 */
    bom->nhandle = 1;
    bom->cfree_handles = 0;
    bom->nfree_handles = 0x400;
    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
    if (bom->free_handles == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }

    /* biggest framebuffer size */
    size = 4096*4096*4;
    /* allocate front */
    bo = bo_allocate(bom, size, 0, 0);
    if (bo == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }
    /* static BOs are pinned at a fixed card address; handle == offset */
    bo->static_bo = 1;
    bo->offset = bom->screen->frontOffset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = scrn->driScreen->pFB + bom->screen->frontOffset;
    /* keep the handle allocator above any fixed handle */
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    /* allocate back */
    bo = bo_allocate(bom, size, 0, 0);
    if (bo == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }
    bo->static_bo = 1;
    bo->offset = bom->screen->backOffset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = scrn->driScreen->pFB + bom->screen->backOffset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    /* allocate depth */
    bo = bo_allocate(bom, size, 0, 0);
    if (bo == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }
    bo->base.flags = 0;
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }
    bo->static_bo = 1;
    bo->offset = bom->screen->depthOffset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = scrn->driScreen->pFB + bom->screen->depthOffset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    return (struct radeon_bo_manager*)bom;
}
|
||||
|
||||
/* Synchronize the local texture heap with the SAREA texture age so that
 * evictions performed by other clients are noticed. Thin wrapper around
 * the DRI texmem DRI_AGE_TEXTURES macro. */
void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}
|
||||
|
||||
unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
|
||||
{
|
||||
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
|
||||
|
||||
if (bo_legacy->static_bo || (bo->flags & RADEON_GEM_DOMAIN_GTT)) {
|
||||
return 0;
|
||||
}
|
||||
return bo->size;
|
||||
}
|
||||
47
src/mesa/drivers/dri/radeon/radeon_bo_legacy.h
Normal file
47
src/mesa/drivers/dri/radeon/radeon_bo_legacy.h
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* Copyright © 2008 Nicolai Haehnle
|
||||
* Copyright © 2008 Jérôme Glisse
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*/
|
||||
/*
|
||||
* Authors:
|
||||
* Aapo Tahkola <aet@rasterburn.org>
|
||||
* Nicolai Haehnle <prefect_@gmx.net>
|
||||
* Jérôme Glisse <glisse@freedesktop.org>
|
||||
*/
|
||||
#ifndef RADEON_BO_LEGACY_H
#define RADEON_BO_LEGACY_H

#include "radeon_bo.h"
#include "radeon_screen.h"

/* Mark a BO as referenced by an in-flight command buffer with the given
 * hardware age. */
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending);
/* Ensure a BO is GPU-resident and report its [soffset, eoffset) card
 * address window. Returns 0 on success or a negative errno. */
int radeon_bo_legacy_validate(struct radeon_bo *bo,
                              uint32_t *soffset,
                              uint32_t *eoffset);
/* Create / destroy the legacy (non-GEM) buffer-object manager. */
struct radeon_bo_manager *radeon_bo_manager_legacy(struct radeon_screen *scrn);
void radeon_bo_manager_legacy_shutdown(struct radeon_bo_manager *bom);
/* Sync the texture heap with the shared SAREA texture age. */
void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom);
/* Relocation-size cost of a BO for command-stream budgeting. */
unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo);

#endif
|
||||
47
src/mesa/drivers/dri/radeon/radeon_buffer.h
Normal file
47
src/mesa/drivers/dri/radeon/radeon_buffer.h
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* Copyright 2008 Red Hat, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software")
|
||||
* to deal in the software without restriction, including without limitation
|
||||
* on the rights to use, copy, modify, merge, publish, distribute, sub
|
||||
* license, and/or sell copies of the Software, and to permit persons to whom
|
||||
* the Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
|
||||
* IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Adam Jackson <ajax@redhat.com>
|
||||
*/
|
||||
|
||||
#ifndef RADEON_BUFFER_H
#define RADEON_BUFFER_H

#include "radeon_bo.h"
#include "dri_util.h"

/* A Mesa renderbuffer backed by a radeon buffer object. */
struct radeon_renderbuffer
{
    struct gl_renderbuffer base;  /* must be first: cast to/from gl_renderbuffer */
    struct radeon_bo *bo;         /* backing storage */
    unsigned int cpp;             /* bytes per pixel */
    /* unsigned int offset; */
    unsigned int pitch;           /* row stride, in pixels */
    unsigned int height;

    /* boo Xorg 6.8.2 compat */
    int depthHasSurface;

    /* drawable this renderbuffer belongs to */
    __DRIdrawablePrivate *dPriv;
};

#endif
|
||||
376
src/mesa/drivers/dri/radeon/radeon_cs_legacy.c
Normal file
376
src/mesa/drivers/dri/radeon/radeon_cs_legacy.c
Normal file
|
|
@ -0,0 +1,376 @@
|
|||
/*
|
||||
* Copyright © 2008 Nicolai Haehnle
|
||||
* Copyright © 2008 Jérôme Glisse
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*/
|
||||
/*
|
||||
* Authors:
|
||||
* Aapo Tahkola <aet@rasterburn.org>
|
||||
* Nicolai Haehnle <prefect_@gmx.net>
|
||||
* Jérôme Glisse <glisse@freedesktop.org>
|
||||
*/
|
||||
#include <errno.h>
|
||||
#include "r300_reg.h"
|
||||
#include "r300_emit.h"
|
||||
#include "r300_cmdbuf.h"
|
||||
#include "radeon_cs.h"
|
||||
#include "radeon_cs_legacy.h"
|
||||
#include "radeon_bo_legacy.h"
|
||||
#include "radeon_context.h"
|
||||
|
||||
/* Legacy command-stream manager: wraps the generic radeon_cs_manager
 * with the context and scratch-register bookkeeping needed to track
 * buffer ages through the old DRM_RADEON_CMDBUF path. */
struct cs_manager_legacy {
    struct radeon_cs_manager base;   /* must be first: cast to/from base */
    struct radeon_context *ctx;
    /* hack for scratch stuff */
    uint32_t pending_age;            /* age written back by the kernel scratch reg */
    uint32_t pending_count;
};
|
||||
|
||||
/* Legacy relocation record: in addition to the generic reloc data it
 * remembers every packet index (dword position in the CS) that refers
 * to the BO, so the offsets can be patched in at emit time. */
struct cs_reloc_legacy {
    struct radeon_cs_reloc base;  /* must be first: cast to/from base */
    uint32_t cindices;            /* number of entries in indices[] */
    uint32_t *indices;            /* dword positions to patch with the BO offset */
};
|
||||
|
||||
|
||||
static struct radeon_cs *cs_create(struct radeon_cs_manager *csm,
|
||||
uint32_t ndw)
|
||||
{
|
||||
struct radeon_cs *cs;
|
||||
|
||||
cs = (struct radeon_cs*)calloc(1, sizeof(struct radeon_cs));
|
||||
if (cs == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
cs->csm = csm;
|
||||
cs->ndw = (ndw + 0x3FF) & (~0x3FF);
|
||||
cs->packets = (uint32_t*)malloc(4*cs->ndw);
|
||||
if (cs->packets == NULL) {
|
||||
free(cs);
|
||||
return NULL;
|
||||
}
|
||||
cs->relocs_total_size = 0;
|
||||
return cs;
|
||||
}
|
||||
|
||||
static int cs_write_dword(struct radeon_cs *cs, uint32_t dword)
|
||||
{
|
||||
if (cs->cdw >= cs->ndw) {
|
||||
uint32_t tmp, *ptr;
|
||||
tmp = (cs->cdw + 1 + 0x3FF) & (~0x3FF);
|
||||
ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
|
||||
if (ptr == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
cs->packets = ptr;
|
||||
cs->ndw = tmp;
|
||||
}
|
||||
cs->packets[cs->cdw++] = dword;
|
||||
if (cs->section) {
|
||||
cs->section_cdw++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Record a relocation for the dword most recently written to the CS
 * (cs->cdw - 1): that dword will be patched with the BO's real offset
 * at emit time.
 *
 * If the BO is already tracked, its reloc window [soffset, eoffset) and
 * domains are widened and the new patch index appended; otherwise a new
 * reloc record is created and a reference taken on the BO.
 * Returns 0 on success, -EINVAL for a bad window, -ENOMEM on
 * allocation failure. */
static int cs_write_reloc(struct radeon_cs *cs,
                          struct radeon_bo *bo,
                          uint32_t soffset,
                          uint32_t eoffset,
                          uint32_t domains)
{
    struct cs_reloc_legacy *relocs;
    int i;

    relocs = (struct cs_reloc_legacy *)cs->relocs;
    /* check reloc window */
    if (eoffset > bo->size) {
        return -EINVAL;
    }
    if (soffset > eoffset) {
        return -EINVAL;
    }
    /* check if bo is already referenced */
    for(i = 0; i < cs->crelocs; i++) {
        uint32_t *indices;

        if (relocs[i].base.bo->handle == bo->handle) {
            /* update start offset and size */
            if (eoffset > relocs[i].base.eoffset) {
                relocs[i].base.eoffset = eoffset;
            }
            if (soffset < relocs[i].base.soffset) {
                relocs[i].base.soffset = soffset;
            }
            relocs[i].base.size = relocs[i].base.eoffset -
                                  relocs[i].base.soffset;
            relocs[i].base.domains |= domains;
            /* append the patch index; roll the count back on OOM */
            relocs[i].cindices += 1;
            indices = (uint32_t*)realloc(relocs[i].indices,
                                         relocs[i].cindices * 4);
            if (indices == NULL) {
                relocs[i].cindices -= 1;
                return -ENOMEM;
            }
            relocs[i].indices = indices;
            relocs[i].indices[relocs[i].cindices - 1] = cs->cdw - 1;
            return 0;
        }
    }
    /* add bo to reloc */
    relocs = (struct cs_reloc_legacy*)
             realloc(cs->relocs,
                     sizeof(struct cs_reloc_legacy) * (cs->crelocs + 1));
    if (relocs == NULL) {
        return -ENOMEM;
    }
    cs->relocs = relocs;
    relocs[cs->crelocs].base.bo = bo;
    relocs[cs->crelocs].base.soffset = soffset;
    relocs[cs->crelocs].base.eoffset = eoffset;
    relocs[cs->crelocs].base.size = eoffset - soffset;
    relocs[cs->crelocs].base.domains = domains;
    relocs[cs->crelocs].indices = (uint32_t*)malloc(4);
    if (relocs[cs->crelocs].indices == NULL) {
        return -ENOMEM;
    }
    relocs[cs->crelocs].indices[0] = cs->cdw - 1;
    relocs[cs->crelocs].cindices = 1;
    cs->relocs_total_size += radeon_bo_legacy_relocs_size(bo);
    cs->crelocs++;
    /* hold a reference for the lifetime of the reloc record */
    radeon_bo_ref(bo);
    return 0;
}
|
||||
|
||||
/* Open a CS "section": a declared run of exactly ndw dwords, used to
 * catch mismatches between declared and emitted packet sizes. The
 * file/func/line of the caller are remembered for diagnostics.
 * Returns 0, or -EPIPE if a section is already open (nesting is not
 * allowed). */
static int cs_begin(struct radeon_cs *cs,
                    uint32_t ndw,
                    const char *file,
                    const char *func,
                    int line)
{
    if (cs->section) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                cs->section_file, cs->section_func, cs->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section = 1;
    cs->section_ndw = ndw;
    cs->section_cdw = 0;   /* cs_write_dword() counts into this */
    cs->section_file = file;
    cs->section_func = func;
    cs->section_line = line;
    return 0;
}
|
||||
|
||||
static int cs_end(struct radeon_cs *cs,
|
||||
const char *file,
|
||||
const char *func,
|
||||
int line)
|
||||
|
||||
{
|
||||
if (!cs->section) {
|
||||
fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
|
||||
file, func, line);
|
||||
return -EPIPE;
|
||||
}
|
||||
cs->section = 0;
|
||||
if (cs->section_ndw != cs->section_cdw) {
|
||||
fprintf(stderr, "CS section size missmatch start at (%s,%s,%d)\n",
|
||||
cs->section_file, cs->section_func, cs->section_line);
|
||||
fprintf(stderr, "CS section end at (%s,%s,%d)\n",
|
||||
file, func, line);
|
||||
return -EPIPE;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cs_process_relocs(struct radeon_cs *cs)
|
||||
{
|
||||
struct cs_manager_legacy *csm = (struct cs_manager_legacy*)cs->csm;
|
||||
struct cs_reloc_legacy *relocs;
|
||||
int i, j, r;
|
||||
|
||||
if (!IS_R300_CLASS(csm->ctx->radeonScreen)) {
|
||||
/* FIXME: r300 only right now */
|
||||
return -EINVAL;
|
||||
}
|
||||
csm = (struct cs_manager_legacy*)cs->csm;
|
||||
relocs = (struct cs_reloc_legacy *)cs->relocs;
|
||||
for (i = 0; i < cs->crelocs; i++) {
|
||||
for (j = 0; j < relocs[i].cindices; j++) {
|
||||
uint32_t soffset, eoffset;
|
||||
|
||||
soffset = relocs[i].base.soffset;
|
||||
eoffset = relocs[i].base.eoffset;
|
||||
r = radeon_bo_legacy_validate(relocs[i].base.bo,
|
||||
&soffset, &eoffset);
|
||||
if (r) {
|
||||
fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
|
||||
relocs[i].base.bo, soffset, eoffset);
|
||||
return r;
|
||||
}
|
||||
cs->packets[relocs[i].indices[j]] += soffset;
|
||||
if (cs->packets[relocs[i].indices[j]] >= eoffset) {
|
||||
radeon_bo_debug(relocs[i].base.bo, 12);
|
||||
fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
|
||||
relocs[i].base.bo, soffset, eoffset);
|
||||
fprintf(stderr, "above end: %p 0x%08X 0x%08X\n",
|
||||
relocs[i].base.bo,
|
||||
cs->packets[relocs[i].indices[j]],
|
||||
eoffset);
|
||||
exit(0);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cs_set_age(struct radeon_cs *cs)
|
||||
{
|
||||
struct cs_manager_legacy *csm = (struct cs_manager_legacy*)cs->csm;
|
||||
struct cs_reloc_legacy *relocs;
|
||||
int i;
|
||||
|
||||
relocs = (struct cs_reloc_legacy *)cs->relocs;
|
||||
for (i = 0; i < cs->crelocs; i++) {
|
||||
radeon_bo_legacy_pending(relocs[i].base.bo, csm->pending_age);
|
||||
radeon_bo_unref(relocs[i].base.bo);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cs_emit(struct radeon_cs *cs)
|
||||
{
|
||||
struct cs_manager_legacy *csm = (struct cs_manager_legacy*)cs->csm;
|
||||
drm_radeon_cmd_buffer_t cmd;
|
||||
drm_r300_cmd_header_t age;
|
||||
uint64_t ull;
|
||||
int r;
|
||||
|
||||
/* please flush pipe do all pending work */
|
||||
cs_write_dword(cs, cmdpacket0(R300_SC_SCREENDOOR, 1));
|
||||
cs_write_dword(cs, 0x0);
|
||||
cs_write_dword(cs, cmdpacket0(R300_SC_SCREENDOOR, 1));
|
||||
cs_write_dword(cs, 0x00FFFFFF);
|
||||
cs_write_dword(cs, cmdpacket0(R300_SC_HYPERZ, 1));
|
||||
cs_write_dword(cs, 0x0);
|
||||
cs_write_dword(cs, cmdpacket0(R300_US_CONFIG, 1));
|
||||
cs_write_dword(cs, 0x0);
|
||||
cs_write_dword(cs, cmdpacket0(R300_ZB_CNTL, 1));
|
||||
cs_write_dword(cs, 0x0);
|
||||
cs_write_dword(cs, cmdwait(R300_WAIT_3D));
|
||||
cs_write_dword(cs, cmdpacket0(R300_RB3D_DSTCACHE_CTLSTAT, 1));
|
||||
cs_write_dword(cs, R300_RB3D_DSTCACHE_CTLSTAT_DC_FLUSH_FLUSH_DIRTY_3D);
|
||||
cs_write_dword(cs, cmdpacket0(R300_ZB_ZCACHE_CTLSTAT, 1));
|
||||
cs_write_dword(cs, R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE);
|
||||
cs_write_dword(cs, cmdwait(R300_WAIT_3D | R300_WAIT_3D_CLEAN));
|
||||
|
||||
/* append buffer age */
|
||||
age.scratch.cmd_type = R300_CMD_SCRATCH;
|
||||
/* Scratch register 2 corresponds to what radeonGetAge polls */
|
||||
csm->pending_age = 0;
|
||||
csm->pending_count = 1;
|
||||
ull = (uint64_t) (intptr_t) &csm->pending_age;
|
||||
age.scratch.reg = 2;
|
||||
age.scratch.n_bufs = 1;
|
||||
age.scratch.flags = 0;
|
||||
radeon_cs_write_dword(cs, age.u);
|
||||
radeon_cs_write_dword(cs, ull & 0xffffffff);
|
||||
radeon_cs_write_dword(cs, ull >> 32);
|
||||
radeon_cs_write_dword(cs, 0);
|
||||
|
||||
|
||||
r = cs_process_relocs(cs);
|
||||
if (r) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
cmd.buf = (char *)cs->packets;
|
||||
cmd.bufsz = cs->cdw * 4;
|
||||
if (csm->ctx->state.scissor.enabled) {
|
||||
cmd.nbox = csm->ctx->state.scissor.numClipRects;
|
||||
cmd.boxes = (drm_clip_rect_t *) csm->ctx->state.scissor.pClipRects;
|
||||
} else {
|
||||
cmd.nbox = csm->ctx->numClipRects;
|
||||
cmd.boxes = (drm_clip_rect_t *) csm->ctx->pClipRects;
|
||||
}
|
||||
|
||||
r = drmCommandWrite(cs->csm->fd, DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));
|
||||
cs_set_age(cs);
|
||||
return r;
|
||||
}
|
||||
|
||||
static int cs_destroy(struct radeon_cs *cs)
|
||||
{
|
||||
free(cs->relocs);
|
||||
free(cs->packets);
|
||||
free(cs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cs_erase(struct radeon_cs *cs)
|
||||
{
|
||||
free(cs->relocs);
|
||||
cs->relocs_total_size = 0;
|
||||
cs->relocs = NULL;
|
||||
cs->crelocs = 0;
|
||||
cs->cdw = 0;
|
||||
cs->section = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cs_need_flush(struct radeon_cs *cs)
|
||||
{
|
||||
/* FIXME: we should get the texture heap size */
|
||||
return (cs->relocs_total_size > (7*1024*1024));
|
||||
}
|
||||
|
||||
/* Dispatch table binding the generic radeon_cs interface to the legacy
 * DRM_RADEON_CMDBUF implementations above. Positional initializers:
 * the order must match struct radeon_cs_funcs (create, write_dword,
 * write_reloc, begin, end, emit, destroy, erase, need_flush). */
struct radeon_cs_funcs radeon_cs_funcs = {
    cs_create,
    cs_write_dword,
    cs_write_reloc,
    cs_begin,
    cs_end,
    cs_emit,
    cs_destroy,
    cs_erase,
    cs_need_flush
};
|
||||
|
||||
struct radeon_cs_manager *radeon_cs_manager_legacy(struct radeon_context *ctx)
|
||||
{
|
||||
struct cs_manager_legacy *csm;
|
||||
|
||||
csm = (struct cs_manager_legacy*)
|
||||
calloc(1, sizeof(struct cs_manager_legacy));
|
||||
if (csm == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
csm->base.funcs = &radeon_cs_funcs;
|
||||
csm->base.fd = ctx->dri.fd;
|
||||
csm->ctx = ctx;
|
||||
csm->pending_age = 1;
|
||||
return (struct radeon_cs_manager*)csm;
|
||||
}
|
||||
40
src/mesa/drivers/dri/radeon/radeon_cs_legacy.h
Normal file
40
src/mesa/drivers/dri/radeon/radeon_cs_legacy.h
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
* Copyright © 2008 Nicolai Haehnle
|
||||
* Copyright © 2008 Jérôme Glisse
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*/
|
||||
/*
|
||||
* Authors:
|
||||
* Aapo Tahkola <aet@rasterburn.org>
|
||||
* Nicolai Haehnle <prefect_@gmx.net>
|
||||
* Jérôme Glisse <glisse@freedesktop.org>
|
||||
*/
|
||||
#ifndef RADEON_CS_LEGACY_H
#define RADEON_CS_LEGACY_H

#include "radeon_cs.h"
#include "radeon_context.h"

/* Create the legacy (DRM_RADEON_CMDBUF) command-stream manager for a
 * context. Returns NULL on allocation failure. */
struct radeon_cs_manager *radeon_cs_manager_legacy(struct radeon_context *ctx);

#endif
|
||||
|
|
@ -45,6 +45,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "radeon_chipset.h"
|
||||
#include "radeon_macros.h"
|
||||
#include "radeon_screen.h"
|
||||
#include "radeon_buffer.h"
|
||||
#if !RADEON_COMMON
|
||||
#include "radeon_context.h"
|
||||
#include "radeon_span.h"
|
||||
|
|
@ -70,6 +71,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
/* Radeon configuration
|
||||
*/
|
||||
#include "xmlpool.h"
|
||||
#include "radeon_bo_legacy.h"
|
||||
|
||||
#if !RADEON_COMMON /* R100 */
|
||||
PUBLIC const char __driConfigOptions[] =
|
||||
|
|
@ -960,6 +962,13 @@ radeonCreateScreen( __DRIscreenPrivate *sPriv )
|
|||
|
||||
screen->driScreen = sPriv;
|
||||
screen->sarea_priv_offset = dri_priv->sarea_priv_offset;
|
||||
screen->sarea = (drm_radeon_sarea_t *) ((GLubyte *) sPriv->pSAREA +
|
||||
screen->sarea_priv_offset);
|
||||
screen->bom = radeon_bo_manager_legacy(screen);
|
||||
if (screen->bom == NULL) {
|
||||
free(screen);
|
||||
return NULL;
|
||||
}
|
||||
return screen;
|
||||
}
|
||||
|
||||
|
|
@ -973,6 +982,8 @@ radeonDestroyScreen( __DRIscreenPrivate *sPriv )
|
|||
if (!screen)
|
||||
return;
|
||||
|
||||
radeon_bo_manager_legacy_shutdown(screen->bom);
|
||||
|
||||
if ( screen->gartTextures.map ) {
|
||||
drmUnmap( screen->gartTextures.map, screen->gartTextures.size );
|
||||
}
|
||||
|
|
@ -1002,6 +1013,160 @@ radeonInitDriver( __DRIscreenPrivate *sPriv )
|
|||
return GL_TRUE;
|
||||
}
|
||||
|
||||
#if RADEON_COMMON && defined(RADEON_COMMON_FOR_R300)
|
||||
/* AllocStorage hook for window-system renderbuffers: the storage lives
 * in the fixed framebuffer BOs, so just record the new size and format.
 * Never fails. */
static GLboolean
radeon_alloc_window_storage(GLcontext *ctx, struct gl_renderbuffer *rb,
                            GLenum intFormat, GLuint w, GLuint h)
{
    rb->Width = w;
    rb->Height = h;
    rb->_ActualFormat = intFormat;

    return GL_TRUE;
}
|
||||
|
||||
|
||||
/* Create a radeon renderbuffer for a window-system drawable, filling in
 * the Mesa renderbuffer fields for the given internal format and
 * installing the radeon span functions.
 * Returns NULL on allocation failure or an unrecognized format. */
static struct radeon_renderbuffer *
radeon_create_renderbuffer(GLenum format, __DRIdrawablePrivate *driDrawPriv)
{
    struct radeon_renderbuffer *ret;

    ret = CALLOC_STRUCT(radeon_renderbuffer);
    if (!ret)
        return NULL;

    _mesa_init_renderbuffer(&ret->base, 0);

    /* XXX format junk */
    switch (format) {
    case GL_RGB5:
        ret->base._ActualFormat = GL_RGB5;
        ret->base._BaseFormat = GL_RGBA;
        ret->base.RedBits = 5;
        ret->base.GreenBits = 6;
        ret->base.BlueBits = 5;
        ret->base.DataType = GL_UNSIGNED_BYTE;
        break;
    case GL_RGBA8:
        ret->base._ActualFormat = GL_RGBA8;
        ret->base._BaseFormat = GL_RGBA;
        ret->base.RedBits = 8;
        ret->base.GreenBits = 8;
        ret->base.BlueBits = 8;
        ret->base.AlphaBits = 8;
        ret->base.DataType = GL_UNSIGNED_BYTE;
        break;
    case GL_STENCIL_INDEX8_EXT:
        ret->base._ActualFormat = GL_STENCIL_INDEX8_EXT;
        ret->base._BaseFormat = GL_STENCIL_INDEX;
        ret->base.StencilBits = 8;
        ret->base.DataType = GL_UNSIGNED_BYTE;
        break;
    case GL_DEPTH_COMPONENT16:
        ret->base._ActualFormat = GL_DEPTH_COMPONENT16;
        ret->base._BaseFormat = GL_DEPTH_COMPONENT;
        ret->base.DepthBits = 16;
        ret->base.DataType = GL_UNSIGNED_SHORT;
        break;
    case GL_DEPTH_COMPONENT24:
        /* 24-bit depth is stored in a depth/stencil surface */
        ret->base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
        ret->base._BaseFormat = GL_DEPTH_COMPONENT;
        ret->base.DepthBits = 24;
        ret->base.DataType = GL_UNSIGNED_INT;
        break;
    case GL_DEPTH24_STENCIL8_EXT:
        ret->base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
        ret->base._BaseFormat = GL_DEPTH_STENCIL_EXT;
        ret->base.DepthBits = 24;
        ret->base.StencilBits = 8;
        ret->base.DataType = GL_UNSIGNED_INT_24_8_EXT;
        break;
    default:
        fprintf(stderr, "%s: Unknown format 0x%04x\n", __FUNCTION__, format);
        _mesa_delete_renderbuffer(&ret->base);
        return NULL;
    }

    ret->dPriv = driDrawPriv;
    ret->base.InternalFormat = format;

    ret->base.AllocStorage = radeon_alloc_window_storage;

    radeonSetSpanFunctions(ret);

    return ret;
}
|
||||
|
||||
/**
|
||||
* Create the Mesa framebuffer and renderbuffers for a given window/drawable.
|
||||
*
|
||||
* \todo This function (and its interface) will need to be updated to support
|
||||
* pbuffers.
|
||||
*/
|
||||
/**
 * Create the Mesa framebuffer and renderbuffers for a given window/drawable.
 *
 * Hardware renderbuffers are created for front, back (if double
 * buffered), depth, and stencil as dictated by the visual; software
 * (swrast) renderbuffers fill in accum and anything the hardware
 * doesn't cover.
 *
 * NOTE(review): the radeon_create_renderbuffer() results are not
 * NULL-checked before _mesa_add_renderbuffer(&x->base) — on allocation
 * failure this dereferences (near-)NULL. Confirm whether OOM here is
 * considered unrecoverable.
 *
 * \todo This function (and its interface) will need to be updated to support
 * pbuffers.
 */
static GLboolean
radeonCreateBuffer( __DRIscreenPrivate *driScrnPriv,
                    __DRIdrawablePrivate *driDrawPriv,
                    const __GLcontextModes *mesaVis,
                    GLboolean isPixmap )
{
    radeonScreenPtr screen = (radeonScreenPtr) driScrnPriv->private;

    const GLboolean swDepth = GL_FALSE;
    const GLboolean swAlpha = GL_FALSE;
    const GLboolean swAccum = mesaVis->accumRedBits > 0;
    /* 24-bit depth visuals carry hardware stencil; others fall back to
     * software stencil */
    const GLboolean swStencil = mesaVis->stencilBits > 0 &&
        mesaVis->depthBits != 24;
    GLenum rgbFormat = (mesaVis->redBits == 5 ? GL_RGB5 : GL_RGBA8);
    GLenum depthFormat = GL_NONE;
    struct gl_framebuffer *fb = _mesa_create_framebuffer(mesaVis);

    if (mesaVis->depthBits == 16)
        depthFormat = GL_DEPTH_COMPONENT16;
    else if (mesaVis->depthBits == 24)
        depthFormat = GL_DEPTH_COMPONENT24;

    /* front color renderbuffer */
    {
        struct radeon_renderbuffer *front =
            radeon_create_renderbuffer(rgbFormat, driDrawPriv);
        _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &front->base);
    }

    /* back color renderbuffer */
    if (mesaVis->doubleBufferMode) {
        struct radeon_renderbuffer *back =
            radeon_create_renderbuffer(rgbFormat, driDrawPriv);
        _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &back->base);
    }

    /* depth renderbuffer */
    if (depthFormat != GL_NONE) {
        struct radeon_renderbuffer *depth =
            radeon_create_renderbuffer(depthFormat, driDrawPriv);
        _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depth->base);
        depth->depthHasSurface = screen->depthHasSurface;
    }

    /* stencil renderbuffer */
    if (mesaVis->stencilBits > 0 && !swStencil) {
        struct radeon_renderbuffer *stencil =
            radeon_create_renderbuffer(GL_STENCIL_INDEX8_EXT, driDrawPriv);
        _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &stencil->base);
        stencil->depthHasSurface = screen->depthHasSurface;
    }

    /* software fallbacks for everything the hardware doesn't provide */
    _mesa_add_soft_renderbuffers(fb,
                                 GL_FALSE, /* color */
                                 swDepth,
                                 swStencil,
                                 swAccum,
                                 swAlpha,
                                 GL_FALSE /* aux */);
    driDrawPriv->driverPrivate = (void *) fb;

    return (driDrawPriv->driverPrivate != NULL);
}
|
||||
#else
|
||||
|
||||
/**
|
||||
* Create the Mesa framebuffer and renderbuffers for a given window/drawable.
|
||||
|
|
@ -1101,7 +1266,7 @@ radeonCreateBuffer( __DRIscreenPrivate *driScrnPriv,
|
|||
return (driDrawPriv->driverPrivate != NULL);
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static void
|
||||
radeonDestroyBuffer(__DRIdrawablePrivate *driDrawPriv)
|
||||
|
|
@ -1197,11 +1362,11 @@ radeonInitScreen(__DRIscreenPrivate *psp)
|
|||
if (!radeonInitDriver(psp))
|
||||
return NULL;
|
||||
|
||||
/* for now fill in all modes */
|
||||
return radeonFillInModes( psp,
|
||||
dri_priv->bpp,
|
||||
(dri_priv->bpp == 16) ? 16 : 24,
|
||||
(dri_priv->bpp == 16) ? 0 : 8,
|
||||
(dri_priv->backOffset != dri_priv->depthOffset) );
|
||||
(dri_priv->bpp == 16) ? 0 : 8, 1);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
#include "radeon_reg.h"
|
||||
#include "drm_sarea.h"
|
||||
#include "xmlconfig.h"
|
||||
#include "radeon_bo.h"
|
||||
|
||||
|
||||
typedef struct {
|
||||
|
|
@ -54,7 +55,7 @@ typedef struct {
|
|||
drmAddress map; /* Mapping of the DRM region */
|
||||
} radeonRegionRec, *radeonRegionPtr;
|
||||
|
||||
typedef struct {
|
||||
typedef struct radeon_screen {
|
||||
int chip_family;
|
||||
int chip_flags;
|
||||
int cpp;
|
||||
|
|
@ -106,6 +107,8 @@ typedef struct {
|
|||
const __DRIextension *extensions[8];
|
||||
|
||||
int num_gb_pipes;
|
||||
drm_radeon_sarea_t *sarea; /* Private SAREA data */
|
||||
struct radeon_bo_manager *bom;
|
||||
} radeonScreenRec, *radeonScreenPtr;
|
||||
|
||||
#define IS_R100_CLASS(screen) \
|
||||
|
|
|
|||
|
|
@ -44,7 +44,13 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|||
|
||||
#include "drirenderbuffer.h"
|
||||
|
||||
extern void radeonInitSpanFuncs(GLcontext * ctx);
|
||||
extern void radeonSetSpanFunctions(driRenderbuffer * rb, const GLvisual * vis);
|
||||
#include "radeon_buffer.h"
|
||||
|
||||
extern void radeonInitSpanFuncs(GLcontext * ctx);
|
||||
|
||||
#if COMPILE_R300
|
||||
extern void radeonSetSpanFunctions(struct radeon_renderbuffer *rrb);
|
||||
#else
|
||||
extern void radeonSetSpanFunctions(driRenderbuffer * rb, const GLvisual * vis);
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue