freedreno/{a4xx,a5xx}: switch to CP_LOAD_STATE4

The layout of the CP_LOAD_STATE packet is slightly different on a4xx+.
Switch to the a4xx+-specific CP_LOAD_STATE4 packet to get the correct encoding.

Signed-off-by: Rob Clark <robdclark@gmail.com>
Rob Clark 2017-04-03 13:38:02 -04:00
parent dfdb1fed78
commit 9567beab36
5 changed files with 124 additions and 127 deletions
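For reference, here is a minimal sketch of the new encoding for the common case of an inline (SS4_DIRECT) constant upload to the vertex shader on a4xx. It is based on the fd4_emit_const hunk below; the helper name emit_vs_consts_direct and its standalone form are illustrative only, not the driver's actual entry point.

/* a4xx: CP_LOAD_STATE4 has two header dwords, followed by the inline payload.
 * Illustrative sketch only; mirrors the new fd4_emit_const path below.
 */
static void
emit_vs_consts_direct(struct fd_ringbuffer *ring, uint32_t regid,
		uint32_t sizedwords, const uint32_t *dwords)
{
	uint32_t i;

	OUT_PKT3(ring, CP_LOAD_STATE4, 2 + sizedwords);
	/* dst offset and unit count are in vec4 units, hence the /4: */
	OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid / 4) |
			CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
			CP_LOAD_STATE4_0_STATE_BLOCK(SB4_VS_SHADER) |
			CP_LOAD_STATE4_0_NUM_UNIT(sizedwords / 4));
	OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
			CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
	/* inline payload: */
	for (i = 0; i < sizedwords; i++)
		OUT_RING(ring, dwords[i]);
}

On a5xx the same state is emitted with OUT_PKT7 and one extra header dword, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0), as the fd5 hunks below show.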

==== changed file ====

@@ -45,9 +45,9 @@
#include "fd4_format.h"
#include "fd4_zsa.h"
-static const enum adreno_state_block sb[] = {
-[SHADER_VERTEX] = SB_VERT_SHADER,
-[SHADER_FRAGMENT] = SB_FRAG_SHADER,
+static const enum a4xx_state_block sb[] = {
+[SHADER_VERTEX] = SB4_VS_SHADER,
+[SHADER_FRAGMENT] = SB4_FS_SHADER,
};
/* regid: base const register
@@ -60,31 +60,31 @@ fd4_emit_const(struct fd_ringbuffer *ring, enum shader_t type,
const uint32_t *dwords, struct pipe_resource *prsc)
{
uint32_t i, sz;
-enum adreno_state_src src;
+enum a4xx_state_src src;
debug_assert((regid % 4) == 0);
debug_assert((sizedwords % 4) == 0);
if (prsc) {
sz = 0;
-src = 0x2; // TODO ??
+src = SS4_INDIRECT;
} else {
sz = sizedwords;
-src = SS_DIRECT;
+src = SS4_DIRECT;
}
-OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz);
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
-CP_LOAD_STATE_0_STATE_SRC(src) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
-CP_LOAD_STATE_0_NUM_UNIT(sizedwords/4));
+OUT_PKT3(ring, CP_LOAD_STATE4, 2 + sz);
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid/4) |
+CP_LOAD_STATE4_0_STATE_SRC(src) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb[type]) |
+CP_LOAD_STATE4_0_NUM_UNIT(sizedwords/4));
if (prsc) {
struct fd_bo *bo = fd_resource(prsc)->bo;
OUT_RELOC(ring, bo, offset,
-CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS), 0);
} else {
-OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
+OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
}
for (i = 0; i < sz; i++) {
@@ -101,13 +101,13 @@ fd4_emit_const_bo(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
debug_assert((regid % 4) == 0);
-OUT_PKT3(ring, CP_LOAD_STATE, 2 + anum);
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
-CP_LOAD_STATE_0_NUM_UNIT(anum/4));
-OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
+OUT_PKT3(ring, CP_LOAD_STATE4, 2 + anum);
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid/4) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb[type]) |
+CP_LOAD_STATE4_0_NUM_UNIT(anum/4));
+OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
for (i = 0; i < num; i++) {
if (prscs[i]) {
@@ -127,12 +127,12 @@ fd4_emit_const_bo(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
static void
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
-enum adreno_state_block sb, struct fd_texture_stateobj *tex,
+enum a4xx_state_block sb, struct fd_texture_stateobj *tex,
const struct ir3_shader_variant *v)
{
static const uint32_t bcolor_reg[] = {
-[SB_VERT_TEX] = REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
-[SB_FRAG_TEX] = REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
+[SB4_VS_TEX] = REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
+[SB4_FS_TEX] = REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
};
struct fd4_context *fd4_ctx = fd4_context(ctx);
bool needs_border = false;
@@ -148,13 +148,13 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
num_samplers = align(tex->num_samplers, 2);
/* output sampler state: */
-OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * num_samplers));
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-CP_LOAD_STATE_0_NUM_UNIT(num_samplers));
-OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
-CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (2 * num_samplers));
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+CP_LOAD_STATE4_0_NUM_UNIT(num_samplers));
+OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
+CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
for (i = 0; i < tex->num_samplers; i++) {
static const struct fd4_sampler_stateobj dummy_sampler = {};
const struct fd4_sampler_stateobj *sampler = tex->samplers[i] ?
@@ -176,13 +176,13 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
unsigned num_textures = tex->num_textures + v->astc_srgb.count;
/* emit texture state: */
-OUT_PKT3(ring, CP_LOAD_STATE, 2 + (8 * num_textures));
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-CP_LOAD_STATE_0_NUM_UNIT(num_textures));
-OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
-CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (8 * num_textures));
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+CP_LOAD_STATE4_0_NUM_UNIT(num_textures));
+OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
+CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
for (i = 0; i < tex->num_textures; i++) {
static const struct fd4_pipe_sampler_view dummy_view = {};
const struct fd4_pipe_sampler_view *view = tex->textures[i] ?
@@ -267,13 +267,13 @@ fd4_emit_gmem_restore_tex(struct fd_ringbuffer *ring, unsigned nr_bufs,
}
/* output sampler state: */
-OUT_PKT3(ring, CP_LOAD_STATE, 2 + (2 * nr_bufs));
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
-CP_LOAD_STATE_0_NUM_UNIT(nr_bufs));
-OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
-CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (2 * nr_bufs));
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(SB4_FS_TEX) |
+CP_LOAD_STATE4_0_NUM_UNIT(nr_bufs));
+OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
+CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
for (i = 0; i < nr_bufs; i++) {
OUT_RING(ring, A4XX_TEX_SAMP_0_XY_MAG(A4XX_TEX_NEAREST) |
A4XX_TEX_SAMP_0_XY_MIN(A4XX_TEX_NEAREST) |
@@ -284,13 +284,13 @@ fd4_emit_gmem_restore_tex(struct fd_ringbuffer *ring, unsigned nr_bufs,
}
/* emit texture state: */
-OUT_PKT3(ring, CP_LOAD_STATE, 2 + (8 * nr_bufs));
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_TEX) |
-CP_LOAD_STATE_0_NUM_UNIT(nr_bufs));
-OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
-CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
+OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (8 * nr_bufs));
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(SB4_FS_TEX) |
+CP_LOAD_STATE4_0_NUM_UNIT(nr_bufs));
+OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
+CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
for (i = 0; i < nr_bufs; i++) {
if (bufs[i]) {
struct fd_resource *rsc = fd_resource(bufs[i]->texture);
@@ -747,14 +747,14 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
if (dirty & FD_DIRTY_VERTTEX) {
if (vp->has_samp)
-emit_textures(ctx, ring, SB_VERT_TEX, &ctx->verttex, vp);
+emit_textures(ctx, ring, SB4_VS_TEX, &ctx->verttex, vp);
else
dirty &= ~FD_DIRTY_VERTTEX;
}
if (dirty & FD_DIRTY_FRAGTEX) {
if (fp->has_samp)
-emit_textures(ctx, ring, SB_FRAG_TEX, &ctx->fragtex, fp);
+emit_textures(ctx, ring, SB4_FS_TEX, &ctx->fragtex, fp);
else
dirty &= ~FD_DIRTY_FRAGTEX;
}

==== changed file ====

@@ -94,32 +94,32 @@ emit_shader(struct fd_ringbuffer *ring, const struct ir3_shader_variant *so)
uint32_t i, sz, *bin;
if (so->type == SHADER_VERTEX) {
-sb = SB_VERT_SHADER;
+sb = SB4_VS_SHADER;
} else {
-sb = SB_FRAG_SHADER;
+sb = SB4_FS_SHADER;
}
if (fd_mesa_debug & FD_DBG_DIRECT) {
sz = si->sizedwords;
-src = SS_DIRECT;
+src = SS4_DIRECT;
bin = fd_bo_map(so->bo);
} else {
sz = 0;
-src = 2; // enums different on a4xx..
+src = SS4_INDIRECT;
bin = NULL;
}
-OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz);
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(src) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-CP_LOAD_STATE_0_NUM_UNIT(so->instrlen));
+OUT_PKT3(ring, CP_LOAD_STATE4, 2 + sz);
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(src) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+CP_LOAD_STATE4_0_NUM_UNIT(so->instrlen));
if (bin) {
-OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER));
+OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER));
} else {
OUT_RELOC(ring, so->bo, 0,
-CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER), 0);
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER), 0);
}
/* for how clever coverity is, it is sometimes rather dull, and

==== changed file ====

@@ -43,9 +43,9 @@
#include "fd5_format.h"
#include "fd5_zsa.h"
-static const enum adreno_state_block sb[] = {
-[SHADER_VERTEX] = SB_VERT_SHADER,
-[SHADER_FRAGMENT] = SB_FRAG_SHADER,
+static const enum a4xx_state_block sb[] = {
+[SHADER_VERTEX] = SB4_VS_SHADER,
+[SHADER_FRAGMENT] = SB4_FS_SHADER,
};
/* regid: base const register
@@ -58,32 +58,32 @@ fd5_emit_const(struct fd_ringbuffer *ring, enum shader_t type,
const uint32_t *dwords, struct pipe_resource *prsc)
{
uint32_t i, sz;
-enum adreno_state_src src;
+enum a4xx_state_src src;
debug_assert((regid % 4) == 0);
debug_assert((sizedwords % 4) == 0);
if (prsc) {
sz = 0;
-src = 0x2; // TODO ??
+src = SS4_INDIRECT;
} else {
sz = sizedwords;
-src = SS_DIRECT;
+src = SS4_DIRECT;
}
-OUT_PKT7(ring, CP_LOAD_STATE, 3 + sz);
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
-CP_LOAD_STATE_0_STATE_SRC(src) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
-CP_LOAD_STATE_0_NUM_UNIT(sizedwords/4));
+OUT_PKT7(ring, CP_LOAD_STATE4, 3 + sz);
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid/4) |
+CP_LOAD_STATE4_0_STATE_SRC(src) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb[type]) |
+CP_LOAD_STATE4_0_NUM_UNIT(sizedwords/4));
if (prsc) {
struct fd_bo *bo = fd_resource(prsc)->bo;
OUT_RELOC(ring, bo, offset,
-CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS), 0);
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS), 0);
} else {
-OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
-OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
+OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
+OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
dwords = (uint32_t *)&((uint8_t *)dwords)[offset];
}
for (i = 0; i < sz; i++) {
@@ -100,14 +100,14 @@ fd5_emit_const_bo(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
debug_assert((regid % 4) == 0);
-OUT_PKT7(ring, CP_LOAD_STATE, 3 + (2 * anum));
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(regid/4) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb[type]) |
-CP_LOAD_STATE_0_NUM_UNIT(anum/2));
-OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS));
-OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
+OUT_PKT7(ring, CP_LOAD_STATE4, 3 + (2 * anum));
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid/4) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb[type]) |
+CP_LOAD_STATE4_0_NUM_UNIT(anum/2));
+OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
+OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
for (i = 0; i < num; i++) {
if (prscs[i]) {
@@ -276,22 +276,22 @@ emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
static bool
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
-enum adreno_state_block sb, struct fd_texture_stateobj *tex)
+enum a4xx_state_block sb, struct fd_texture_stateobj *tex)
{
bool needs_border = false;
-unsigned bcolor_offset = (sb == SB_FRAG_TEX) ? ctx->verttex.num_samplers : 0;
+unsigned bcolor_offset = (sb == SB4_FS_TEX) ? ctx->verttex.num_samplers : 0;
unsigned i;
if (tex->num_samplers > 0) {
/* output sampler state: */
-OUT_PKT7(ring, CP_LOAD_STATE, 3 + (4 * tex->num_samplers));
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-CP_LOAD_STATE_0_NUM_UNIT(tex->num_samplers));
-OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
-CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
-OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
+OUT_PKT7(ring, CP_LOAD_STATE4, 3 + (4 * tex->num_samplers));
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+CP_LOAD_STATE4_0_NUM_UNIT(tex->num_samplers));
+OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
+CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
+OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
for (i = 0; i < tex->num_samplers; i++) {
static const struct fd5_sampler_stateobj dummy_sampler = {};
const struct fd5_sampler_stateobj *sampler = tex->samplers[i] ?
@@ -311,14 +311,14 @@ emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
unsigned num_textures = tex->num_textures;
/* emit texture state: */
-OUT_PKT7(ring, CP_LOAD_STATE, 3 + (12 * num_textures));
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-CP_LOAD_STATE_0_NUM_UNIT(num_textures));
-OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
-CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
-OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
+OUT_PKT7(ring, CP_LOAD_STATE4, 3 + (12 * num_textures));
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+CP_LOAD_STATE4_0_NUM_UNIT(num_textures));
+OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
+CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
+OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
for (i = 0; i < tex->num_textures; i++) {
static const struct fd5_pipe_sampler_view dummy_view = {};
const struct fd5_pipe_sampler_view *view = tex->textures[i] ?
@@ -653,7 +653,7 @@ fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
if (dirty & FD_DIRTY_VERTTEX) {
if (vp->has_samp) {
-needs_border |= emit_textures(ctx, ring, SB_VERT_TEX, &ctx->verttex);
+needs_border |= emit_textures(ctx, ring, SB4_VS_TEX, &ctx->verttex);
OUT_PKT4(ring, REG_A5XX_TPL1_VS_TEX_COUNT, 1);
OUT_RING(ring, ctx->verttex.num_textures);
} else {
@@ -663,7 +663,7 @@ fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
if (dirty & FD_DIRTY_FRAGTEX) {
if (fp->has_samp) {
-needs_border |= emit_textures(ctx, ring, SB_FRAG_TEX, &ctx->fragtex);
+needs_border |= emit_textures(ctx, ring, SB4_FS_TEX, &ctx->fragtex);
OUT_PKT4(ring, REG_A5XX_TPL1_FS_TEX_COUNT, 1);
OUT_RING(ring, ctx->fragtex.num_textures);
} else {

==== changed file ====

@@ -31,9 +31,6 @@
#include "a5xx.xml.h"
-// XXX temp hack
-#define CP_LOAD_STATE_2_EXT_SRC_ADDR_HI CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI
enum a5xx_vtx_fmt fd5_pipe2vtx(enum pipe_format format);
enum a5xx_tex_fmt fd5_pipe2tex(enum pipe_format format);
enum a5xx_color_fmt fd5_pipe2color(enum pipe_format format);

==== changed file ====

@@ -88,38 +88,38 @@ static void
emit_shader(struct fd_ringbuffer *ring, const struct ir3_shader_variant *so)
{
const struct ir3_info *si = &so->info;
-enum adreno_state_block sb;
-enum adreno_state_src src;
+enum a4xx_state_block sb;
+enum a4xx_state_src src;
uint32_t i, sz, *bin;
if (so->type == SHADER_VERTEX) {
-sb = SB_VERT_SHADER;
+sb = SB4_VS_SHADER;
} else {
-sb = SB_FRAG_SHADER;
+sb = SB4_FS_SHADER;
}
if (fd_mesa_debug & FD_DBG_DIRECT) {
sz = si->sizedwords;
-src = SS_DIRECT;
+src = SS4_DIRECT;
bin = fd_bo_map(so->bo);
} else {
sz = 0;
-src = 2; // enums different on a5xx..
+src = SS4_INDIRECT;
bin = NULL;
}
-OUT_PKT7(ring, CP_LOAD_STATE, 3 + sz);
-OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) |
-CP_LOAD_STATE_0_STATE_SRC(src) |
-CP_LOAD_STATE_0_STATE_BLOCK(sb) |
-CP_LOAD_STATE_0_NUM_UNIT(so->instrlen));
+OUT_PKT7(ring, CP_LOAD_STATE4, 3 + sz);
+OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
+CP_LOAD_STATE4_0_STATE_SRC(src) |
+CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
+CP_LOAD_STATE4_0_NUM_UNIT(so->instrlen));
if (bin) {
-OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) |
-CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER));
-OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0));
+OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER));
+OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
} else {
OUT_RELOC(ring, so->bo, 0,
-CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER), 0);
+CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER), 0);
}
/* for how clever coverity is, it is sometimes rather dull, and