2012-07-19 15:20:45 +02:00
|
|
|
/*
|
|
|
|
|
* Copyright 2012 Advanced Micro Devices, Inc.
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* on the rights to use, copy, modify, merge, publish, distribute, sub
|
|
|
|
|
* license, and/or sell copies of the Software, and to permit persons to whom
|
|
|
|
|
* the Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
|
|
|
|
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
|
|
|
|
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
|
|
|
|
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
|
*
|
|
|
|
|
* Authors:
|
|
|
|
|
* Christian König <christian.koenig@amd.com>
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include "util/u_memory.h"
|
|
|
|
|
#include "util/u_framebuffer.h"
|
|
|
|
|
#include "util/u_blitter.h"
|
|
|
|
|
#include "tgsi/tgsi_parse.h"
|
2014-01-04 18:44:33 +01:00
|
|
|
#include "si_pipe.h"
|
|
|
|
|
#include "si_shader.h"
|
2012-07-19 15:20:45 +02:00
|
|
|
#include "si_state.h"
|
2013-08-14 01:04:39 +02:00
|
|
|
#include "../radeon/r600_cs.h"
|
2012-07-19 15:20:45 +02:00
|
|
|
#include "sid.h"
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Shaders
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void si_pipe_shader_vs(struct pipe_context *ctx, struct si_pipe_shader *shader)
|
|
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct si_context *sctx = (struct si_context *)ctx;
|
2012-07-19 15:20:45 +02:00
|
|
|
struct si_pm4_state *pm4;
|
|
|
|
|
unsigned num_sgprs, num_user_sgprs;
|
2013-03-21 18:02:52 +01:00
|
|
|
unsigned nparams, i, vgpr_comp_cnt;
|
2012-07-19 15:20:45 +02:00
|
|
|
uint64_t va;
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_delete_state(sctx, vs, shader->pm4);
|
|
|
|
|
pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
|
2013-06-07 14:04:58 -04:00
|
|
|
|
|
|
|
|
if (pm4 == NULL)
|
|
|
|
|
return;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
/* Certain attributes (position, psize, etc.) don't count as params.
|
|
|
|
|
* VS is required to export at least one param and r600_shader_from_tgsi()
|
|
|
|
|
* takes care of adding a dummy export.
|
|
|
|
|
*/
|
|
|
|
|
for (nparams = 0, i = 0 ; i < shader->shader.noutput; i++) {
|
2013-05-03 17:59:34 +02:00
|
|
|
switch (shader->shader.output[i].name) {
|
2013-08-07 11:30:50 +02:00
|
|
|
case TGSI_SEMANTIC_CLIPVERTEX:
|
2013-05-03 17:59:34 +02:00
|
|
|
case TGSI_SEMANTIC_POSITION:
|
|
|
|
|
case TGSI_SEMANTIC_PSIZE:
|
|
|
|
|
break;
|
|
|
|
|
default:
|
2012-07-19 15:20:45 +02:00
|
|
|
nparams++;
|
2013-05-03 17:59:34 +02:00
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
if (nparams < 1)
|
|
|
|
|
nparams = 1;
|
|
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_0286C4_SPI_VS_OUT_CONFIG,
|
|
|
|
|
S_0286C4_VS_EXPORT_COUNT(nparams - 1));
|
|
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_02870C_SPI_SHADER_POS_FORMAT,
|
|
|
|
|
S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
|
2013-08-07 18:14:16 +02:00
|
|
|
S_02870C_POS1_EXPORT_FORMAT(shader->shader.nr_pos_exports > 1 ?
|
2013-05-03 17:59:34 +02:00
|
|
|
V_02870C_SPI_SHADER_4COMP :
|
|
|
|
|
V_02870C_SPI_SHADER_NONE) |
|
2013-08-07 18:14:16 +02:00
|
|
|
S_02870C_POS2_EXPORT_FORMAT(shader->shader.nr_pos_exports > 2 ?
|
2013-05-15 18:09:50 +02:00
|
|
|
V_02870C_SPI_SHADER_4COMP :
|
|
|
|
|
V_02870C_SPI_SHADER_NONE) |
|
2013-08-07 18:14:16 +02:00
|
|
|
S_02870C_POS3_EXPORT_FORMAT(shader->shader.nr_pos_exports > 3 ?
|
2013-05-15 18:09:50 +02:00
|
|
|
V_02870C_SPI_SHADER_4COMP :
|
|
|
|
|
V_02870C_SPI_SHADER_NONE));
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
va = r600_resource_va(ctx->screen, (void *)shader->bo);
|
|
|
|
|
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
|
|
|
|
|
si_pm4_set_reg(pm4, R_00B120_SPI_SHADER_PGM_LO_VS, va >> 8);
|
|
|
|
|
si_pm4_set_reg(pm4, R_00B124_SPI_SHADER_PGM_HI_VS, va >> 40);
|
|
|
|
|
|
2012-09-26 20:42:23 +02:00
|
|
|
num_user_sgprs = SI_VS_NUM_USER_SGPR;
|
2012-07-19 15:20:45 +02:00
|
|
|
num_sgprs = shader->num_sgprs;
|
2013-08-06 10:45:50 +02:00
|
|
|
if (num_user_sgprs > num_sgprs) {
|
|
|
|
|
/* Last 2 reserved SGPRs are used for VCC */
|
|
|
|
|
num_sgprs = num_user_sgprs + 2;
|
|
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
assert(num_sgprs <= 104);
|
|
|
|
|
|
2013-03-21 18:02:52 +01:00
|
|
|
vgpr_comp_cnt = shader->shader.uses_instanceid ? 3 : 0;
|
|
|
|
|
|
2012-07-19 15:20:45 +02:00
|
|
|
si_pm4_set_reg(pm4, R_00B128_SPI_SHADER_PGM_RSRC1_VS,
|
|
|
|
|
S_00B128_VGPRS((shader->num_vgprs - 1) / 4) |
|
2013-03-21 18:02:52 +01:00
|
|
|
S_00B128_SGPRS((num_sgprs - 1) / 8) |
|
|
|
|
|
S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt));
|
2012-07-19 15:20:45 +02:00
|
|
|
si_pm4_set_reg(pm4, R_00B12C_SPI_SHADER_PGM_RSRC2_VS,
|
2013-09-01 23:59:06 +02:00
|
|
|
S_00B12C_USER_SGPR(num_user_sgprs) |
|
|
|
|
|
S_00B12C_SO_BASE0_EN(!!shader->selector->so.stride[0]) |
|
|
|
|
|
S_00B12C_SO_BASE1_EN(!!shader->selector->so.stride[1]) |
|
|
|
|
|
S_00B12C_SO_BASE2_EN(!!shader->selector->so.stride[2]) |
|
|
|
|
|
S_00B12C_SO_BASE3_EN(!!shader->selector->so.stride[3]) |
|
|
|
|
|
S_00B12C_SO_EN(!!shader->selector->so.num_outputs));
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_bind_state(sctx, vs, shader->pm4);
|
|
|
|
|
sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void si_pipe_shader_ps(struct pipe_context *ctx, struct si_pipe_shader *shader)
|
|
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct si_context *sctx = (struct si_context *)ctx;
|
2012-07-19 15:20:45 +02:00
|
|
|
struct si_pm4_state *pm4;
|
2013-12-04 12:28:29 +01:00
|
|
|
unsigned i, exports_ps, spi_ps_in_control, db_shader_control;
|
2012-07-19 15:20:45 +02:00
|
|
|
unsigned num_sgprs, num_user_sgprs;
|
2013-08-09 14:58:21 +02:00
|
|
|
unsigned spi_baryc_cntl = 0, spi_ps_input_ena, spi_shader_z_format;
|
2012-07-19 15:20:45 +02:00
|
|
|
uint64_t va;
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_delete_state(sctx, ps, shader->pm4);
|
|
|
|
|
pm4 = shader->pm4 = si_pm4_alloc_state(sctx);
|
2013-06-07 14:04:58 -04:00
|
|
|
|
|
|
|
|
if (pm4 == NULL)
|
|
|
|
|
return;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2013-08-08 00:26:02 +02:00
|
|
|
db_shader_control = S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z) |
|
2014-01-11 16:00:50 +01:00
|
|
|
S_02880C_ALPHA_TO_MASK_DISABLE(sctx->fb_cb0_is_integer);
|
2013-08-08 00:26:02 +02:00
|
|
|
|
2012-07-19 15:20:45 +02:00
|
|
|
for (i = 0; i < shader->shader.ninput; i++) {
|
2012-09-25 12:41:31 +02:00
|
|
|
switch (shader->shader.input[i].name) {
|
|
|
|
|
case TGSI_SEMANTIC_POSITION:
|
2012-09-06 16:18:11 -04:00
|
|
|
if (shader->shader.input[i].centroid) {
|
2013-08-09 14:58:21 +02:00
|
|
|
/* SPI_BARYC_CNTL.POS_FLOAT_LOCATION
|
2012-09-06 16:18:11 -04:00
|
|
|
* Possible vaules:
|
|
|
|
|
* 0 -> Position = pixel center (default)
|
|
|
|
|
* 1 -> Position = pixel centroid
|
|
|
|
|
* 2 -> Position = iterated sample number XXX:
|
|
|
|
|
* What does this mean?
|
|
|
|
|
*/
|
2013-08-09 14:58:21 +02:00
|
|
|
spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(1);
|
2012-09-06 16:18:11 -04:00
|
|
|
}
|
2012-09-25 12:41:31 +02:00
|
|
|
/* Fall through */
|
|
|
|
|
case TGSI_SEMANTIC_FACE:
|
2012-09-06 16:18:11 -04:00
|
|
|
continue;
|
|
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < shader->shader.noutput; i++) {
|
|
|
|
|
if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION)
|
|
|
|
|
db_shader_control |= S_02880C_Z_EXPORT_ENABLE(1);
|
|
|
|
|
if (shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
|
2012-11-13 17:35:09 +01:00
|
|
|
db_shader_control |= S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(1);
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
2013-03-22 15:59:22 +01:00
|
|
|
if (shader->shader.uses_kill || shader->key.ps.alpha_func != PIPE_FUNC_ALWAYS)
|
2012-07-19 15:20:45 +02:00
|
|
|
db_shader_control |= S_02880C_KILL_ENABLE(1);
|
|
|
|
|
|
|
|
|
|
exports_ps = 0;
|
|
|
|
|
for (i = 0; i < shader->shader.noutput; i++) {
|
|
|
|
|
if (shader->shader.output[i].name == TGSI_SEMANTIC_POSITION ||
|
|
|
|
|
shader->shader.output[i].name == TGSI_SEMANTIC_STENCIL)
|
|
|
|
|
exports_ps |= 1;
|
|
|
|
|
}
|
|
|
|
|
if (!exports_ps) {
|
|
|
|
|
/* always at least export 1 component per pixel */
|
|
|
|
|
exports_ps = 2;
|
|
|
|
|
}
|
|
|
|
|
|
2013-08-19 15:45:32 +02:00
|
|
|
spi_ps_in_control = S_0286D8_NUM_INTERP(shader->shader.ninterp) |
|
|
|
|
|
S_0286D8_BC_OPTIMIZE_DISABLE(1);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl);
|
2012-08-21 14:41:29 +02:00
|
|
|
spi_ps_input_ena = shader->spi_ps_input_ena;
|
|
|
|
|
/* we need to enable at least one of them, otherwise we hang the GPU */
|
2012-09-06 15:41:59 -04:00
|
|
|
assert(G_0286CC_PERSP_SAMPLE_ENA(spi_ps_input_ena) ||
|
|
|
|
|
G_0286CC_PERSP_CENTER_ENA(spi_ps_input_ena) ||
|
|
|
|
|
G_0286CC_PERSP_CENTROID_ENA(spi_ps_input_ena) ||
|
|
|
|
|
G_0286CC_PERSP_PULL_MODEL_ENA(spi_ps_input_ena) ||
|
|
|
|
|
G_0286CC_LINEAR_SAMPLE_ENA(spi_ps_input_ena) ||
|
|
|
|
|
G_0286CC_LINEAR_CENTER_ENA(spi_ps_input_ena) ||
|
|
|
|
|
G_0286CC_LINEAR_CENTROID_ENA(spi_ps_input_ena) ||
|
|
|
|
|
G_0286CC_LINE_STIPPLE_TEX_ENA(spi_ps_input_ena));
|
|
|
|
|
|
2012-08-21 14:41:29 +02:00
|
|
|
si_pm4_set_reg(pm4, R_0286CC_SPI_PS_INPUT_ENA, spi_ps_input_ena);
|
|
|
|
|
si_pm4_set_reg(pm4, R_0286D0_SPI_PS_INPUT_ADDR, spi_ps_input_ena);
|
2012-07-19 15:20:45 +02:00
|
|
|
si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
|
|
|
|
|
|
2012-11-13 17:35:09 +01:00
|
|
|
if (G_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(db_shader_control))
|
|
|
|
|
spi_shader_z_format = V_028710_SPI_SHADER_32_GR;
|
|
|
|
|
else if (G_02880C_Z_EXPORT_ENABLE(db_shader_control))
|
|
|
|
|
spi_shader_z_format = V_028710_SPI_SHADER_32_R;
|
|
|
|
|
else
|
|
|
|
|
spi_shader_z_format = 0;
|
|
|
|
|
si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT, spi_shader_z_format);
|
2012-12-21 15:39:26 +01:00
|
|
|
si_pm4_set_reg(pm4, R_028714_SPI_SHADER_COL_FORMAT,
|
|
|
|
|
shader->spi_shader_col_format);
|
2013-04-30 16:34:10 +02:00
|
|
|
si_pm4_set_reg(pm4, R_02823C_CB_SHADER_MASK, shader->cb_shader_mask);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
va = r600_resource_va(ctx->screen, (void *)shader->bo);
|
|
|
|
|
si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ);
|
|
|
|
|
si_pm4_set_reg(pm4, R_00B020_SPI_SHADER_PGM_LO_PS, va >> 8);
|
|
|
|
|
si_pm4_set_reg(pm4, R_00B024_SPI_SHADER_PGM_HI_PS, va >> 40);
|
|
|
|
|
|
2012-09-26 20:42:23 +02:00
|
|
|
num_user_sgprs = SI_PS_NUM_USER_SGPR;
|
2012-07-19 15:20:45 +02:00
|
|
|
num_sgprs = shader->num_sgprs;
|
2013-08-09 18:36:31 +02:00
|
|
|
/* One SGPR after user SGPRs is pre-loaded with {prim_mask, lds_offset} */
|
|
|
|
|
if ((num_user_sgprs + 1) > num_sgprs) {
|
2013-08-06 10:45:50 +02:00
|
|
|
/* Last 2 reserved SGPRs are used for VCC */
|
2013-08-09 18:36:31 +02:00
|
|
|
num_sgprs = num_user_sgprs + 1 + 2;
|
2013-08-06 10:45:50 +02:00
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
assert(num_sgprs <= 104);
|
|
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_00B028_SPI_SHADER_PGM_RSRC1_PS,
|
|
|
|
|
S_00B028_VGPRS((shader->num_vgprs - 1) / 4) |
|
|
|
|
|
S_00B028_SGPRS((num_sgprs - 1) / 8));
|
|
|
|
|
si_pm4_set_reg(pm4, R_00B02C_SPI_SHADER_PGM_RSRC2_PS,
|
2013-06-19 18:14:01 +02:00
|
|
|
S_00B02C_EXTRA_LDS_SIZE(shader->lds_size) |
|
2012-07-19 15:20:45 +02:00
|
|
|
S_00B02C_USER_SGPR(num_user_sgprs));
|
|
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_02880C_DB_SHADER_CONTROL, db_shader_control);
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
shader->cb0_is_integer = sctx->fb_cb0_is_integer;
|
|
|
|
|
shader->sprite_coord_enable = sctx->sprite_coord_enable;
|
|
|
|
|
si_pm4_bind_state(sctx, ps, shader->pm4);
|
|
|
|
|
sctx->b.flags |= R600_CONTEXT_INV_SHADER_CACHE;
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Drawing
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static unsigned si_conv_pipe_prim(unsigned pprim)
|
|
|
|
|
{
|
|
|
|
|
static const unsigned prim_conv[] = {
|
|
|
|
|
[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
|
|
|
|
|
[PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
|
|
|
|
|
[PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
|
|
|
|
|
[PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
|
|
|
|
|
[PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
|
|
|
|
|
[PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
|
|
|
|
|
[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
|
|
|
|
|
[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
|
|
|
|
|
[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
|
|
|
|
|
[PIPE_PRIM_LINES_ADJACENCY] = ~0,
|
|
|
|
|
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = ~0,
|
|
|
|
|
[PIPE_PRIM_TRIANGLES_ADJACENCY] = ~0,
|
|
|
|
|
[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = ~0
|
|
|
|
|
};
|
|
|
|
|
unsigned result = prim_conv[pprim];
|
|
|
|
|
if (result == ~0) {
|
|
|
|
|
R600_ERR("unsupported primitive type %d\n", pprim);
|
|
|
|
|
}
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-07 03:18:25 +01:00
|
|
|
static unsigned si_conv_prim_to_gs_out(unsigned mode)
|
2013-08-18 03:05:34 +02:00
|
|
|
{
|
|
|
|
|
static const int prim_conv[] = {
|
|
|
|
|
[PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
|
|
|
|
|
[PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
|
|
|
|
|
[PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
|
|
|
|
|
[PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
|
|
|
|
|
[PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
|
|
|
|
|
[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
|
|
|
|
|
[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
|
|
|
|
|
[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
|
|
|
|
|
};
|
|
|
|
|
assert(mode < Elements(prim_conv));
|
|
|
|
|
|
|
|
|
|
return prim_conv[mode];
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
static bool si_update_draw_info_state(struct si_context *sctx,
|
2013-10-08 02:47:36 +02:00
|
|
|
const struct pipe_draw_info *info,
|
|
|
|
|
const struct pipe_index_buffer *ib)
|
2012-07-19 15:20:45 +02:00
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
|
|
|
|
|
struct si_shader *vs = &sctx->vs_shader->current->shader;
|
2012-07-19 15:20:45 +02:00
|
|
|
unsigned prim = si_conv_pipe_prim(info->mode);
|
2014-01-07 03:18:25 +01:00
|
|
|
unsigned gs_out_prim = si_conv_prim_to_gs_out(info->mode);
|
2012-07-19 15:20:45 +02:00
|
|
|
unsigned ls_mask = 0;
|
|
|
|
|
|
|
|
|
|
if (pm4 == NULL)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
if (prim == ~0) {
|
|
|
|
|
FREE(pm4);
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->b.chip_class >= CIK) {
|
|
|
|
|
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
|
2013-10-08 02:47:36 +02:00
|
|
|
bool wd_switch_on_eop = prim == V_008958_DI_PT_POLYGON ||
|
|
|
|
|
prim == V_008958_DI_PT_LINELOOP ||
|
|
|
|
|
prim == V_008958_DI_PT_TRIFAN ||
|
|
|
|
|
prim == V_008958_DI_PT_TRISTRIP_ADJ ||
|
2013-10-30 00:36:58 +01:00
|
|
|
info->primitive_restart ||
|
|
|
|
|
(rs ? rs->line_stipple_enable : false);
|
|
|
|
|
/* If the WD switch is false, the IA switch must be false too. */
|
|
|
|
|
bool ia_switch_on_eop = wd_switch_on_eop;
|
2013-10-08 02:47:36 +02:00
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_028AA8_IA_MULTI_VGT_PARAM,
|
2013-10-30 00:36:58 +01:00
|
|
|
S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
|
2013-10-08 02:47:36 +02:00
|
|
|
S_028AA8_PARTIAL_VS_WAVE_ON(1) |
|
|
|
|
|
S_028AA8_PRIMGROUP_SIZE(63) |
|
|
|
|
|
S_028AA8_WD_SWITCH_ON_EOP(wd_switch_on_eop));
|
|
|
|
|
si_pm4_set_reg(pm4, R_028B74_VGT_DISPATCH_DRAW_INDEX,
|
|
|
|
|
ib->index_size == 4 ? 0xFC000000 : 0xFC00);
|
|
|
|
|
|
2012-09-28 17:35:26 -04:00
|
|
|
si_pm4_set_reg(pm4, R_030908_VGT_PRIMITIVE_TYPE, prim);
|
2013-10-08 02:47:36 +02:00
|
|
|
} else {
|
2012-09-28 17:35:26 -04:00
|
|
|
si_pm4_set_reg(pm4, R_008958_VGT_PRIMITIVE_TYPE, prim);
|
2013-08-18 03:05:34 +02:00
|
|
|
}
|
2013-10-08 02:47:36 +02:00
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
|
2012-08-03 10:26:01 +02:00
|
|
|
si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET,
|
|
|
|
|
info->indexed ? info->index_bias : info->start);
|
2012-07-19 15:20:45 +02:00
|
|
|
si_pm4_set_reg(pm4, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, info->restart_index);
|
|
|
|
|
si_pm4_set_reg(pm4, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
|
2013-03-21 18:30:23 +01:00
|
|
|
si_pm4_set_reg(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_START_INSTANCE * 4,
|
|
|
|
|
info->start_instance);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
if (prim == V_008958_DI_PT_LINELIST)
|
|
|
|
|
ls_mask = 1;
|
|
|
|
|
else if (prim == V_008958_DI_PT_LINESTRIP)
|
|
|
|
|
ls_mask = 2;
|
|
|
|
|
si_pm4_set_reg(pm4, R_028A0C_PA_SC_LINE_STIPPLE,
|
|
|
|
|
S_028A0C_AUTO_RESET_CNTL(ls_mask) |
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->pa_sc_line_stipple);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
if (info->mode == PIPE_PRIM_QUADS || info->mode == PIPE_PRIM_QUAD_STRIP || info->mode == PIPE_PRIM_POLYGON) {
|
|
|
|
|
si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL,
|
2014-01-11 16:00:50 +01:00
|
|
|
S_028814_PROVOKING_VTX_LAST(1) | sctx->pa_su_sc_mode_cntl);
|
2012-07-19 15:20:45 +02:00
|
|
|
} else {
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_set_reg(pm4, R_028814_PA_SU_SC_MODE_CNTL, sctx->pa_su_sc_mode_cntl);
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
si_pm4_set_reg(pm4, R_02881C_PA_CL_VS_OUT_CNTL,
|
2013-05-03 17:59:34 +02:00
|
|
|
S_02881C_USE_VTX_POINT_SIZE(vs->vs_out_point_size) |
|
2013-11-19 22:07:30 +01:00
|
|
|
S_02881C_USE_VTX_EDGE_FLAG(vs->vs_out_edgeflag) |
|
2013-11-21 15:21:38 +01:00
|
|
|
S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->vs_out_layer) |
|
2013-05-15 18:09:50 +02:00
|
|
|
S_02881C_VS_OUT_CCDIST0_VEC_ENA((vs->clip_dist_write & 0x0F) != 0) |
|
|
|
|
|
S_02881C_VS_OUT_CCDIST1_VEC_ENA((vs->clip_dist_write & 0xF0) != 0) |
|
|
|
|
|
S_02881C_VS_OUT_MISC_VEC_ENA(vs->vs_out_misc_write) |
|
2014-01-11 16:00:50 +01:00
|
|
|
(sctx->queued.named.rasterizer->clip_plane_enable &
|
2013-05-15 18:09:50 +02:00
|
|
|
vs->clip_dist_write));
|
|
|
|
|
si_pm4_set_reg(pm4, R_028810_PA_CL_CLIP_CNTL,
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->queued.named.rasterizer->pa_cl_clip_cntl |
|
2013-05-02 15:39:15 +02:00
|
|
|
(vs->clip_dist_write ? 0 :
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->queued.named.rasterizer->clip_plane_enable & 0x3F));
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_set_state(sctx, draw_info, pm4);
|
2012-07-19 15:20:45 +02:00
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
static void si_update_spi_map(struct si_context *sctx)
|
2012-07-19 15:20:45 +02:00
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct si_shader *ps = &sctx->ps_shader->current->shader;
|
|
|
|
|
struct si_shader *vs = &sctx->vs_shader->current->shader;
|
|
|
|
|
struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
|
2012-07-19 15:20:45 +02:00
|
|
|
unsigned i, j, tmp;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < ps->ninput; i++) {
|
2012-09-06 18:03:38 +02:00
|
|
|
unsigned name = ps->input[i].name;
|
|
|
|
|
unsigned param_offset = ps->input[i].param_offset;
|
|
|
|
|
|
2013-02-13 16:07:15 +01:00
|
|
|
if (name == TGSI_SEMANTIC_POSITION)
|
|
|
|
|
/* Read from preloaded VGPRs, not parameters */
|
|
|
|
|
continue;
|
|
|
|
|
|
2012-09-06 18:03:38 +02:00
|
|
|
bcolor:
|
2012-07-19 15:20:45 +02:00
|
|
|
tmp = 0;
|
|
|
|
|
|
2013-02-13 16:07:15 +01:00
|
|
|
if (ps->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT ||
|
2012-07-19 15:20:45 +02:00
|
|
|
(ps->input[i].interpolate == TGSI_INTERPOLATE_COLOR &&
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->ps_shader->current->key.ps.flatshade)) {
|
2012-07-19 15:20:45 +02:00
|
|
|
tmp |= S_028644_FLAT_SHADE(1);
|
|
|
|
|
}
|
|
|
|
|
|
2012-09-06 18:03:38 +02:00
|
|
|
if (name == TGSI_SEMANTIC_GENERIC &&
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->sprite_coord_enable & (1 << ps->input[i].sid)) {
|
2012-07-19 15:20:45 +02:00
|
|
|
tmp |= S_028644_PT_SPRITE_TEX(1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (j = 0; j < vs->noutput; j++) {
|
2012-09-06 18:03:38 +02:00
|
|
|
if (name == vs->output[j].name &&
|
2012-07-19 15:20:45 +02:00
|
|
|
ps->input[i].sid == vs->output[j].sid) {
|
|
|
|
|
tmp |= S_028644_OFFSET(vs->output[j].param_offset);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (j == vs->noutput) {
|
|
|
|
|
/* No corresponding output found, load defaults into input */
|
|
|
|
|
tmp |= S_028644_OFFSET(0x20);
|
|
|
|
|
}
|
|
|
|
|
|
2012-09-27 20:01:33 +02:00
|
|
|
si_pm4_set_reg(pm4,
|
2012-09-06 18:03:38 +02:00
|
|
|
R_028644_SPI_PS_INPUT_CNTL_0 + param_offset * 4,
|
2012-09-27 20:01:33 +02:00
|
|
|
tmp);
|
2012-09-06 18:03:38 +02:00
|
|
|
|
|
|
|
|
if (name == TGSI_SEMANTIC_COLOR &&
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->ps_shader->current->key.ps.color_two_side) {
|
2012-09-06 18:03:38 +02:00
|
|
|
name = TGSI_SEMANTIC_BCOLOR;
|
|
|
|
|
param_offset++;
|
|
|
|
|
goto bcolor;
|
|
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_set_state(sctx, spi, pm4);
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
static void si_update_derived_state(struct si_context *sctx)
|
2012-07-19 15:20:45 +02:00
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct pipe_context * ctx = (struct pipe_context*)sctx;
|
2013-03-22 15:59:22 +01:00
|
|
|
unsigned vs_dirty = 0, ps_dirty = 0;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (!sctx->blitter->running) {
|
2013-01-17 19:36:41 +01:00
|
|
|
/* Flush depth textures which need to be flushed. */
|
2013-08-06 06:42:22 +02:00
|
|
|
for (int i = 0; i < SI_NUM_SHADERS; i++) {
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->samplers[i].depth_texture_mask) {
|
|
|
|
|
si_flush_depth_textures(sctx, &sctx->samplers[i]);
|
2013-08-06 06:42:22 +02:00
|
|
|
}
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->samplers[i].compressed_colortex_mask) {
|
|
|
|
|
si_decompress_color_textures(sctx, &sctx->samplers[i]);
|
2013-08-06 08:48:07 +02:00
|
|
|
}
|
2013-01-17 19:36:41 +01:00
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_shader_select(ctx, sctx->vs_shader, &vs_dirty);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (!sctx->vs_shader->current->pm4) {
|
|
|
|
|
si_pipe_shader_vs(ctx, sctx->vs_shader->current);
|
2013-03-22 15:59:22 +01:00
|
|
|
vs_dirty = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (vs_dirty) {
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_bind_state(sctx, vs, sctx->vs_shader->current->pm4);
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
2013-03-22 15:59:22 +01:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_shader_select(ctx, sctx->ps_shader, &ps_dirty);
|
2013-03-22 15:59:22 +01:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (!sctx->ps_shader->current->pm4) {
|
|
|
|
|
si_pipe_shader_ps(ctx, sctx->ps_shader->current);
|
2012-08-23 17:10:37 +02:00
|
|
|
ps_dirty = 0;
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->ps_shader->current->cb0_is_integer != sctx->fb_cb0_is_integer) {
|
|
|
|
|
si_pipe_shader_ps(ctx, sctx->ps_shader->current);
|
2013-12-04 13:54:50 +01:00
|
|
|
ps_dirty = 0;
|
2013-08-08 00:26:02 +02:00
|
|
|
}
|
2012-08-23 17:10:37 +02:00
|
|
|
|
|
|
|
|
if (ps_dirty) {
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_bind_state(sctx, ps, sctx->ps_shader->current->pm4);
|
2012-08-08 15:35:42 +02:00
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (si_pm4_state_changed(sctx, ps) || si_pm4_state_changed(sctx, vs)) {
|
2013-03-19 17:57:11 +01:00
|
|
|
/* XXX: Emitting the PS state even when only the VS changed
|
|
|
|
|
* fixes random failures with piglit glsl-max-varyings.
|
|
|
|
|
* Not sure why...
|
|
|
|
|
*/
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->emitted.named.ps = NULL;
|
|
|
|
|
si_update_spi_map(sctx);
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
static void si_vertex_buffer_update(struct si_context *sctx)
|
2012-07-19 15:20:45 +02:00
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct pipe_context *ctx = &sctx->b.b;
|
|
|
|
|
struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
|
2012-07-25 11:22:59 +02:00
|
|
|
bool bound[PIPE_MAX_ATTRIBS] = {};
|
|
|
|
|
unsigned i, count;
|
2012-07-19 15:20:45 +02:00
|
|
|
uint64_t va;
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
count = sctx->vertex_elements->count;
|
2012-07-19 15:20:45 +02:00
|
|
|
assert(count <= 256 / 4);
|
|
|
|
|
|
2012-08-12 19:26:24 +02:00
|
|
|
si_pm4_sh_data_begin(pm4);
|
|
|
|
|
for (i = 0 ; i < count; i++) {
|
2014-01-11 16:00:50 +01:00
|
|
|
struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
|
2012-07-25 11:22:59 +02:00
|
|
|
struct pipe_vertex_buffer *vb;
|
2013-08-14 01:04:39 +02:00
|
|
|
struct r600_resource *rbuffer;
|
2012-07-25 11:22:59 +02:00
|
|
|
unsigned offset;
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (ve->vertex_buffer_index >= sctx->nr_vertex_buffers)
|
2012-07-19 15:20:45 +02:00
|
|
|
continue;
|
2012-07-25 11:22:59 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
|
2013-08-14 01:04:39 +02:00
|
|
|
rbuffer = (struct r600_resource*)vb->buffer;
|
2012-07-25 11:22:59 +02:00
|
|
|
if (rbuffer == NULL)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
offset = 0;
|
|
|
|
|
offset += vb->buffer_offset;
|
|
|
|
|
offset += ve->src_offset;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
va = r600_resource_va(ctx->screen, (void*)rbuffer);
|
|
|
|
|
va += offset;
|
|
|
|
|
|
|
|
|
|
/* Fill in T# buffer resource description */
|
2012-08-12 19:26:24 +02:00
|
|
|
si_pm4_sh_data_add(pm4, va & 0xFFFFFFFF);
|
|
|
|
|
si_pm4_sh_data_add(pm4, (S_008F04_BASE_ADDRESS_HI(va >> 32) |
|
|
|
|
|
S_008F04_STRIDE(vb->stride)));
|
2013-03-12 12:34:37 +01:00
|
|
|
if (vb->stride)
|
|
|
|
|
/* Round up by rounding down and adding 1 */
|
|
|
|
|
si_pm4_sh_data_add(pm4,
|
|
|
|
|
(vb->buffer->width0 - offset -
|
|
|
|
|
util_format_get_blocksize(ve->src_format)) /
|
|
|
|
|
vb->stride + 1);
|
|
|
|
|
else
|
|
|
|
|
si_pm4_sh_data_add(pm4, vb->buffer->width0 - offset);
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_sh_data_add(pm4, sctx->vertex_elements->rsrc_word3[i]);
|
2012-07-25 11:22:59 +02:00
|
|
|
|
|
|
|
|
if (!bound[ve->vertex_buffer_index]) {
|
|
|
|
|
si_pm4_add_bo(pm4, rbuffer, RADEON_USAGE_READ);
|
|
|
|
|
bound[ve->vertex_buffer_index] = true;
|
|
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
2012-09-26 20:42:23 +02:00
|
|
|
si_pm4_sh_data_end(pm4, R_00B130_SPI_SHADER_USER_DATA_VS_0, SI_SGPR_VERTEX_BUFFER);
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_set_state(sctx, vertex_buffers, pm4);
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
static void si_state_draw(struct si_context *sctx,
|
2012-08-03 10:26:01 +02:00
|
|
|
const struct pipe_draw_info *info,
|
|
|
|
|
const struct pipe_index_buffer *ib)
|
|
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct si_pm4_state *pm4 = si_pm4_alloc_state(sctx);
|
2013-06-07 14:04:58 -04:00
|
|
|
|
|
|
|
|
if (pm4 == NULL)
|
|
|
|
|
return;
|
2012-08-03 10:26:01 +02:00
|
|
|
|
|
|
|
|
/* queries need some special values
|
|
|
|
|
* (this is non-zero if any query is active) */
|
2014-01-22 00:06:32 +01:00
|
|
|
if (sctx->b.num_occlusion_queries > 0) {
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->b.chip_class >= CIK) {
|
2013-10-08 14:23:22 +02:00
|
|
|
si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
|
|
|
|
|
S_028004_PERFECT_ZPASS_COUNTS(1) |
|
2014-01-11 16:00:50 +01:00
|
|
|
S_028004_SAMPLE_RATE(sctx->fb_log_samples) |
|
2013-10-08 14:23:22 +02:00
|
|
|
S_028004_ZPASS_ENABLE(1) |
|
|
|
|
|
S_028004_SLICE_EVEN_ENABLE(1) |
|
|
|
|
|
S_028004_SLICE_ODD_ENABLE(1));
|
|
|
|
|
} else {
|
|
|
|
|
si_pm4_set_reg(pm4, R_028004_DB_COUNT_CONTROL,
|
|
|
|
|
S_028004_PERFECT_ZPASS_COUNTS(1) |
|
2014-01-11 16:00:50 +01:00
|
|
|
S_028004_SAMPLE_RATE(sctx->fb_log_samples));
|
2013-10-08 14:23:22 +02:00
|
|
|
}
|
2012-08-03 10:26:01 +02:00
|
|
|
}
|
|
|
|
|
|
2013-08-26 18:17:09 +02:00
|
|
|
if (info->count_from_stream_output) {
|
|
|
|
|
struct r600_so_target *t =
|
|
|
|
|
(struct r600_so_target*)info->count_from_stream_output;
|
2014-01-11 16:00:50 +01:00
|
|
|
uint64_t va = r600_resource_va(&sctx->screen->b.b,
|
2013-08-26 18:17:09 +02:00
|
|
|
&t->buf_filled_size->b.b);
|
|
|
|
|
va += t->buf_filled_size_offset;
|
|
|
|
|
|
|
|
|
|
si_pm4_set_reg(pm4, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
|
|
|
|
|
t->stride_in_dw);
|
|
|
|
|
|
|
|
|
|
si_pm4_cmd_begin(pm4, PKT3_COPY_DATA);
|
|
|
|
|
si_pm4_cmd_add(pm4,
|
|
|
|
|
COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
|
|
|
|
|
COPY_DATA_DST_SEL(COPY_DATA_REG) |
|
|
|
|
|
COPY_DATA_WR_CONFIRM);
|
|
|
|
|
si_pm4_cmd_add(pm4, va); /* src address lo */
|
|
|
|
|
si_pm4_cmd_add(pm4, va >> 32UL); /* src address hi */
|
|
|
|
|
si_pm4_cmd_add(pm4, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
|
|
|
|
|
si_pm4_cmd_add(pm4, 0); /* unused */
|
|
|
|
|
si_pm4_add_bo(pm4, t->buf_filled_size, RADEON_USAGE_READ);
|
|
|
|
|
si_pm4_cmd_end(pm4, true);
|
|
|
|
|
}
|
|
|
|
|
|
2012-08-03 10:26:01 +02:00
|
|
|
/* draw packet */
|
|
|
|
|
si_pm4_cmd_begin(pm4, PKT3_INDEX_TYPE);
|
|
|
|
|
if (ib->index_size == 4) {
|
2014-01-11 15:56:47 +01:00
|
|
|
si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_32 | (SI_BIG_ENDIAN ?
|
2012-08-03 10:26:01 +02:00
|
|
|
V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
|
|
|
|
|
} else {
|
2014-01-11 15:56:47 +01:00
|
|
|
si_pm4_cmd_add(pm4, V_028A7C_VGT_INDEX_16 | (SI_BIG_ENDIAN ?
|
2012-08-03 10:26:01 +02:00
|
|
|
V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
|
|
|
|
|
}
|
2014-01-22 00:06:32 +01:00
|
|
|
si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
|
2012-08-03 10:26:01 +02:00
|
|
|
|
|
|
|
|
si_pm4_cmd_begin(pm4, PKT3_NUM_INSTANCES);
|
|
|
|
|
si_pm4_cmd_add(pm4, info->instance_count);
|
2014-01-22 00:06:32 +01:00
|
|
|
si_pm4_cmd_end(pm4, sctx->b.predicate_drawing);
|
2012-08-03 10:26:01 +02:00
|
|
|
|
|
|
|
|
if (info->indexed) {
|
2012-09-20 17:20:51 +02:00
|
|
|
uint32_t max_size = (ib->buffer->width0 - ib->offset) /
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->index_buffer.index_size;
|
2012-08-03 10:26:01 +02:00
|
|
|
uint64_t va;
|
2014-01-11 16:00:50 +01:00
|
|
|
va = r600_resource_va(&sctx->screen->b.b, ib->buffer);
|
2012-08-03 10:26:01 +02:00
|
|
|
va += ib->offset;
|
|
|
|
|
|
2013-08-14 01:04:39 +02:00
|
|
|
si_pm4_add_bo(pm4, (struct r600_resource *)ib->buffer, RADEON_USAGE_READ);
|
2012-09-20 17:20:51 +02:00
|
|
|
si_cmd_draw_index_2(pm4, max_size, va, info->count,
|
|
|
|
|
V_0287F0_DI_SRC_SEL_DMA,
|
2014-01-22 00:06:32 +01:00
|
|
|
sctx->b.predicate_drawing);
|
2012-08-03 10:26:01 +02:00
|
|
|
} else {
|
2012-09-20 17:20:51 +02:00
|
|
|
uint32_t initiator = V_0287F0_DI_SRC_SEL_AUTO_INDEX;
|
|
|
|
|
initiator |= S_0287F0_USE_OPAQUE(!!info->count_from_stream_output);
|
2014-01-22 00:06:32 +01:00
|
|
|
si_cmd_draw_index_auto(pm4, info->count, initiator, sctx->b.predicate_drawing);
|
2012-08-03 10:26:01 +02:00
|
|
|
}
|
2013-12-07 04:42:24 +01:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_set_state(sctx, draw, pm4);
|
2012-08-03 10:26:01 +02:00
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
/*
 * Emit the CP packets that flush/invalidate the GPU caches requested by the
 * R600_CONTEXT_* bits accumulated in sctx->flags, then clear the flags.
 *
 * Called as an r600_atom emit callback; the 'atom' parameter is part of the
 * callback signature but is not used by this function.
 */
void si_emit_cache_flush(struct r600_common_context *sctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = sctx->rings.gfx.cs;
	/* Accumulated cache-action bits for the CP_COHER_CNTL register,
	 * written via SURFACE_SYNC (SI) or ACQUIRE_MEM (CIK+) below. */
	uint32_t cp_coher_cntl = 0;

	/* XXX SI flushes both ICACHE and KCACHE if either flag is set.
	 * XXX CIK shouldn't have this issue. Test CIK before separating the flags
	 * XXX to ensure there is no regression. Also find out if there is another
	 * XXX way to flush either ICACHE or KCACHE but not both for SI. */
	if (sctx->flags & (R600_CONTEXT_INV_SHADER_CACHE |
			   R600_CONTEXT_INV_CONST_CACHE)) {
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1) |
				 S_0085F0_SH_KCACHE_ACTION_ENA(1);
	}
	if (sctx->flags & (R600_CONTEXT_INV_TEX_CACHE |
			   R600_CONTEXT_STREAMOUT_FLUSH)) {
		cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
				 S_0085F0_TCL1_ACTION_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB) {
		/* Flush the CB cache and make every color-buffer slot a
		 * destination base so the flush covers all bound CBs. */
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (cp_coher_cntl) {
		if (sctx->chip_class >= CIK) {
			/* CIK+ uses ACQUIRE_MEM (6 data dwords) instead of
			 * SURFACE_SYNC; the full-range size below means the
			 * flush applies to the whole address space. */
			radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0xff);		/* CP_COHER_SIZE_HI */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE_HI */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		} else {
			radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
			radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
			radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
			radeon_emit(cs, 0);		/* CP_COHER_BASE */
			radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
		}
	}

	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
	}
	if (sctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
	}

	/* A PS_PARTIAL_FLUSH covers the streamout-flush wait as well, hence
	 * the else-if: only one of the two partial-flush events is emitted. */
	if (sctx->flags & (R600_CONTEXT_WAIT_3D_IDLE |
			   R600_CONTEXT_PS_PARTIAL_FLUSH)) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	} else if (sctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) {
		/* Needed if streamout buffers are going to be used as a source. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
	}

	/* All requested flushes have been emitted. */
	sctx->flags = 0;
}
|
|
|
|
|
|
2013-12-17 00:46:45 +01:00
|
|
|
/* Atom whose emit callback flushes caches; 13 is the worst-case CS dword
 * count of si_emit_cache_flush (ACQUIRE_MEM + all EVENT_WRITE packets). */
const struct r600_atom si_atom_cache_flush = { si_emit_cache_flush, 13 }; /* number of CS dwords */
|
radeonsi: simplify and improve flushing
This mimics r600g. The R600_CONTEXT_xxx flags are added to rctx->b.flags
and si_emit_cache_flush emits the packets. That's it. The shared radeon code
tells us when the streamout cache should be flushed, so we have to check
the flags anyway.
There is a new atom "cache_flush", because caches must be flushed *after*
resource descriptors are changed in memory.
Functional changes:
* Write caches are flushed at the end of CS and read caches are flushed
at its beginning.
* Sampler view states are removed from si_state, they only held the flush
flags.
* Everytime a shader is changed, the I cache is flushed. Is this needed?
Due to a hw bug, this also flushes the K cache.
* The WRITE_DATA packet is changed to use TC, which fixes a rendering issue
in openarena. I'm not sure how TC interacts with CP DMA, but for now it
seems to work better than any other solution I tried. (BTW CIK allows us
to use TC for CP DMA.)
* Flush the K cache instead of the texture cache when updating resource
descriptors (due to a hw bug, this also flushes the I cache).
I think the K cache flush is correct here, but I'm not sure if the texture
cache should be flushed too (probably not considering we use TC
for WRITE_DATA, but we don't use TC for CP DMA).
* The number of resource contexts is decreased to 16. With all of these cache
changes, 4 doesn't work, but 8 works, which suggests I'm actually doing
the right thing here and the pipeline isn't drained during flushes.
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Tested-by: Tom Stellard <thomas.stellard@amd.com>
2013-08-26 17:19:39 +02:00
|
|
|
|
2012-08-03 10:26:01 +02:00
|
|
|
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
|
2012-07-19 15:20:45 +02:00
|
|
|
{
|
2014-01-11 16:00:50 +01:00
|
|
|
struct si_context *sctx = (struct si_context *)ctx;
|
2012-07-19 15:20:45 +02:00
|
|
|
struct pipe_index_buffer ib = {};
|
radeonsi: simplify and improve flushing
This mimics r600g. The R600_CONTEXT_xxx flags are added to rctx->b.flags
and si_emit_cache_flush emits the packets. That's it. The shared radeon code
tells us when the streamout cache should be flushed, so we have to check
the flags anyway.
There is a new atom "cache_flush", because caches must be flushed *after*
resource descriptors are changed in memory.
Functional changes:
* Write caches are flushed at the end of CS and read caches are flushed
at its beginning.
* Sampler view states are removed from si_state, they only held the flush
flags.
* Everytime a shader is changed, the I cache is flushed. Is this needed?
Due to a hw bug, this also flushes the K cache.
* The WRITE_DATA packet is changed to use TC, which fixes a rendering issue
in openarena. I'm not sure how TC interacts with CP DMA, but for now it
seems to work better than any other solution I tried. (BTW CIK allows us
to use TC for CP DMA.)
* Flush the K cache instead of the texture cache when updating resource
descriptors (due to a hw bug, this also flushes the I cache).
I think the K cache flush is correct here, but I'm not sure if the texture
cache should be flushed too (probably not considering we use TC
for WRITE_DATA, but we don't use TC for CP DMA).
* The number of resource contexts is decreased to 16. With all of these cache
changes, 4 doesn't work, but 8 works, which suggests I'm actually doing
the right thing here and the pipeline isn't drained during flushes.
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Tested-by: Tom Stellard <thomas.stellard@amd.com>
2013-08-26 17:19:39 +02:00
|
|
|
uint32_t i;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2013-02-01 18:49:07 +01:00
|
|
|
if (!info->count && (info->indexed || !info->count_from_stream_output))
|
2012-07-19 15:20:45 +02:00
|
|
|
return;
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (!sctx->ps_shader || !sctx->vs_shader)
|
2012-07-19 15:20:45 +02:00
|
|
|
return;
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_update_derived_state(sctx);
|
|
|
|
|
si_vertex_buffer_update(sctx);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2012-08-03 10:26:01 +02:00
|
|
|
if (info->indexed) {
|
2012-07-19 15:20:45 +02:00
|
|
|
/* Initialize the index buffer struct. */
|
2014-01-11 16:00:50 +01:00
|
|
|
pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
|
|
|
|
|
ib.user_buffer = sctx->index_buffer.user_buffer;
|
|
|
|
|
ib.index_size = sctx->index_buffer.index_size;
|
|
|
|
|
ib.offset = sctx->index_buffer.offset + info->start * ib.index_size;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
/* Translate or upload, if needed. */
|
2014-01-11 16:00:50 +01:00
|
|
|
si_translate_index_buffer(sctx, &ib, info->count);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2013-02-01 18:49:07 +01:00
|
|
|
if (ib.user_buffer && !ib.buffer) {
|
2014-01-22 02:57:28 +01:00
|
|
|
u_upload_data(sctx->b.uploader, 0, info->count * ib.index_size,
|
|
|
|
|
ib.user_buffer, &ib.offset, &ib.buffer);
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
if (!si_update_draw_info_state(sctx, info, &ib))
|
2012-07-19 15:20:45 +02:00
|
|
|
return;
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_state_draw(sctx, info, &ib);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
sctx->pm4_dirty_cdwords += si_pm4_dirty_dw(sctx);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
radeonsi: simplify and improve flushing
This mimics r600g. The R600_CONTEXT_xxx flags are added to rctx->b.flags
and si_emit_cache_flush emits the packets. That's it. The shared radeon code
tells us when the streamout cache should be flushed, so we have to check
the flags anyway.
There is a new atom "cache_flush", because caches must be flushed *after*
resource descriptors are changed in memory.
Functional changes:
* Write caches are flushed at the end of CS and read caches are flushed
at its beginning.
* Sampler view states are removed from si_state, they only held the flush
flags.
* Everytime a shader is changed, the I cache is flushed. Is this needed?
Due to a hw bug, this also flushes the K cache.
* The WRITE_DATA packet is changed to use TC, which fixes a rendering issue
in openarena. I'm not sure how TC interacts with CP DMA, but for now it
seems to work better than any other solution I tried. (BTW CIK allows us
to use TC for CP DMA.)
* Flush the K cache instead of the texture cache when updating resource
descriptors (due to a hw bug, this also flushes the I cache).
I think the K cache flush is correct here, but I'm not sure if the texture
cache should be flushed too (probably not considering we use TC
for WRITE_DATA, but we don't use TC for CP DMA).
* The number of resource contexts is decreased to 16. With all of these cache
changes, 4 doesn't work, but 8 works, which suggests I'm actually doing
the right thing here and the pipeline isn't drained during flushes.
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Tested-by: Tom Stellard <thomas.stellard@amd.com>
2013-08-26 17:19:39 +02:00
|
|
|
/* Check flush flags. */
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->b.flags)
|
|
|
|
|
sctx->atoms.cache_flush->dirty = true;
|
radeonsi: simplify and improve flushing
This mimics r600g. The R600_CONTEXT_xxx flags are added to rctx->b.flags
and si_emit_cache_flush emits the packets. That's it. The shared radeon code
tells us when the streamout cache should be flushed, so we have to check
the flags anyway.
There is a new atom "cache_flush", because caches must be flushed *after*
resource descriptors are changed in memory.
Functional changes:
* Write caches are flushed at the end of CS and read caches are flushed
at its beginning.
* Sampler view states are removed from si_state, they only held the flush
flags.
* Everytime a shader is changed, the I cache is flushed. Is this needed?
Due to a hw bug, this also flushes the K cache.
* The WRITE_DATA packet is changed to use TC, which fixes a rendering issue
in openarena. I'm not sure how TC interacts with CP DMA, but for now it
seems to work better than any other solution I tried. (BTW CIK allows us
to use TC for CP DMA.)
* Flush the K cache instead of the texture cache when updating resource
descriptors (due to a hw bug, this also flushes the I cache).
I think the K cache flush is correct here, but I'm not sure if the texture
cache should be flushed too (probably not considering we use TC
for WRITE_DATA, but we don't use TC for CP DMA).
* The number of resource contexts is decreased to 16. With all of these cache
changes, 4 doesn't work, but 8 works, which suggests I'm actually doing
the right thing here and the pipeline isn't drained during flushes.
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Tested-by: Tom Stellard <thomas.stellard@amd.com>
2013-08-26 17:19:39 +02:00
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_need_cs_space(sctx, 0, TRUE);
|
2012-07-19 15:20:45 +02:00
|
|
|
|
radeonsi: simplify and improve flushing
This mimics r600g. The R600_CONTEXT_xxx flags are added to rctx->b.flags
and si_emit_cache_flush emits the packets. That's it. The shared radeon code
tells us when the streamout cache should be flushed, so we have to check
the flags anyway.
There is a new atom "cache_flush", because caches must be flushed *after*
resource descriptors are changed in memory.
Functional changes:
* Write caches are flushed at the end of CS and read caches are flushed
at its beginning.
* Sampler view states are removed from si_state, they only held the flush
flags.
* Everytime a shader is changed, the I cache is flushed. Is this needed?
Due to a hw bug, this also flushes the K cache.
* The WRITE_DATA packet is changed to use TC, which fixes a rendering issue
in openarena. I'm not sure how TC interacts with CP DMA, but for now it
seems to work better than any other solution I tried. (BTW CIK allows us
to use TC for CP DMA.)
* Flush the K cache instead of the texture cache when updating resource
descriptors (due to a hw bug, this also flushes the I cache).
I think the K cache flush is correct here, but I'm not sure if the texture
cache should be flushed too (probably not considering we use TC
for WRITE_DATA, but we don't use TC for CP DMA).
* The number of resource contexts is decreased to 16. With all of these cache
changes, 4 doesn't work, but 8 works, which suggests I'm actually doing
the right thing here and the pipeline isn't drained during flushes.
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Tested-by: Tom Stellard <thomas.stellard@amd.com>
2013-08-26 17:19:39 +02:00
|
|
|
/* Emit states. */
|
2014-01-11 16:00:50 +01:00
|
|
|
for (i = 0; i < SI_NUM_ATOMS(sctx); i++) {
|
|
|
|
|
if (sctx->atoms.array[i]->dirty) {
|
|
|
|
|
sctx->atoms.array[i]->emit(&sctx->b, sctx->atoms.array[i]);
|
|
|
|
|
sctx->atoms.array[i]->dirty = false;
|
2013-08-06 06:42:22 +02:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-11 16:00:50 +01:00
|
|
|
si_pm4_emit_dirty(sctx);
|
|
|
|
|
sctx->pm4_dirty_cdwords = 0;
|
2012-07-19 15:20:45 +02:00
|
|
|
|
2014-01-11 15:56:47 +01:00
|
|
|
#if SI_TRACE_CS
|
2014-01-22 02:02:18 +01:00
|
|
|
if (sctx->screen->b.trace_bo) {
|
2014-01-11 16:00:50 +01:00
|
|
|
si_trace_emit(sctx);
|
2013-03-25 11:46:38 -04:00
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2013-01-17 19:36:41 +01:00
|
|
|
/* Set the depth buffer as dirty. */
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->framebuffer.zsbuf) {
|
|
|
|
|
struct pipe_surface *surf = sctx->framebuffer.zsbuf;
|
2013-08-05 03:42:11 +02:00
|
|
|
struct r600_texture *rtex = (struct r600_texture *)surf->texture;
|
2013-01-17 19:36:41 +01:00
|
|
|
|
2013-08-05 14:40:43 +02:00
|
|
|
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|
2014-01-11 16:00:50 +01:00
|
|
|
if (sctx->fb_compressed_cb_mask) {
|
2013-08-06 08:48:07 +02:00
|
|
|
struct pipe_surface *surf;
|
|
|
|
|
struct r600_texture *rtex;
|
2014-01-11 16:00:50 +01:00
|
|
|
unsigned mask = sctx->fb_compressed_cb_mask;
|
2013-08-06 08:48:07 +02:00
|
|
|
|
|
|
|
|
do {
|
|
|
|
|
unsigned i = u_bit_scan(&mask);
|
2014-01-11 16:00:50 +01:00
|
|
|
surf = sctx->framebuffer.cbufs[i];
|
2013-08-06 08:48:07 +02:00
|
|
|
rtex = (struct r600_texture*)surf->texture;
|
|
|
|
|
|
|
|
|
|
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
|
|
|
|
|
} while (mask);
|
|
|
|
|
}
|
2012-07-19 15:20:45 +02:00
|
|
|
|
|
|
|
|
pipe_resource_reference(&ib.buffer, NULL);
|
2014-01-22 01:29:18 +01:00
|
|
|
sctx->b.num_draw_calls++;
|
2012-07-19 15:20:45 +02:00
|
|
|
}
|