radeonsi: remove the unused cs parameter from radeon_set_sh_reg
Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13015>
parent 8b52fd28c3
commit f4ece6cf12
6 changed files with 53 additions and 53 deletions
@@ -110,15 +110,15 @@
       radeon_emit(value); \
    } while (0)

-#define radeon_set_sh_reg_seq(cs, reg, num) do { \
+#define radeon_set_sh_reg_seq(reg, num) do { \
    SI_CHECK_SHADOWED_REGS(reg, num); \
    assert((reg) >= SI_SH_REG_OFFSET && (reg) < SI_SH_REG_END); \
    radeon_emit(PKT3(PKT3_SET_SH_REG, num, 0)); \
    radeon_emit(((reg) - SI_SH_REG_OFFSET) >> 2); \
 } while (0)

-#define radeon_set_sh_reg(cs, reg, value) do { \
-   radeon_set_sh_reg_seq(cs, reg, 1); \
+#define radeon_set_sh_reg(reg, value) do { \
+   radeon_set_sh_reg_seq(reg, 1); \
    radeon_emit(value); \
 } while (0)

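The macro bodies above never reference their cs parameter: every write goes through radeon_emit(), which presumably targets the command buffer captured by the enclosing radeon_begin()/radeon_end() pair, so the explicit argument carried no information. A sketch of the resulting call-site change (the register write is lifted from the si_emit_initial_compute_regs hunk further down; the begin/end lines are added here only for context):

   /* Before: cs named explicitly, duplicating what radeon_begin(cs) already set up. */
   radeon_begin(cs);
   radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);
   radeon_end();

   /* After: only the register and its value remain. */
   radeon_begin(cs);
   radeon_set_sh_reg(R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);
   radeon_end();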
@@ -263,7 +263,7 @@
       unsigned __value = val; \
       if (((sctx->tracked_regs.reg_saved >> (reg)) & 0x1) != 0x1 || \
           sctx->tracked_regs.reg_value[reg] != __value) { \
-         radeon_set_sh_reg(cs, offset, __value); \
+         radeon_set_sh_reg(offset, __value); \
          sctx->tracked_regs.reg_saved |= BITFIELD64_BIT(reg); \
          sctx->tracked_regs.reg_value[reg] = __value; \
       } \
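The hunk above lives in the register-tracking helper (presumably the radeon_opt_set_sh_reg macro in si_build_pm4.h): the SET_SH_REG packet is emitted only when the shadowed value is missing or stale, and only the inner radeon_set_sh_reg call loses its cs argument. A minimal standalone sketch of that idea, using illustrative names that are not the driver's own:

   #include <stdbool.h>
   #include <stdint.h>

   /* Illustrative stand-ins for the sctx->tracked_regs bookkeeping. */
   struct tracked_reg {
      bool     saved;   /* has this register been written at least once? */
      uint32_t value;   /* last value emitted to the hardware */
   };

   /* Emit a register write only when the cached value is stale. */
   static bool set_reg_if_changed(struct tracked_reg *reg, uint32_t value,
                                  void (*emit)(uint32_t))
   {
      if (reg->saved && reg->value == value)
         return false;   /* redundant write elided, saving command-buffer space */
      emit(value);
      reg->saved = true;
      reg->value = value;
      return true;
   }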
@@ -297,7 +297,7 @@

 #define radeon_emit_one_32bit_pointer(sctx, desc, sh_base) do { \
    unsigned sh_offset = (sh_base) + (desc)->shader_userdata_offset; \
-   radeon_set_sh_reg_seq(cs, sh_offset, 1); \
+   radeon_set_sh_reg_seq(sh_offset, 1); \
    radeon_emit_32bit_pointer(sctx->screen, cs, (desc)->gpu_address); \
 } while (0)

@@ -367,10 +367,10 @@ static void si_set_global_binding(struct pipe_context *ctx, unsigned first, unsi
 void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs)
 {
    radeon_begin(cs);
-   radeon_set_sh_reg(cs, R_00B834_COMPUTE_PGM_HI,
+   radeon_set_sh_reg(R_00B834_COMPUTE_PGM_HI,
                      S_00B834_DATA(sctx->screen->info.address32_hi >> 8));

-   radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
+   radeon_set_sh_reg_seq(R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
    /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
     * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
    radeon_emit(S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
@@ -384,7 +384,7 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf
     * TODO: This should be:
     * (number of compute units) * 4 * (waves per simd) - 1
     */
-   radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);
+   radeon_set_sh_reg(R_00B82C_COMPUTE_MAX_WAVE_ID, 0x190 /* Default value */);

    if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
       uint64_t bc_va = sctx->border_color_buffer->gpu_address;
@@ -395,14 +395,14 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf

    if (sctx->chip_class >= GFX7) {
       /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
-      radeon_set_sh_reg_seq(cs, R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
+      radeon_set_sh_reg_seq(R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
       radeon_emit(S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
       radeon_emit(S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));

       /* Disable profiling on compute queues. */
       if (cs != &sctx->gfx_cs || !sctx->screen->info.has_graphics) {
-         radeon_set_sh_reg(cs, R_00B82C_COMPUTE_PERFCOUNT_ENABLE, 0);
-         radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE, 0);
+         radeon_set_sh_reg(R_00B82C_COMPUTE_PERFCOUNT_ENABLE, 0);
+         radeon_set_sh_reg(R_00B878_COMPUTE_THREAD_TRACE_ENABLE, 0);
       }

       /* Set the pointer to border colors. */
@@ -426,14 +426,14 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf
    }

    if (sctx->chip_class >= GFX10) {
-      radeon_set_sh_reg_seq(cs, R_00B890_COMPUTE_USER_ACCUM_0, 5);
+      radeon_set_sh_reg_seq(R_00B890_COMPUTE_USER_ACCUM_0, 5);
       radeon_emit(0); /* R_00B890_COMPUTE_USER_ACCUM_0 */
       radeon_emit(0); /* R_00B894_COMPUTE_USER_ACCUM_1 */
       radeon_emit(0); /* R_00B898_COMPUTE_USER_ACCUM_2 */
       radeon_emit(0); /* R_00B89C_COMPUTE_USER_ACCUM_3 */
       radeon_emit(0); /* R_00B8A0_COMPUTE_PGM_RSRC3 */

-      radeon_set_sh_reg(cs, R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
+      radeon_set_sh_reg(R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
    }
    radeon_end();
 }
@@ -538,9 +538,9 @@ static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute
                              RADEON_PRIO_SHADER_BINARY);

    radeon_begin(cs);
-   radeon_set_sh_reg(cs, R_00B830_COMPUTE_PGM_LO, shader_va >> 8);
+   radeon_set_sh_reg(R_00B830_COMPUTE_PGM_LO, shader_va >> 8);

-   radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
+   radeon_set_sh_reg_seq(R_00B848_COMPUTE_PGM_RSRC1, 2);
    radeon_emit(config->rsrc1);
    radeon_emit(config->rsrc2);

@@ -552,7 +552,7 @@ static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute
    sctx->max_seen_compute_scratch_bytes_per_wave =
       MAX2(sctx->max_seen_compute_scratch_bytes_per_wave, config->scratch_bytes_per_wave);

-   radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
+   radeon_set_sh_reg(R_00B860_COMPUTE_TMPRING_SIZE,
                      S_00B860_WAVES(sctx->scratch_waves) |
                      S_00B860_WAVESIZE(sctx->max_seen_compute_scratch_bytes_per_wave >> 10));
    radeon_end();
@@ -595,7 +595,7 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
    }

    radeon_begin(cs);
-   radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 4);
+   radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 4);
    radeon_emit(scratch_dword0);
    radeon_emit(scratch_dword1);
    radeon_emit(scratch_dword2);
@@ -659,7 +659,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx, const amd_kernel_

       dispatch_va = dispatch_buf->gpu_address + dispatch_offset;

-      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
+      radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
       radeon_emit(dispatch_va);
       radeon_emit(S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) | S_008F04_STRIDE(0));

@@ -669,7 +669,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx, const amd_kernel_

    if (AMD_HSA_BITS_GET(code_object->code_properties,
                         AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
-      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
+      radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 2);
       radeon_emit(kernel_args_va);
       radeon_emit(S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) | S_008F04_STRIDE(0));
       user_sgpr += 2;
@@ -677,7 +677,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx, const amd_kernel_

    for (i = 0; i < 3 && user_sgpr < 16; i++) {
       if (code_object->code_properties & workgroup_count_masks[i]) {
-         radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 1);
+         radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 + (user_sgpr * 4), 1);
         radeon_emit(info->grid[i]);
         user_sgpr += 1;
      }
@@ -743,7 +743,7 @@ static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_gr
         }
         radeon_begin_again(cs);
      } else {
-        radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
+        radeon_set_sh_reg_seq(grid_size_reg, 3);
        radeon_emit(info->grid[0]);
        radeon_emit(info->grid[1]);
        radeon_emit(info->grid[2]);
@@ -751,12 +751,12 @@ static void si_setup_nir_user_data(struct si_context *sctx, const struct pipe_gr
    }

    if (sel->info.uses_variable_block_size) {
-      radeon_set_sh_reg(cs, block_size_reg,
+      radeon_set_sh_reg(block_size_reg,
                         info->block[0] | (info->block[1] << 10) | (info->block[2] << 20));
    }

    if (sel->info.base.cs.user_data_components_amd) {
-      radeon_set_sh_reg_seq(cs, cs_user_data_reg, sel->info.base.cs.user_data_components_amd);
+      radeon_set_sh_reg_seq(cs_user_data_reg, sel->info.base.cs.user_data_components_amd);
       radeon_emit_array(sctx->cs_user_data, sel->info.base.cs.user_data_components_amd);
    }
    radeon_end();
@@ -783,7 +783,7 @@ static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_

    radeon_begin(cs);
    radeon_set_sh_reg(
-      cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
+      R_00B854_COMPUTE_RESOURCE_LIMITS,
       ac_get_compute_resource_limits(&sscreen->info, waves_per_threadgroup,
                                      sctx->cs_max_waves_per_sh, threadgroups_per_cu));

@@ -796,7 +796,7 @@ static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_
    const uint *last_block = info->last_block;
    bool partial_block_en = last_block[0] || last_block[1] || last_block[2];

-   radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
+   radeon_set_sh_reg_seq(R_00B81C_COMPUTE_NUM_THREAD_X, 3);

    if (partial_block_en) {
       unsigned partial[3];
@@ -2068,7 +2068,7 @@ void si_shader_change_notify(struct si_context *sctx)
       struct si_descriptors *descs = &sctx->descriptors[start]; \
       unsigned sh_offset = sh_reg_base + descs->shader_userdata_offset; \
                                                                         \
-      radeon_set_sh_reg_seq(&sctx->gfx_cs, sh_offset, count); \
+      radeon_set_sh_reg_seq(sh_offset, count); \
       for (int i = 0; i < count; i++) \
          radeon_emit_32bit_pointer(sctx->screen, cs, descs[i].gpu_address); \
    } \
@@ -2161,7 +2161,7 @@ void si_emit_compute_shader_pointers(struct si_context *sctx)
    if (num_shaderbufs && sctx->compute_shaderbuf_sgprs_dirty) {
       struct si_descriptors *desc = si_const_and_shader_buffer_descriptors(sctx, PIPE_SHADER_COMPUTE);

-      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+      radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 +
                             shader->cs_shaderbufs_sgpr_index * 4,
                             num_shaderbufs * 4);

@@ -2176,7 +2176,7 @@ void si_emit_compute_shader_pointers(struct si_context *sctx)
    if (num_images && sctx->compute_image_sgprs_dirty) {
       struct si_descriptors *desc = si_sampler_and_image_descriptors(sctx, PIPE_SHADER_COMPUTE);

-      radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+      radeon_set_sh_reg_seq(R_00B900_COMPUTE_USER_DATA_0 +
                             shader->cs_images_sgpr_index * 4,
                             shader->cs_images_num_sgprs);

@@ -215,7 +215,7 @@ si_emit_thread_trace_start(struct si_context* sctx,

    /* Start the thread trace with a different event based on the queue. */
    if (queue_family_index == RING_COMPUTE) {
-      radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
+      radeon_set_sh_reg(R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
                         S_00B878_THREAD_TRACE_ENABLE(1));
    } else {
       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
@@ -290,7 +290,7 @@ si_emit_thread_trace_stop(struct si_context *sctx,

    /* Stop the thread trace with a different event based on the queue. */
    if (queue_family_index == RING_COMPUTE) {
-      radeon_set_sh_reg(cs, R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
+      radeon_set_sh_reg(R_00B878_COMPUTE_THREAD_TRACE_ENABLE,
                         S_00B878_THREAD_TRACE_ENABLE(0));
    } else {
       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
@@ -714,11 +714,11 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
       else
          hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(lds_size);

-      radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
+      radeon_set_sh_reg(R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);

       /* Set userdata SGPRs for merged LS-HS. */
       radeon_set_sh_reg_seq(
-         cs, R_00B430_SPI_SHADER_USER_DATA_LS_0 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
+         R_00B430_SPI_SHADER_USER_DATA_LS_0 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
       radeon_emit(offchip_layout);
       radeon_emit(tcs_out_offsets);
       radeon_emit(tcs_out_layout);
@@ -731,14 +731,14 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
      /* Due to a hw bug, RSRC2_LS must be written twice with another
       * LS register written in between. */
      if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
-        radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
-     radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
+        radeon_set_sh_reg(R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
+     radeon_set_sh_reg_seq(R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
      radeon_emit(ls_current->config.rsrc1);
      radeon_emit(ls_rsrc2);

      /* Set userdata SGPRs for TCS. */
      radeon_set_sh_reg_seq(
-        cs, R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
+        R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
      radeon_emit(offchip_layout);
      radeon_emit(tcs_out_offsets);
      radeon_emit(tcs_out_layout);
@@ -746,7 +746,7 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
    }

    /* Set userdata SGPRs for TES. */
-   radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
+   radeon_set_sh_reg_seq(tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
    radeon_emit(offchip_layout);
    radeon_emit(ring_va);
    radeon_end();
@@ -1109,7 +1109,7 @@ static void si_emit_vs_state(struct si_context *sctx, unsigned index_size)
    unsigned vs_base = si_get_user_data_base(GFX_VERSION, HAS_TESS, HAS_GS, NGG,
                                             PIPE_SHADER_VERTEX);
    radeon_begin(cs);
-   radeon_set_sh_reg(cs, vs_base + SI_SGPR_VS_STATE_BITS * 4,
+   radeon_set_sh_reg(vs_base + SI_SGPR_VS_STATE_BITS * 4,
                      sctx->current_vs_state);

    /* Set CLAMP_VERTEX_COLOR and OUTPRIM in the last stage
@@ -1118,13 +1118,13 @@ static void si_emit_vs_state(struct si_context *sctx, unsigned index_size)
     * For TES or the GS copy shader without NGG:
     */
    if (vs_base != R_00B130_SPI_SHADER_USER_DATA_VS_0) {
-      radeon_set_sh_reg(cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_VS_STATE_BITS * 4,
+      radeon_set_sh_reg(R_00B130_SPI_SHADER_USER_DATA_VS_0 + SI_SGPR_VS_STATE_BITS * 4,
                         sctx->current_vs_state);
    }

    /* For NGG: */
    if (GFX_VERSION >= GFX10 && vs_base != R_00B230_SPI_SHADER_USER_DATA_GS_0) {
-      radeon_set_sh_reg(cs, R_00B230_SPI_SHADER_USER_DATA_GS_0 + SI_SGPR_VS_STATE_BITS * 4,
+      radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + SI_SGPR_VS_STATE_BITS * 4,
                         sctx->current_vs_state);
    }
    radeon_end();
@@ -1460,7 +1460,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
       si_invalidate_draw_sh_constants(sctx);

       /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
-      radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4, sctx->num_vs_blit_sgprs);
+      radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4, sctx->num_vs_blit_sgprs);
       radeon_emit_array(sctx->vs_blit_sh_data, sctx->num_vs_blit_sgprs);
    } else if (base_vertex != sctx->last_base_vertex ||
               sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
@@ -1472,7 +1472,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
               sctx->last_drawid == SI_DRAW_ID_UNKNOWN)) ||
              sh_base_reg != sctx->last_sh_base_reg) {
      if (set_base_instance) {
-        radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
+        radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
         radeon_emit(base_vertex);
         radeon_emit(drawid_base);
         radeon_emit(info->start_instance);
@@ -1480,13 +1480,13 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
         sctx->last_start_instance = info->start_instance;
         sctx->last_drawid = drawid_base;
      } else if (set_draw_id) {
-        radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
+        radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
         radeon_emit(base_vertex);
         radeon_emit(drawid_base);

         sctx->last_drawid = drawid_base;
      } else {
-        radeon_set_sh_reg(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, base_vertex);
+        radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, base_vertex);
      }

      sctx->last_base_vertex = base_vertex;
@@ -1520,7 +1520,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
            uint64_t va = index_va + draws[i].start * index_size;

            if (i > 0) {
-              radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
+              radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
               radeon_emit(draws[i].index_bias);
               radeon_emit(drawid_base + i);
            }
@@ -1542,7 +1542,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
            uint64_t va = index_va + draws[i].start * index_size;

            if (i > 0)
-              radeon_set_sh_reg(cs, sh_base_reg + SI_SGPR_DRAWID * 4, drawid_base + i);
+              radeon_set_sh_reg(sh_base_reg + SI_SGPR_DRAWID * 4, drawid_base + i);

            radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
            radeon_emit(index_max_size);
@@ -1561,7 +1561,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
            uint64_t va = index_va + draws[i].start * index_size;

            if (i > 0)
-              radeon_set_sh_reg(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].index_bias);
+              radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].index_bias);

            radeon_emit(PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
            radeon_emit(index_max_size);
@@ -1609,7 +1609,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
         for (unsigned i = 0; i < num_draws; i++) {
            uint64_t index_va = base_index_va + draws[i].start * original_index_size;

-           radeon_set_sh_reg_seq(cs, R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, 2);
+           radeon_set_sh_reg_seq(R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS, 2);
            radeon_emit(index_va);
            radeon_emit(index_va >> 32);

@@ -1617,7 +1617,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
            if (increment_draw_id) {
               unsigned draw_id = drawid_base + i;

-              radeon_set_sh_reg(cs, sh_base_reg + SI_SGPR_DRAWID * 4, draw_id);
+              radeon_set_sh_reg(sh_base_reg + SI_SGPR_DRAWID * 4, draw_id);
               sctx->last_drawid = draw_id;
            }
         }
@@ -1638,13 +1638,13 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
            if (increment_draw_id) {
               unsigned draw_id = drawid_base + i;

-              radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
+              radeon_set_sh_reg_seq(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 2);
               radeon_emit(draws[i].start);
               radeon_emit(draw_id);

               sctx->last_drawid = draw_id;
            } else {
-              radeon_set_sh_reg(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].start);
+              radeon_set_sh_reg(sh_base_reg + SI_SGPR_BASE_VERTEX * 4, draws[i].start);
            }
         }

@@ -1783,7 +1783,7 @@ static bool si_upload_and_prefetch_VB_descriptors(struct si_context *sctx)
            sh_dw_offset = GFX9_VSGS_NUM_USER_SGPR;
         }

-        radeon_set_sh_reg(cs, sh_base + sh_dw_offset * 4,
+        radeon_set_sh_reg(sh_base + sh_dw_offset * 4,
                           sctx->vb_descriptors_buffer->gpu_address +
                           sctx->vb_descriptors_offset);
         sctx->vertex_buffer_pointer_dirty = false;
@@ -1795,7 +1795,7 @@ static bool si_upload_and_prefetch_VB_descriptors(struct si_context *sctx)

         unsigned num_sgprs = MIN2(count, num_vbos_in_user_sgprs) * 4;

-        radeon_set_sh_reg_seq(cs, sh_base + SI_SGPR_VS_VB_DESCRIPTOR_FIRST * 4, num_sgprs);
+        radeon_set_sh_reg_seq(sh_base + SI_SGPR_VS_VB_DESCRIPTOR_FIRST * 4, num_sgprs);
         radeon_emit_array(sctx->vb_descriptor_user_sgprs, num_sgprs);
         sctx->vertex_buffer_user_sgprs_dirty = false;
      }
@@ -104,7 +104,7 @@ static void si_emit_cull_state(struct si_context *sctx)
    radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->small_prim_cull_info_buf,
                              RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
    radeon_begin(&sctx->gfx_cs);
-   radeon_set_sh_reg(&sctx->gfx_cs, R_00B220_SPI_SHADER_PGM_LO_GS,
+   radeon_set_sh_reg(R_00B220_SPI_SHADER_PGM_LO_GS,
                      sctx->small_prim_cull_info_address >> 8);
    radeon_end();