mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-07 02:48:06 +02:00
amd/packets: remove the underscore between opcode number and word index, use %x
we are more used to this format Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com> Acked-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/40588>
This commit is contained in:
parent
2aa9ec5018
commit
e281b7b653
15 changed files with 92 additions and 92 deletions
|
|
@ -543,8 +543,8 @@ ac_emit_cp_atomic_mem(struct ac_cmdbuf *cs, uint32_t atomic_op,
|
|||
{
|
||||
ac_cmdbuf_begin(cs);
|
||||
ac_cmdbuf_emit(PKT3(PKT3_ATOMIC_MEM, 7, 0));
|
||||
ac_cmdbuf_emit(S_1E_1_ATOMIC(atomic_op) |
|
||||
S_1E_1_COMMAND(atomic_cmd));
|
||||
ac_cmdbuf_emit(S_1E1_ATOMIC(atomic_op) |
|
||||
S_1E1_COMMAND(atomic_cmd));
|
||||
ac_cmdbuf_emit(va); /* addr lo */
|
||||
ac_cmdbuf_emit(va >> 32); /* addr hi */
|
||||
ac_cmdbuf_emit(data); /* data lo */
|
||||
|
|
|
|||
|
|
@ -3069,18 +3069,18 @@ struct ac_pm4_state *ac_create_shadowing_ib_preamble(const struct radeon_info *i
|
|||
|
||||
ac_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(pm4,
|
||||
S_28_1_UPDATE_LOAD_ENABLES(1) |
|
||||
S_28_1_LOAD_PER_CONTEXT_STATE(1) |
|
||||
S_28_1_LOAD_CS_SH_REGS(1) |
|
||||
S_28_1_LOAD_GFX_SH_REGS(1) |
|
||||
S_28_1_LOAD_GLOBAL_UCONFIG(1));
|
||||
S_281_UPDATE_LOAD_ENABLES(1) |
|
||||
S_281_LOAD_PER_CONTEXT_STATE(1) |
|
||||
S_281_LOAD_CS_SH_REGS(1) |
|
||||
S_281_LOAD_GFX_SH_REGS(1) |
|
||||
S_281_LOAD_GLOBAL_UCONFIG(1));
|
||||
ac_pm4_cmd_add(pm4,
|
||||
S_28_2_UPDATE_SHADOW_ENABLES(1) |
|
||||
S_28_2_SHADOW_PER_CONTEXT_STATE(1) |
|
||||
S_28_2_SHADOW_CS_SH_REGS(1) |
|
||||
S_28_2_SHADOW_GFX_SH_REGS(1) |
|
||||
S_28_2_SHADOW_GLOBAL_UCONFIG(1) |
|
||||
S_28_2_SHADOW_GLOBAL_CONFIG(1));
|
||||
S_282_UPDATE_SHADOW_ENABLES(1) |
|
||||
S_282_SHADOW_PER_CONTEXT_STATE(1) |
|
||||
S_282_SHADOW_CS_SH_REGS(1) |
|
||||
S_282_SHADOW_GFX_SH_REGS(1) |
|
||||
S_282_SHADOW_GLOBAL_UCONFIG(1) |
|
||||
S_282_SHADOW_GLOBAL_CONFIG(1));
|
||||
|
||||
for (unsigned i = 0; i < SI_NUM_REG_RANGES; i++)
|
||||
ac_build_load_reg(info, pm4, i, gpu_address);
|
||||
|
|
|
|||
|
|
@ -594,7 +594,7 @@ ac_sqtt_copy_info_regs(const struct radeon_info *info, struct ac_pm4_state *pm4,
|
|||
uint32_t init_wptr_value = shifted_data_va & 0x1fffffff;
|
||||
|
||||
ac_pm4_cmd_add(pm4, PKT3(PKT3_ATOMIC_MEM, 7, 0));
|
||||
ac_pm4_cmd_add(pm4, S_1E_1_ATOMIC(V_1E_1_GL2_OP_ATOMIC_SUB_RTN_32));
|
||||
ac_pm4_cmd_add(pm4, S_1E1_ATOMIC(V_1E1_GL2_OP_ATOMIC_SUB_RTN_32));
|
||||
ac_pm4_cmd_add(pm4, info_va); /* addr lo */
|
||||
ac_pm4_cmd_add(pm4, info_va >> 32); /* addr hi */
|
||||
ac_pm4_cmd_add(pm4, init_wptr_value); /* data lo */
|
||||
|
|
|
|||
|
|
@ -45,16 +45,16 @@
|
|||
#define PKT3_ATOMIC_MEM 0x1E
|
||||
#define PKT3_OCCLUSION_QUERY 0x1F /* GFX7+ */
|
||||
#define PKT3_SET_PREDICATION 0x20
|
||||
#define PREDICATION_DRAW_NOT_VISIBLE S_20_1_PRED_BOOL(V_20_1_DRAW_IF_NOT_VISIBLE_OR_OVERFLOW)
|
||||
#define PREDICATION_DRAW_VISIBLE S_20_1_PRED_BOOL(V_20_1_DRAW_IF_VISIBLE_OR_NO_OVERFLOW)
|
||||
#define PREDICATION_HINT_WAIT S_20_1_HINT(V_20_1_WAIT_UNTIL_FINAL_ZPASS_WRITTEN)
|
||||
#define PREDICATION_HINT_NOWAIT_DRAW S_20_1_HINT(V_20_1_DRAW_IF_NOT_FINAL_ZPASS_WRITTEN)
|
||||
#define PREDICATION_OP_CLEAR V_20_1_CLEAR_PREDICATE
|
||||
#define PREDICATION_OP_ZPASS V_20_1_SET_ZPASS_PREDICATE
|
||||
#define PREDICATION_OP_PRIMCOUNT V_20_1_SET_PRIMCOUNT_PREDICATE
|
||||
#define PREDICATION_OP_BOOL64 V_20_1_DX12
|
||||
#define PREDICATION_OP_BOOL32 V_20_1_VULKAN
|
||||
#define PREDICATION_CONTINUE S_20_1_CONTINUE_BIT(V_20_1_CONTINUE_SET_PREDICATION)
|
||||
#define PREDICATION_DRAW_NOT_VISIBLE S_201_PRED_BOOL(V_201_DRAW_IF_NOT_VISIBLE_OR_OVERFLOW)
|
||||
#define PREDICATION_DRAW_VISIBLE S_201_PRED_BOOL(V_201_DRAW_IF_VISIBLE_OR_NO_OVERFLOW)
|
||||
#define PREDICATION_HINT_WAIT S_201_HINT(V_201_WAIT_UNTIL_FINAL_ZPASS_WRITTEN)
|
||||
#define PREDICATION_HINT_NOWAIT_DRAW S_201_HINT(V_201_DRAW_IF_NOT_FINAL_ZPASS_WRITTEN)
|
||||
#define PREDICATION_OP_CLEAR V_201_CLEAR_PREDICATE
|
||||
#define PREDICATION_OP_ZPASS V_201_SET_ZPASS_PREDICATE
|
||||
#define PREDICATION_OP_PRIMCOUNT V_201_SET_PRIMCOUNT_PREDICATE
|
||||
#define PREDICATION_OP_BOOL64 V_201_DX12
|
||||
#define PREDICATION_OP_BOOL32 V_201_VULKAN
|
||||
#define PREDICATION_CONTINUE S_201_CONTINUE_BIT(V_201_CONTINUE_SET_PREDICATION)
|
||||
#define PKT3_COND_EXEC 0x22
|
||||
#define PKT3_PRED_EXEC 0x23
|
||||
#define PKT3_DRAW_INDIRECT 0x24
|
||||
|
|
@ -126,7 +126,7 @@
|
|||
#define PKT3_ME_INITIALIZE 0x44 /* GFX6 only */
|
||||
#define PKT3_COND_WRITE 0x45
|
||||
#define PKT3_EVENT_WRITE 0x46
|
||||
#define EVENT_TYPE(x) S_46_1_EVENT_TYPE(x)
|
||||
#define EVENT_TYPE(x) S_461_EVENT_TYPE(x)
|
||||
/* 0 - any non-TS event
|
||||
* 1 - ZPASS_DONE
|
||||
* 2 - SAMPLE_PIPELINESTAT
|
||||
|
|
@ -134,7 +134,7 @@
|
|||
* 4 - *S_PARTIAL_FLUSH
|
||||
* 5 - TS events
|
||||
*/
|
||||
#define EVENT_INDEX(x) S_46_1_EVENT_INDEX(x)
|
||||
#define EVENT_INDEX(x) S_461_EVENT_INDEX(x)
|
||||
#define PIXEL_PIPE_STATE_CNTL_COUNTER_ID(x) ((x) << 3)
|
||||
#define PIXEL_PIPE_STATE_CNTL_STRIDE(x) ((x) << 9)
|
||||
/* 0 - 32 bits
|
||||
|
|
|
|||
|
|
@ -74,20 +74,20 @@ address_field_map = {
|
|||
'ADDR_HI': ([], '', ''),
|
||||
'CONTROL_BUF_ADDR_HI': ([], '', ''),
|
||||
'COUNT_ADDR_HI': ([], '', ''),
|
||||
'DST_MEM_ADDR_HI': ([], ('G_37_1_DST_SEL(dw0) == V_37_1_MEMORY_SYNC_ACROSS_GRBM || ' +
|
||||
'G_37_1_DST_SEL(dw0) == V_37_1_TC_L2 || ' +
|
||||
'G_37_1_DST_SEL(dw0) == V_37_1_MEMORY'), ''),
|
||||
'DST_MEM_ADDR_HI': ([], ('G_371_DST_SEL(dw0) == V_371_MEMORY_SYNC_ACROSS_GRBM || ' +
|
||||
'G_371_DST_SEL(dw0) == V_371_TC_L2 || ' +
|
||||
'G_371_DST_SEL(dw0) == V_371_MEMORY'), ''),
|
||||
'INDEX_BASE_HI': ([], '', ''),
|
||||
'DST_ADDR_HI': (['DMA_DATA'],
|
||||
('G_50_1_DST_SEL(dw0) == V_50_1_DST_ADDR_USING_DAS || ' +
|
||||
'G_50_1_DST_SEL(dw0) == V_50_1_DST_ADDR_USING_L2'),
|
||||
'G_50_6_BYTE_COUNT(dw5)'),
|
||||
('G_501_DST_SEL(dw0) == V_501_DST_ADDR_USING_DAS || ' +
|
||||
'G_501_DST_SEL(dw0) == V_501_DST_ADDR_USING_L2'),
|
||||
'G_506_BYTE_COUNT(dw5)'),
|
||||
'SRC_ADDR_HI': (['DMA_DATA'],
|
||||
('G_50_1_SRC_SEL(dw0) == V_50_1_SRC_ADDR_USING_SAS || ' +
|
||||
'G_50_1_SRC_SEL(dw0) == V_50_1_SRC_ADDR_USING_L2'),
|
||||
'G_50_6_BYTE_COUNT(dw5)'),
|
||||
('G_501_SRC_SEL(dw0) == V_501_SRC_ADDR_USING_SAS || ' +
|
||||
'G_501_SRC_SEL(dw0) == V_501_SRC_ADDR_USING_L2'),
|
||||
'G_506_BYTE_COUNT(dw5)'),
|
||||
'ADDRESS_HI': (['EVENT_WRITE', 'SET_BASE'],
|
||||
'opcode != PKT3_EVENT_WRITE || G_46_1_EVENT_TYPE(dw0) != V_028A90_PIXEL_PIPE_STAT_CONTROL',
|
||||
'opcode != PKT3_EVENT_WRITE || G_461_EVENT_TYPE(dw0) != V_028A90_PIXEL_PIPE_STAT_CONTROL',
|
||||
''),
|
||||
}
|
||||
|
||||
|
|
@ -411,7 +411,7 @@ def print_packet_definitions():
|
|||
field_suffix, field_comment = (
|
||||
get_field_conflict_suffix(gfx_versions, gfx_version_param, engine_name, packet_name, word_index,
|
||||
word_variant_name, field_name, field['bits']))
|
||||
mangled_prefix = '%02X_%d%s' % (opcode, int(word_index) - 1, variant_letter)
|
||||
mangled_prefix = '%02X%d%s' % (opcode, int(word_index) - 1, variant_letter)
|
||||
mangled_name = '%s_%s%s' % (mangled_prefix, field_name.upper(), field_suffix.upper())
|
||||
|
||||
first_bit, last_bit = get_field_bits(field)
|
||||
|
|
@ -420,8 +420,8 @@ def print_packet_definitions():
|
|||
clear_mask = (bitmask << first_bit) ^ 0xffffffff
|
||||
|
||||
assert num_bits < 32
|
||||
encode_field = '(((unsigned)(x) & 0x%x) << %d)' % (bitmask, first_bit)
|
||||
decode_field = '(((x) >> %d) & 0x%x)' % (first_bit, bitmask)
|
||||
encode_field = '(((unsigned)(x) & 0x%X) << %d)' % (bitmask, first_bit)
|
||||
decode_field = '(((x) >> %d) & 0x%X)' % (first_bit, bitmask)
|
||||
clear_field = '0x%08X' % clear_mask
|
||||
|
||||
print2('#define S_%s(x)' % mangled_name, encode_field + field_comment)
|
||||
|
|
@ -733,7 +733,7 @@ def print_packet_parser(is_header):
|
|||
|
||||
if has_engine_sel:
|
||||
# Generate an expression that checks ENGINE_SEL
|
||||
engine_sel_infix = ('%X_1%s' %
|
||||
engine_sel_infix = ('%02X1%s' %
|
||||
(opcodes[packet_name], '' if len(packet_dict['pfp']['word']['2']) == 1 else 'A'))
|
||||
engine_sel_getter = 'G_%s_ENGINE_SEL' % engine_sel_infix
|
||||
|
||||
|
|
|
|||
|
|
@ -10749,8 +10749,8 @@ radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer, bool index
|
|||
radeon_emit(0);
|
||||
radeon_emit(vertex_offset_reg);
|
||||
radeon_emit(start_instance_reg);
|
||||
radeon_emit(draw_id_reg | S_2C_4_DRAW_INDEX_ENABLE(draw_id_enable) | S_2C_4_COUNT_INDIRECT_ENABLE(!!count_va) |
|
||||
S_2C_4_THREAD_TRACE_MARKER_ENABLE(sqtt_en));
|
||||
radeon_emit(draw_id_reg | S_2C4_DRAW_INDEX_ENABLE(draw_id_enable) | S_2C4_COUNT_INDIRECT_ENABLE(!!count_va) |
|
||||
S_2C4_THREAD_TRACE_MARKER_ENABLE(sqtt_en));
|
||||
radeon_emit(draw_count); /* count */
|
||||
radeon_emit(count_va); /* count_addr */
|
||||
radeon_emit(count_va >> 32);
|
||||
|
|
@ -15331,7 +15331,7 @@ radv_emit_set_predication_state(struct radv_cmd_buffer *cmd_buffer, bool draw_vi
|
|||
if (va) {
|
||||
assert(pred_op == PREDICATION_OP_BOOL32 || pred_op == PREDICATION_OP_BOOL64);
|
||||
|
||||
op = S_20_1_PRED_OP(pred_op);
|
||||
op = S_201_PRED_OP(pred_op);
|
||||
|
||||
/* PREDICATION_DRAW_VISIBLE means that if the 32-bit value is zero, all
|
||||
* rendering commands are discarded. Otherwise, they are discarded if
|
||||
|
|
|
|||
|
|
@ -1410,8 +1410,8 @@ dgc_emit_pkt3_draw_indirect(struct dgc_cmdbuf *cs, nir_def *has_drawid, bool ind
|
|||
dgc_cs_emit_imm(0);
|
||||
dgc_cs_emit(vertex_offset_reg);
|
||||
dgc_cs_emit(nir_bcsel(b, has_baseinstance, start_instance_reg, nir_imm_int(b, 0)));
|
||||
dgc_cs_emit(nir_ior_imm(b, nir_ior(b, draw_id_reg, nir_imm_int(b, S_2C_4_DRAW_INDEX_ENABLE(1))),
|
||||
S_2C_4_THREAD_TRACE_MARKER_ENABLE(sqtt_en)));
|
||||
dgc_cs_emit(nir_ior_imm(b, nir_ior(b, draw_id_reg, nir_imm_int(b, S_2C4_DRAW_INDEX_ENABLE(1))),
|
||||
S_2C4_THREAD_TRACE_MARKER_ENABLE(sqtt_en)));
|
||||
dgc_cs_emit_imm(1); /* draw count */
|
||||
dgc_cs_emit_imm(0); /* count va low */
|
||||
dgc_cs_emit_imm(0); /* count va high */
|
||||
|
|
@ -1542,7 +1542,7 @@ dgc_emit_draw_with_count(struct dgc_cmdbuf *cs, nir_def *stream_addr, nir_def *s
|
|||
nir_def *start_instance_reg =
|
||||
nir_bcsel(b, has_baseinstance, nir_iadd(b, vertex_offset_reg, start_instance_offset), nir_imm_int(b, 0));
|
||||
nir_def *draw_id_reg = nir_bcsel(
|
||||
b, has_drawid, nir_ior_imm(b, nir_iadd(b, vertex_offset_reg, nir_imm_int(b, 1)), S_2C_4_DRAW_INDEX_ENABLE(1)),
|
||||
b, has_drawid, nir_ior_imm(b, nir_iadd(b, vertex_offset_reg, nir_imm_int(b, 1)), S_2C4_DRAW_INDEX_ENABLE(1)),
|
||||
nir_imm_int(b, 0));
|
||||
|
||||
nir_def *di_src_sel = nir_imm_int(b, indexed ? V_0287F0_DI_SRC_SEL_DMA : V_0287F0_DI_SRC_SEL_AUTO_INDEX);
|
||||
|
|
@ -1555,7 +1555,7 @@ dgc_emit_draw_with_count(struct dgc_cmdbuf *cs, nir_def *stream_addr, nir_def *s
|
|||
dgc_cs_emit_imm(0);
|
||||
dgc_cs_emit(vertex_offset_reg);
|
||||
dgc_cs_emit(start_instance_reg);
|
||||
dgc_cs_emit(nir_ior_imm(b, draw_id_reg, S_2C_4_THREAD_TRACE_MARKER_ENABLE(sqtt_en)));
|
||||
dgc_cs_emit(nir_ior_imm(b, draw_id_reg, S_2C4_THREAD_TRACE_MARKER_ENABLE(sqtt_en)));
|
||||
dgc_cs_emit(draw_count);
|
||||
dgc_cs_emit_imm(0);
|
||||
dgc_cs_emit_imm(0);
|
||||
|
|
|
|||
|
|
@ -686,8 +686,8 @@ radv_emit_graphics(struct radv_device *device, struct radv_cmd_stream *cs)
|
|||
|
||||
if (!device->uses_shadow_regs) {
|
||||
ac_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(pm4, S_28_1_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(pm4, S_28_2_UPDATE_SHADOW_ENABLES(1));
|
||||
ac_pm4_cmd_add(pm4, S_281_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(pm4, S_282_UPDATE_SHADOW_ENABLES(1));
|
||||
|
||||
if (has_clear_state) {
|
||||
ac_pm4_cmd_add(pm4, PKT3(PKT3_CLEAR_STATE, 0, 0));
|
||||
|
|
@ -1536,7 +1536,7 @@ radv_create_perf_counter_lock_cs(struct radv_device *device, unsigned pass, bool
|
|||
if (!unlock) {
|
||||
uint64_t mutex_va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_LOCK_OFFSET;
|
||||
|
||||
ac_emit_cp_atomic_mem(cs->b, V_1E_1_GL2_OP_ATOMIC_CMPSWAP_32, V_1E_1_LOOP_UNTIL_COMPARE_SATISFIED, mutex_va, 1, 0);
|
||||
ac_emit_cp_atomic_mem(cs->b, V_1E1_GL2_OP_ATOMIC_CMPSWAP_32, V_1E1_LOOP_UNTIL_COMPARE_SATISFIED, mutex_va, 1, 0);
|
||||
}
|
||||
|
||||
uint64_t va = radv_buffer_get_va(device->perf_counter_bo) + PERF_CTR_BO_PASS_OFFSET;
|
||||
|
|
|
|||
|
|
@ -29,13 +29,13 @@ bool si_init_cp_reg_shadowing(struct si_context *sctx)
|
|||
}
|
||||
|
||||
ac_pm4_cmd_add(shadowing_pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(shadowing_pm4, S_28_1_UPDATE_LOAD_ENABLES(1) |
|
||||
S_28_1_LOAD_PER_CONTEXT_STATE(1) | S_28_1_LOAD_CS_SH_REGS(1) |
|
||||
S_28_1_LOAD_GFX_SH_REGS(1) | S_28_1_LOAD_GLOBAL_UCONFIG(1));
|
||||
ac_pm4_cmd_add(shadowing_pm4, S_28_2_UPDATE_SHADOW_ENABLES(1) |
|
||||
S_28_2_SHADOW_PER_CONTEXT_STATE(1) | S_28_2_SHADOW_CS_SH_REGS(1) |
|
||||
S_28_2_SHADOW_GFX_SH_REGS(1) | S_28_2_SHADOW_GLOBAL_UCONFIG(1) |
|
||||
S_28_2_SHADOW_GLOBAL_CONFIG(1));
|
||||
ac_pm4_cmd_add(shadowing_pm4, S_281_UPDATE_LOAD_ENABLES(1) |
|
||||
S_281_LOAD_PER_CONTEXT_STATE(1) | S_281_LOAD_CS_SH_REGS(1) |
|
||||
S_281_LOAD_GFX_SH_REGS(1) | S_281_LOAD_GLOBAL_UCONFIG(1));
|
||||
ac_pm4_cmd_add(shadowing_pm4, S_282_UPDATE_SHADOW_ENABLES(1) |
|
||||
S_282_SHADOW_PER_CONTEXT_STATE(1) | S_282_SHADOW_CS_SH_REGS(1) |
|
||||
S_282_SHADOW_GFX_SH_REGS(1) | S_282_SHADOW_GLOBAL_UCONFIG(1) |
|
||||
S_282_SHADOW_GLOBAL_CONFIG(1));
|
||||
|
||||
for (unsigned i = 0; i < SI_NUM_REG_RANGES; i++)
|
||||
ac_build_load_reg(&sctx->screen->info, shadowing_pm4, i,
|
||||
|
|
|
|||
|
|
@ -1083,7 +1083,7 @@ static void si_emit_query_predication(struct si_context *ctx, unsigned index)
|
|||
struct gfx11_sh_query *gfx10_query = (struct gfx11_sh_query *)query;
|
||||
struct gfx11_sh_query_buffer *qbuf, *first, *last;
|
||||
|
||||
op = S_20_1_PRED_OP(PREDICATION_OP_PRIMCOUNT);
|
||||
op = S_201_PRED_OP(PREDICATION_OP_PRIMCOUNT);
|
||||
|
||||
/* if true then invert, see GL_ARB_conditional_render_inverted */
|
||||
if (!invert)
|
||||
|
|
@ -1131,17 +1131,17 @@ static void si_emit_query_predication(struct si_context *ctx, unsigned index)
|
|||
struct si_query_buffer *qbuf;
|
||||
|
||||
if (query->workaround_buf) {
|
||||
op = S_20_1_PRED_OP(PREDICATION_OP_BOOL64);
|
||||
op = S_201_PRED_OP(PREDICATION_OP_BOOL64);
|
||||
} else {
|
||||
switch (query->b.type) {
|
||||
case PIPE_QUERY_OCCLUSION_COUNTER:
|
||||
case PIPE_QUERY_OCCLUSION_PREDICATE:
|
||||
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
|
||||
op = S_20_1_PRED_OP(PREDICATION_OP_ZPASS);
|
||||
op = S_201_PRED_OP(PREDICATION_OP_ZPASS);
|
||||
break;
|
||||
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
|
||||
case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
|
||||
op = S_20_1_PRED_OP(PREDICATION_OP_PRIMCOUNT);
|
||||
op = S_201_PRED_OP(PREDICATION_OP_PRIMCOUNT);
|
||||
invert = !invert;
|
||||
break;
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -107,8 +107,8 @@ static void si_sqtt_start(struct si_context *sctx, struct radeon_cmdbuf *cs)
|
|||
switch (ip_type) {
|
||||
case AMD_IP_GFX:
|
||||
radeon_emit(PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
radeon_emit(S_28_1_UPDATE_LOAD_ENABLES(1));
|
||||
radeon_emit(S_28_2_UPDATE_SHADOW_ENABLES(1));
|
||||
radeon_emit(S_281_UPDATE_LOAD_ENABLES(1));
|
||||
radeon_emit(S_282_UPDATE_SHADOW_ENABLES(1));
|
||||
break;
|
||||
case AMD_IP_COMPUTE:
|
||||
radeon_emit(PKT3(PKT3_NOP, 0, 0));
|
||||
|
|
@ -160,8 +160,8 @@ static void si_sqtt_stop(struct si_context *sctx, struct radeon_cmdbuf *cs)
|
|||
switch (ip_type) {
|
||||
case AMD_IP_GFX:
|
||||
radeon_emit(PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
radeon_emit(S_28_1_UPDATE_LOAD_ENABLES(1));
|
||||
radeon_emit(S_28_2_UPDATE_SHADOW_ENABLES(1));
|
||||
radeon_emit(S_281_UPDATE_LOAD_ENABLES(1));
|
||||
radeon_emit(S_282_UPDATE_SHADOW_ENABLES(1));
|
||||
break;
|
||||
case AMD_IP_COMPUTE:
|
||||
radeon_emit(PKT3(PKT3_NOP, 0, 0));
|
||||
|
|
|
|||
|
|
@ -4867,8 +4867,8 @@ static bool gfx6_init_gfx_preamble_state(struct si_context *sctx)
|
|||
|
||||
if (sctx->is_gfx_queue && !sctx->uses_kernelq_reg_shadowing) {
|
||||
ac_pm4_cmd_add(&pm4->base, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_1_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_2_UPDATE_SHADOW_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_281_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_282_UPDATE_SHADOW_ENABLES(1));
|
||||
|
||||
if (sscreen->dpbb_allowed) {
|
||||
ac_pm4_cmd_add(&pm4->base, PKT3(PKT3_EVENT_WRITE, 0, 0));
|
||||
|
|
@ -4957,17 +4957,17 @@ static bool gfx10_init_gfx_preamble_state(struct si_context *sctx)
|
|||
*/
|
||||
if (sctx->gfx_level != GFX11_5) {
|
||||
ac_pm4_cmd_add(&pm4->base, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_1_UPDATE_LOAD_ENABLES(1) | S_28_1_LOAD_PER_CONTEXT_STATE(1) |
|
||||
S_28_1_LOAD_CS_SH_REGS(1) | S_28_1_LOAD_GFX_SH_REGS(1) |
|
||||
S_28_1_LOAD_GLOBAL_UCONFIG(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_2_UPDATE_SHADOW_ENABLES(1) | S_28_2_SHADOW_PER_CONTEXT_STATE(1) |
|
||||
S_28_2_SHADOW_CS_SH_REGS(1) | S_28_2_SHADOW_GFX_SH_REGS(1) |
|
||||
S_28_2_SHADOW_GLOBAL_UCONFIG(1) | S_28_2_SHADOW_GLOBAL_CONFIG(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_281_UPDATE_LOAD_ENABLES(1) | S_281_LOAD_PER_CONTEXT_STATE(1) |
|
||||
S_281_LOAD_CS_SH_REGS(1) | S_281_LOAD_GFX_SH_REGS(1) |
|
||||
S_281_LOAD_GLOBAL_UCONFIG(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_282_UPDATE_SHADOW_ENABLES(1) | S_282_SHADOW_PER_CONTEXT_STATE(1) |
|
||||
S_282_SHADOW_CS_SH_REGS(1) | S_282_SHADOW_GFX_SH_REGS(1) |
|
||||
S_282_SHADOW_GLOBAL_UCONFIG(1) | S_282_SHADOW_GLOBAL_CONFIG(1));
|
||||
}
|
||||
} else if (sctx->is_gfx_queue && !sctx->uses_kernelq_reg_shadowing) {
|
||||
ac_pm4_cmd_add(&pm4->base, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_1_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_2_UPDATE_SHADOW_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_281_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_282_UPDATE_SHADOW_ENABLES(1));
|
||||
|
||||
if (sscreen->dpbb_allowed) {
|
||||
ac_pm4_cmd_add(&pm4->base, PKT3(PKT3_EVENT_WRITE, 0, 0));
|
||||
|
|
@ -5038,16 +5038,16 @@ static bool gfx12_init_gfx_preamble_state(struct si_context *sctx)
|
|||
|
||||
if (sctx->uses_userq_reg_shadowing) {
|
||||
ac_pm4_cmd_add(&pm4->base, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_1_UPDATE_LOAD_ENABLES(1) | S_28_1_LOAD_PER_CONTEXT_STATE(1) |
|
||||
S_28_1_LOAD_CS_SH_REGS(1) | S_28_1_LOAD_GFX_SH_REGS(1) |
|
||||
S_28_1_LOAD_GLOBAL_UCONFIG(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_2_UPDATE_SHADOW_ENABLES(1) | S_28_2_SHADOW_PER_CONTEXT_STATE(1) |
|
||||
S_28_2_SHADOW_CS_SH_REGS(1) | S_28_2_SHADOW_GFX_SH_REGS(1) |
|
||||
S_28_2_SHADOW_GLOBAL_UCONFIG(1) | S_28_2_SHADOW_GLOBAL_CONFIG(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_281_UPDATE_LOAD_ENABLES(1) | S_281_LOAD_PER_CONTEXT_STATE(1) |
|
||||
S_281_LOAD_CS_SH_REGS(1) | S_281_LOAD_GFX_SH_REGS(1) |
|
||||
S_281_LOAD_GLOBAL_UCONFIG(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_282_UPDATE_SHADOW_ENABLES(1) | S_282_SHADOW_PER_CONTEXT_STATE(1) |
|
||||
S_282_SHADOW_CS_SH_REGS(1) | S_282_SHADOW_GFX_SH_REGS(1) |
|
||||
S_282_SHADOW_GLOBAL_UCONFIG(1) | S_282_SHADOW_GLOBAL_CONFIG(1));
|
||||
} else if (sctx->is_gfx_queue && !sctx->uses_kernelq_reg_shadowing) {
|
||||
ac_pm4_cmd_add(&pm4->base, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_1_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_28_2_UPDATE_SHADOW_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_281_UPDATE_LOAD_ENABLES(1));
|
||||
ac_pm4_cmd_add(&pm4->base, S_282_UPDATE_SHADOW_ENABLES(1));
|
||||
}
|
||||
|
||||
if (sctx->is_gfx_queue && sscreen->dpbb_allowed && !sctx->uses_userq_reg_shadowing) {
|
||||
|
|
|
|||
|
|
@ -1610,8 +1610,8 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
|
|||
radeon_emit((sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
|
||||
radeon_emit((sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
|
||||
radeon_emit(((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
|
||||
S_2C_4_DRAW_INDEX_ENABLE(sctx->vs_uses_draw_id) |
|
||||
S_2C_4_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
|
||||
S_2C4_DRAW_INDEX_ENABLE(sctx->vs_uses_draw_id) |
|
||||
S_2C4_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
|
||||
radeon_emit(indirect->draw_count);
|
||||
radeon_emit(count_va);
|
||||
radeon_emit(count_va >> 32);
|
||||
|
|
|
|||
|
|
@ -1547,17 +1547,17 @@ static void amdgpu_cs_add_userq_packets(struct amdgpu_winsys *aws,
|
|||
amdgpu_pkt_add_dw(PKT3(PKT3_INDIRECT_BUFFER, 2, 0));
|
||||
amdgpu_pkt_add_dw(amdgpu_bo_get_va(userq->f32_shadowing_ib_bo));
|
||||
amdgpu_pkt_add_dw(amdgpu_bo_get_va(userq->f32_shadowing_ib_bo) >> 32);
|
||||
amdgpu_pkt_add_dw(userq->f32_shadowing_ib_pm4_dw | S_3F_3_INHERIT_VMID_PFP(1));
|
||||
amdgpu_pkt_add_dw(userq->f32_shadowing_ib_pm4_dw | S_3F3_INHERIT_VMID_PFP(1));
|
||||
}
|
||||
|
||||
amdgpu_pkt_add_dw(PKT3(PKT3_INDIRECT_BUFFER, 2, 0));
|
||||
amdgpu_pkt_add_dw(csc->chunk_ib[IB_MAIN].va_start);
|
||||
amdgpu_pkt_add_dw(csc->chunk_ib[IB_MAIN].va_start >> 32);
|
||||
if (userq->ip_type == AMD_IP_GFX)
|
||||
amdgpu_pkt_add_dw((csc->chunk_ib[IB_MAIN].ib_bytes / 4) | S_3F_3_INHERIT_VMID_PFP(1));
|
||||
amdgpu_pkt_add_dw((csc->chunk_ib[IB_MAIN].ib_bytes / 4) | S_3F3_INHERIT_VMID_PFP(1));
|
||||
else
|
||||
amdgpu_pkt_add_dw((csc->chunk_ib[IB_MAIN].ib_bytes / 4) | S_3F_3_VALID(1) |
|
||||
S_3F_3_INHERIT_VMID_MEC(1));
|
||||
amdgpu_pkt_add_dw((csc->chunk_ib[IB_MAIN].ib_bytes / 4) | S_3F3_VALID(1) |
|
||||
S_3F3_INHERIT_VMID_MEC(1));
|
||||
|
||||
/* Add 8 for release mem packet and 2 for protected fence signal packet.
|
||||
* Calculating userq_fence_seq_num this way to match with kernel fence that is
|
||||
|
|
@ -1601,7 +1601,7 @@ static void amdgpu_cs_add_userq_packets(struct amdgpu_winsys *aws,
|
|||
for (unsigned i = 0; i < 1 + DIV_ROUND_UP(num_fences, 4); i++)
|
||||
*cond_exec_skip_counts[i].count_dw_ptr = (amdgpu_pkt_get_next_wptr() -
|
||||
cond_exec_skip_counts[i].start_wptr) |
|
||||
S_22_4_EXEC_USERQ_OVERRULE_CMD(1);
|
||||
S_224_EXEC_USERQ_OVERRULE_CMD(1);
|
||||
}
|
||||
} else {
|
||||
mesa_loge("amdgpu: unsupported userq ip submission = %d\n", userq->ip_type);
|
||||
|
|
|
|||
|
|
@ -320,7 +320,7 @@ amdgpu_userq_submit_cs_preamble_ib_once(struct radeon_cmdbuf *rcs, struct ac_pm4
|
|||
amdgpu_pkt_add_dw(PKT3(PKT3_INDIRECT_BUFFER, 2, 0));
|
||||
amdgpu_pkt_add_dw(amdgpu_bo_get_va(userq->cs_preamble_ib_bo));
|
||||
amdgpu_pkt_add_dw(amdgpu_bo_get_va(userq->cs_preamble_ib_bo) >> 32);
|
||||
amdgpu_pkt_add_dw(pm4->ndw | S_3F_3_INHERIT_VMID_PFP(1));
|
||||
amdgpu_pkt_add_dw(pm4->ndw | S_3F3_INHERIT_VMID_PFP(1));
|
||||
amdgpu_pkt_end();
|
||||
|
||||
simple_mtx_unlock(&userq->lock);
|
||||
|
|
@ -367,7 +367,7 @@ amdgpu_userq_f32_init_reg_shadowing(struct radeon_cmdbuf *rcs, struct ac_pm4_sta
|
|||
amdgpu_pkt_add_dw(PKT3(PKT3_INDIRECT_BUFFER, 2, 0));
|
||||
amdgpu_pkt_add_dw(amdgpu_bo_get_va(userq->f32_shadowing_ib_bo));
|
||||
amdgpu_pkt_add_dw(amdgpu_bo_get_va(userq->f32_shadowing_ib_bo) >> 32);
|
||||
amdgpu_pkt_add_dw(pm4->ndw | S_3F_3_INHERIT_VMID_PFP(1));
|
||||
amdgpu_pkt_add_dw(pm4->ndw | S_3F3_INHERIT_VMID_PFP(1));
|
||||
amdgpu_pkt_end();
|
||||
|
||||
simple_mtx_unlock(&userq->lock);
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue