2019-05-20 14:58:23 +02:00
|
|
|
/*
|
|
|
|
|
* Copyright © 2018 Valve Corporation
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
|
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
|
|
|
* to deal in the Software without restriction, including without limitation
|
|
|
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
|
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
|
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice (including the next
|
|
|
|
|
* paragraph) shall be included in all copies or substantial portions of the
|
|
|
|
|
* Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
|
|
|
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
|
* IN THE SOFTWARE.
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include "nir.h"
|
|
|
|
|
|
|
|
|
|
/* This pass computes for each ssa definition if it is uniform.
|
|
|
|
|
* That is, the variable has the same value for all invocations
|
|
|
|
|
* of the group.
|
|
|
|
|
*
|
2025-05-31 12:26:16 -04:00
|
|
|
* If the shader is not in LCSSA-form, passes need to use nir_src_is_divergent()
|
|
|
|
|
* instead of reading the value from src->ssa->divergent as without LCSSA a src
|
|
|
|
|
* can have a different divergence than the corresponding SSA-def.
|
2019-05-20 14:58:23 +02:00
|
|
|
*
|
|
|
|
|
* This algorithm implements "The Simple Divergence Analysis" from
|
|
|
|
|
* Diogo Sampaio, Rafael De Souza, Sylvain Collange, Fernando Magno Quintão Pereira.
|
|
|
|
|
* Divergence Analysis. ACM Transactions on Programming Languages and Systems (TOPLAS),
|
|
|
|
|
* ACM, 2013, 35 (4), pp.13:1-13:36. <10.1145/2523815>. <hal-00909072v2>
|
|
|
|
|
*/
|
|
|
|
|
|
2020-02-05 11:53:04 +01:00
|
|
|
/* Per-run context threaded through every visitor of the divergence
 * analysis. Holds the immutable configuration (stage, options) plus the
 * mutable control-flow state that tracks divergence while walking the CF
 * tree.
 */
struct divergence_state {
   const gl_shader_stage stage;
   nir_shader *shader;
   nir_function_impl *impl;
   nir_divergence_options options;
   /* Innermost loop currently being visited, if any. */
   nir_loop *loop;
   bool loop_all_invariant;

   /* Whether the caller requested vertex divergence (meaning between vertices
    * of the same primitive) instead of subgroup invocation divergence
    * (between invocations of the same subgroup). For example, patch input
    * loads are always convergent, while subgroup intrinsics are divergent
    * because vertices of the same primitive can be processed by different
    * subgroups.
    */
   bool vertex_divergence;

   /** current control flow state */
   /* True if some loop-active invocations might take a different control-flow path.
    * A divergent break does not cause subsequent control-flow to be considered
    * divergent because those invocations are no longer active in the loop.
    * For a divergent if, both sides are considered divergent flow because
    * the other side is still loop-active. */
   bool divergent_loop_cf;
   /* True if a divergent continue happened since the loop header */
   bool divergent_loop_continue;
   /* True if a divergent break happened since the loop header */
   bool divergent_loop_break;

   /* True if we visit the block for the first time */
   bool first_visit;
   /* True if we visit a block that is dominated by a loop with a divergent break */
   bool consider_loop_invariance;
};
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
/* Forward declaration: the per-node visitors recurse into nested
 * control-flow lists (if/loop bodies) through this function. */
static bool
visit_cf_list(struct exec_list *list, struct divergence_state *state);
|
2019-05-20 14:58:23 +02:00
|
|
|
|
2024-09-10 12:31:27 +02:00
|
|
|
bool
|
|
|
|
|
nir_src_is_divergent(nir_src *src)
|
|
|
|
|
{
|
2024-09-03 13:00:14 +02:00
|
|
|
if (src->ssa->divergent)
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
nir_cf_node *use_node = nir_src_get_block(src)->cf_node.parent;
|
|
|
|
|
nir_cf_node *def_node = src->ssa->parent_instr->block->cf_node.parent;
|
|
|
|
|
|
|
|
|
|
/* Short-cut the common case. */
|
|
|
|
|
if (def_node == use_node)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* If the source was computed in a divergent loop, and is not
|
|
|
|
|
* loop-invariant, then it must also be considered divergent.
|
|
|
|
|
*/
|
|
|
|
|
bool loop_invariant = src->ssa->loop_invariant;
|
|
|
|
|
while (def_node) {
|
|
|
|
|
if (def_node->type == nir_cf_node_loop) {
|
|
|
|
|
/* Check whether the use is inside this loop. */
|
|
|
|
|
for (nir_cf_node *node = use_node; node != NULL; node = node->parent) {
|
|
|
|
|
if (def_node == node)
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Because the use is outside of this loop, it is divergent. */
|
|
|
|
|
if (nir_cf_node_as_loop(def_node)->divergent_break && !loop_invariant)
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
/* For outer loops, consider this variable not loop invariant. */
|
|
|
|
|
loop_invariant = false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
def_node = def_node->parent;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return false;
|
2024-09-10 12:31:27 +02:00
|
|
|
}
|
|
|
|
|
|
2024-09-03 16:14:25 +02:00
|
|
|
static inline bool
|
|
|
|
|
src_divergent(nir_src src, struct divergence_state *state)
|
|
|
|
|
{
|
2024-09-04 10:54:50 +02:00
|
|
|
if (!state->consider_loop_invariance)
|
|
|
|
|
return src.ssa->divergent;
|
|
|
|
|
|
2024-09-03 16:14:25 +02:00
|
|
|
return nir_src_is_divergent(&src);
|
|
|
|
|
}
|
|
|
|
|
|
2024-09-03 15:59:36 +02:00
|
|
|
static inline bool
|
|
|
|
|
src_invariant(nir_src *src, void *loop)
|
|
|
|
|
{
|
|
|
|
|
nir_block *first_block = nir_loop_first_block(loop);
|
|
|
|
|
|
|
|
|
|
/* Invariant if SSA is defined before the current loop. */
|
|
|
|
|
if (src->ssa->parent_instr->block->index < first_block->index)
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
if (!src->ssa->loop_invariant)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* The value might be defined in a nested loop. */
|
|
|
|
|
nir_cf_node *cf_node = src->ssa->parent_instr->block->cf_node.parent;
|
|
|
|
|
while (cf_node->type != nir_cf_node_loop)
|
|
|
|
|
cf_node = cf_node->parent;
|
|
|
|
|
|
|
|
|
|
return nir_cf_node_as_loop(cf_node) == loop;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
static bool
|
2024-02-17 08:48:31 -05:00
|
|
|
visit_alu(nir_alu_instr *instr, struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
2023-08-14 11:43:35 -05:00
|
|
|
if (instr->def.divergent)
|
2019-05-20 14:58:23 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
unsigned num_src = nir_op_infos[instr->op].num_inputs;
|
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < num_src; i++) {
|
2024-09-03 16:14:25 +02:00
|
|
|
if (src_divergent(instr->src[i].src, state)) {
|
2023-08-14 11:43:35 -05:00
|
|
|
instr->def.divergent = true;
|
2019-05-20 14:58:23 +02:00
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-14 19:08:13 +01:00
|
|
|
/* On some HW uniform loads where there is a pending store/atomic from another
|
|
|
|
|
* wave can "tear" so that different invocations see the pre-store value and
|
|
|
|
|
* the post-store value even though they are loading from the same location.
|
|
|
|
|
* This means we have to assume it's not uniform unless it's readonly.
|
|
|
|
|
*
|
|
|
|
|
* TODO The Vulkan memory model is much more strict here and requires an
|
|
|
|
|
* atomic or volatile load for the data race to be valid, which could allow us
|
|
|
|
|
* to do better if it's in use, however we currently don't have that
|
|
|
|
|
* information plumbed through.
|
|
|
|
|
*/
|
|
|
|
|
static bool
|
2024-04-09 18:14:12 +01:00
|
|
|
load_may_tear(struct divergence_state *state, nir_intrinsic_instr *instr)
|
2023-03-14 19:08:13 +01:00
|
|
|
{
|
2024-04-09 18:14:12 +01:00
|
|
|
return (state->options & nir_divergence_uniform_load_tears) &&
|
2023-03-14 19:08:13 +01:00
|
|
|
!(nir_intrinsic_access(instr) & ACCESS_NON_WRITEABLE);
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
static bool
|
2024-02-17 08:48:31 -05:00
|
|
|
visit_intrinsic(nir_intrinsic_instr *instr, struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
|
|
|
|
if (!nir_intrinsic_infos[instr->intrinsic].has_dest)
|
|
|
|
|
return false;
|
|
|
|
|
|
2023-08-14 11:56:00 -05:00
|
|
|
if (instr->def.divergent)
|
2019-05-20 14:58:23 +02:00
|
|
|
return false;
|
|
|
|
|
|
2024-04-09 18:14:12 +01:00
|
|
|
nir_divergence_options options = state->options;
|
2024-02-17 08:48:31 -05:00
|
|
|
gl_shader_stage stage = state->stage;
|
2019-05-20 14:58:23 +02:00
|
|
|
bool is_divergent = false;
|
|
|
|
|
switch (instr->intrinsic) {
|
|
|
|
|
case nir_intrinsic_shader_clock:
|
|
|
|
|
case nir_intrinsic_ballot:
|
2024-01-17 12:03:41 +01:00
|
|
|
case nir_intrinsic_ballot_relaxed:
|
|
|
|
|
case nir_intrinsic_as_uniform:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_read_invocation:
|
|
|
|
|
case nir_intrinsic_read_first_invocation:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_read_invocation_cond_ir3:
|
2024-11-29 17:09:15 +01:00
|
|
|
case nir_intrinsic_read_getlast_ir3:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_vote_any:
|
|
|
|
|
case nir_intrinsic_vote_all:
|
|
|
|
|
case nir_intrinsic_vote_feq:
|
|
|
|
|
case nir_intrinsic_vote_ieq:
|
2023-10-01 00:21:16 -04:00
|
|
|
case nir_intrinsic_first_invocation:
|
|
|
|
|
case nir_intrinsic_last_invocation:
|
|
|
|
|
case nir_intrinsic_load_subgroup_id:
|
2024-09-07 14:22:11 +02:00
|
|
|
case nir_intrinsic_shared_append_amd:
|
|
|
|
|
case nir_intrinsic_shared_consume_amd:
|
2025-07-08 16:09:37 -04:00
|
|
|
case nir_intrinsic_load_sm_id_nv:
|
|
|
|
|
case nir_intrinsic_load_warp_id_nv:
|
2023-10-01 00:21:16 -04:00
|
|
|
/* VS/TES/GS invocations of the same primitive can be in different
|
|
|
|
|
* subgroups, so subgroup ops are always divergent between vertices of
|
|
|
|
|
* the same primitive.
|
|
|
|
|
*/
|
2024-02-17 08:48:31 -05:00
|
|
|
is_divergent = state->vertex_divergence;
|
2023-10-01 00:21:16 -04:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
/* Intrinsics which are always uniform */
|
2024-08-01 22:06:17 -04:00
|
|
|
case nir_intrinsic_load_preamble:
|
2021-01-12 13:01:34 +01:00
|
|
|
case nir_intrinsic_load_push_constant:
|
2024-03-27 09:28:26 -04:00
|
|
|
case nir_intrinsic_load_push_constant_zink:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_work_dim:
|
2021-06-04 12:04:15 -07:00
|
|
|
case nir_intrinsic_load_num_workgroups:
|
2021-05-27 14:44:54 -07:00
|
|
|
case nir_intrinsic_load_workgroup_size:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_num_subgroups:
|
2021-04-11 15:45:27 +02:00
|
|
|
case nir_intrinsic_load_ray_launch_size:
|
2022-05-12 20:22:59 +02:00
|
|
|
case nir_intrinsic_load_sbt_base_amd:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_subgroup_size:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_subgroup_id_shift_ir3:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_base_instance:
|
|
|
|
|
case nir_intrinsic_load_base_vertex:
|
|
|
|
|
case nir_intrinsic_load_first_vertex:
|
|
|
|
|
case nir_intrinsic_load_draw_id:
|
|
|
|
|
case nir_intrinsic_load_is_indexed_draw:
|
|
|
|
|
case nir_intrinsic_load_viewport_scale:
|
|
|
|
|
case nir_intrinsic_load_user_clip_plane:
|
|
|
|
|
case nir_intrinsic_load_viewport_x_scale:
|
|
|
|
|
case nir_intrinsic_load_viewport_y_scale:
|
|
|
|
|
case nir_intrinsic_load_viewport_z_scale:
|
|
|
|
|
case nir_intrinsic_load_viewport_offset:
|
2021-05-04 13:41:14 +02:00
|
|
|
case nir_intrinsic_load_viewport_x_offset:
|
|
|
|
|
case nir_intrinsic_load_viewport_y_offset:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_viewport_z_offset:
|
2023-12-11 03:10:03 -05:00
|
|
|
case nir_intrinsic_load_cull_triangle_viewport_xy_scale_and_offset_amd:
|
|
|
|
|
case nir_intrinsic_load_cull_line_viewport_xy_scale_and_offset_amd:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_blend_const_color_a_float:
|
|
|
|
|
case nir_intrinsic_load_blend_const_color_b_float:
|
|
|
|
|
case nir_intrinsic_load_blend_const_color_g_float:
|
|
|
|
|
case nir_intrinsic_load_blend_const_color_r_float:
|
|
|
|
|
case nir_intrinsic_load_blend_const_color_rgba:
|
|
|
|
|
case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
|
|
|
|
|
case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
|
2021-01-03 20:55:31 -06:00
|
|
|
case nir_intrinsic_load_line_width:
|
|
|
|
|
case nir_intrinsic_load_aa_line_width:
|
2022-04-01 17:20:09 -04:00
|
|
|
case nir_intrinsic_load_xfb_address:
|
2025-05-06 16:13:47 -04:00
|
|
|
case nir_intrinsic_load_rasterization_stream:
|
2022-04-01 17:20:09 -04:00
|
|
|
case nir_intrinsic_load_num_vertices:
|
2020-12-29 12:43:04 -06:00
|
|
|
case nir_intrinsic_load_fb_layers_v3d:
|
2024-03-07 17:50:56 +01:00
|
|
|
case nir_intrinsic_load_fep_w_v3d:
|
2021-02-15 22:01:02 +01:00
|
|
|
case nir_intrinsic_load_tcs_num_patches_amd:
|
2024-03-27 01:08:37 +01:00
|
|
|
case nir_intrinsic_load_tcs_tess_levels_to_tes_amd:
|
|
|
|
|
case nir_intrinsic_load_tcs_primitive_mode_amd:
|
2024-02-08 11:56:36 +01:00
|
|
|
case nir_intrinsic_load_patch_vertices_in:
|
2021-02-15 22:01:02 +01:00
|
|
|
case nir_intrinsic_load_ring_tess_factors_amd:
|
|
|
|
|
case nir_intrinsic_load_ring_tess_offchip_amd:
|
|
|
|
|
case nir_intrinsic_load_ring_tess_factors_offset_amd:
|
|
|
|
|
case nir_intrinsic_load_ring_tess_offchip_offset_amd:
|
2022-06-01 14:32:39 +02:00
|
|
|
case nir_intrinsic_load_ring_mesh_scratch_amd:
|
|
|
|
|
case nir_intrinsic_load_ring_mesh_scratch_offset_amd:
|
2021-02-22 20:16:32 +01:00
|
|
|
case nir_intrinsic_load_ring_esgs_amd:
|
|
|
|
|
case nir_intrinsic_load_ring_es2gs_offset_amd:
|
2022-01-15 13:56:13 +01:00
|
|
|
case nir_intrinsic_load_ring_task_draw_amd:
|
|
|
|
|
case nir_intrinsic_load_ring_task_payload_amd:
|
2022-09-15 14:55:10 +02:00
|
|
|
case nir_intrinsic_load_sample_positions_amd:
|
2022-09-15 14:47:03 +02:00
|
|
|
case nir_intrinsic_load_rasterization_samples_amd:
|
2022-09-28 19:32:26 +01:00
|
|
|
case nir_intrinsic_load_ring_gsvs_amd:
|
2022-11-27 12:25:26 +08:00
|
|
|
case nir_intrinsic_load_ring_gs2vs_offset_amd:
|
2022-09-28 19:32:26 +01:00
|
|
|
case nir_intrinsic_load_streamout_config_amd:
|
|
|
|
|
case nir_intrinsic_load_streamout_write_index_amd:
|
|
|
|
|
case nir_intrinsic_load_streamout_offset_amd:
|
2022-01-15 13:56:13 +01:00
|
|
|
case nir_intrinsic_load_task_ring_entry_amd:
|
2022-05-11 15:14:36 +02:00
|
|
|
case nir_intrinsic_load_ring_attr_amd:
|
|
|
|
|
case nir_intrinsic_load_ring_attr_offset_amd:
|
2022-06-21 09:12:35 +02:00
|
|
|
case nir_intrinsic_load_provoking_vtx_amd:
|
2021-04-03 16:05:09 -04:00
|
|
|
case nir_intrinsic_load_sample_positions_pan:
|
2025-01-30 11:56:16 +00:00
|
|
|
case nir_intrinsic_load_shader_output_pan:
|
2021-05-04 13:40:28 +02:00
|
|
|
case nir_intrinsic_load_workgroup_num_input_vertices_amd:
|
|
|
|
|
case nir_intrinsic_load_workgroup_num_input_primitives_amd:
|
2022-10-10 11:08:40 +08:00
|
|
|
case nir_intrinsic_load_pipeline_stat_query_enabled_amd:
|
|
|
|
|
case nir_intrinsic_load_prim_gen_query_enabled_amd:
|
2022-10-25 14:20:04 +08:00
|
|
|
case nir_intrinsic_load_prim_xfb_query_enabled_amd:
|
2022-10-18 20:52:53 +01:00
|
|
|
case nir_intrinsic_load_merged_wave_info_amd:
|
2022-10-28 10:15:21 +08:00
|
|
|
case nir_intrinsic_load_clamp_vertex_color_amd:
|
2021-05-04 13:41:14 +02:00
|
|
|
case nir_intrinsic_load_cull_front_face_enabled_amd:
|
|
|
|
|
case nir_intrinsic_load_cull_back_face_enabled_amd:
|
|
|
|
|
case nir_intrinsic_load_cull_ccw_amd:
|
2023-12-11 02:50:01 -05:00
|
|
|
case nir_intrinsic_load_cull_small_triangles_enabled_amd:
|
|
|
|
|
case nir_intrinsic_load_cull_small_lines_enabled_amd:
|
2021-05-04 13:41:14 +02:00
|
|
|
case nir_intrinsic_load_cull_any_enabled_amd:
|
2023-12-11 02:50:01 -05:00
|
|
|
case nir_intrinsic_load_cull_small_triangle_precision_amd:
|
|
|
|
|
case nir_intrinsic_load_cull_small_line_precision_amd:
|
2021-11-19 08:26:57 -05:00
|
|
|
case nir_intrinsic_load_user_data_amd:
|
2022-01-25 08:45:12 +01:00
|
|
|
case nir_intrinsic_load_force_vrs_rates_amd:
|
2021-11-19 08:26:57 -05:00
|
|
|
case nir_intrinsic_load_tess_level_inner_default:
|
|
|
|
|
case nir_intrinsic_load_tess_level_outer_default:
|
2020-09-21 13:54:54 +01:00
|
|
|
case nir_intrinsic_load_scalar_arg_amd:
|
|
|
|
|
case nir_intrinsic_load_smem_amd:
|
2022-05-12 23:21:57 +02:00
|
|
|
case nir_intrinsic_load_resume_shader_address_amd:
|
2022-03-16 02:14:52 -07:00
|
|
|
case nir_intrinsic_load_reloc_const_intel:
|
2022-04-21 02:48:58 -07:00
|
|
|
case nir_intrinsic_load_btd_global_arg_addr_intel:
|
|
|
|
|
case nir_intrinsic_load_btd_local_arg_addr_intel:
|
2024-09-30 08:45:21 +03:00
|
|
|
case nir_intrinsic_load_inline_data_intel:
|
2022-05-30 13:53:34 +03:00
|
|
|
case nir_intrinsic_load_ray_num_dss_rt_stacks_intel:
|
2022-05-07 17:34:54 +08:00
|
|
|
case nir_intrinsic_load_lshs_vertex_stride_amd:
|
2023-02-20 00:50:24 -05:00
|
|
|
case nir_intrinsic_load_esgs_vertex_stride_amd:
|
2022-05-23 17:23:57 +08:00
|
|
|
case nir_intrinsic_load_hs_out_patch_data_offset_amd:
|
2022-06-06 16:37:16 +08:00
|
|
|
case nir_intrinsic_load_clip_half_line_width_amd:
|
2022-06-30 17:06:51 +08:00
|
|
|
case nir_intrinsic_load_num_vertices_per_primitive_amd:
|
2022-06-30 17:37:03 +08:00
|
|
|
case nir_intrinsic_load_streamout_buffer_amd:
|
2022-06-30 17:56:50 +08:00
|
|
|
case nir_intrinsic_load_ordered_id_amd:
|
2023-04-26 14:27:10 +02:00
|
|
|
case nir_intrinsic_load_gs_wave_id_amd:
|
2022-06-12 17:57:00 +08:00
|
|
|
case nir_intrinsic_load_provoking_vtx_in_prim_amd:
|
2022-07-22 15:25:45 +08:00
|
|
|
case nir_intrinsic_load_lds_ngg_gs_out_vertex_base_amd:
|
2022-11-23 10:56:49 +02:00
|
|
|
case nir_intrinsic_load_btd_shader_type_intel:
|
2024-03-25 14:09:18 +01:00
|
|
|
case nir_intrinsic_load_base_global_invocation_id:
|
2022-11-29 13:52:43 -06:00
|
|
|
case nir_intrinsic_load_base_workgroup_id:
|
2023-02-24 14:39:17 +08:00
|
|
|
case nir_intrinsic_load_alpha_reference_amd:
|
2023-06-06 18:02:42 +03:00
|
|
|
case nir_intrinsic_load_ubo_uniform_block_intel:
|
2023-03-07 12:11:25 +02:00
|
|
|
case nir_intrinsic_load_ssbo_uniform_block_intel:
|
|
|
|
|
case nir_intrinsic_load_shared_uniform_block_intel:
|
2023-03-02 20:04:52 +08:00
|
|
|
case nir_intrinsic_load_barycentric_optimize_amd:
|
2023-02-28 09:03:52 +01:00
|
|
|
case nir_intrinsic_load_poly_line_smooth_enabled:
|
2023-05-31 16:26:57 +02:00
|
|
|
case nir_intrinsic_load_rasterization_primitive_amd:
|
2024-03-20 12:30:45 +00:00
|
|
|
case nir_intrinsic_unit_test_uniform_amd:
|
2023-06-09 13:57:43 +03:00
|
|
|
case nir_intrinsic_load_global_constant_uniform_block_intel:
|
2023-11-10 19:14:34 -05:00
|
|
|
case nir_intrinsic_load_debug_log_desc_amd:
|
2024-03-04 05:34:30 -05:00
|
|
|
case nir_intrinsic_load_xfb_state_address_gfx12_amd:
|
2023-08-08 11:02:33 -07:00
|
|
|
case nir_intrinsic_cmat_length:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_vs_primitive_stride_ir3:
|
|
|
|
|
case nir_intrinsic_load_vs_vertex_stride_ir3:
|
|
|
|
|
case nir_intrinsic_load_hs_patch_stride_ir3:
|
|
|
|
|
case nir_intrinsic_load_tess_factor_base_ir3:
|
|
|
|
|
case nir_intrinsic_load_tess_param_base_ir3:
|
|
|
|
|
case nir_intrinsic_load_primitive_location_ir3:
|
|
|
|
|
case nir_intrinsic_preamble_start_ir3:
|
2024-04-05 21:51:04 -04:00
|
|
|
case nir_intrinsic_optimization_barrier_sgpr_amd:
|
2024-12-31 09:39:44 -05:00
|
|
|
case nir_intrinsic_load_fbfetch_image_fmask_desc_amd:
|
|
|
|
|
case nir_intrinsic_load_fbfetch_image_desc_amd:
|
2024-12-26 07:01:44 -05:00
|
|
|
case nir_intrinsic_load_polygon_stipple_buffer_amd:
|
2025-06-04 14:08:07 -04:00
|
|
|
case nir_intrinsic_load_tcs_mem_attrib_stride:
|
2024-04-19 10:48:52 +03:00
|
|
|
case nir_intrinsic_load_printf_buffer_address:
|
2025-01-16 09:24:54 -05:00
|
|
|
case nir_intrinsic_load_printf_buffer_size:
|
2024-08-01 22:06:17 -04:00
|
|
|
case nir_intrinsic_load_core_id_agx:
|
|
|
|
|
case nir_intrinsic_load_samples_log2_agx:
|
2024-08-01 22:45:59 -04:00
|
|
|
case nir_intrinsic_load_active_subgroup_count_agx:
|
2025-05-30 12:31:43 -04:00
|
|
|
case nir_intrinsic_load_root_agx:
|
2025-07-08 16:09:37 -04:00
|
|
|
case nir_intrinsic_load_sm_count_nv:
|
|
|
|
|
case nir_intrinsic_load_warps_per_sm_nv:
|
2024-05-12 14:39:14 +03:00
|
|
|
case nir_intrinsic_load_fs_msaa_intel:
|
2024-07-19 19:44:44 +03:00
|
|
|
case nir_intrinsic_load_constant_base_ptr:
|
2025-01-09 22:06:30 +01:00
|
|
|
case nir_intrinsic_load_const_buf_base_addr_lvp:
|
2025-04-29 12:50:42 +03:00
|
|
|
case nir_intrinsic_load_max_polygon_intel:
|
2025-05-30 00:35:28 +03:00
|
|
|
case nir_intrinsic_load_ray_base_mem_addr_intel:
|
|
|
|
|
case nir_intrinsic_load_ray_hw_stack_size_intel:
|
2025-05-19 17:05:15 +03:00
|
|
|
case nir_intrinsic_load_per_primitive_remap_intel:
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent = false;
|
|
|
|
|
break;
|
|
|
|
|
|
2024-01-11 13:14:47 -08:00
|
|
|
/* This is divergent because it specifically loads sequential values into
|
|
|
|
|
* successive SIMD lanes.
|
|
|
|
|
*/
|
|
|
|
|
case nir_intrinsic_load_global_block_intel:
|
|
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
|
2023-11-30 13:30:53 -08:00
|
|
|
case nir_intrinsic_decl_reg:
|
|
|
|
|
is_divergent = nir_intrinsic_divergent(instr);
|
2023-07-19 11:20:16 -05:00
|
|
|
break;
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
/* Intrinsics with divergence depending on shader stage and hardware */
|
2022-12-22 01:32:44 +02:00
|
|
|
case nir_intrinsic_load_shader_record_ptr:
|
|
|
|
|
is_divergent = !(options & nir_divergence_shader_record_ptr_uniform);
|
|
|
|
|
break;
|
2020-10-20 10:41:00 +03:00
|
|
|
case nir_intrinsic_load_frag_shading_rate:
|
|
|
|
|
is_divergent = !(options & nir_divergence_single_frag_shading_rate_per_subgroup);
|
|
|
|
|
break;
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_input:
|
2024-07-06 04:24:31 -04:00
|
|
|
case nir_intrinsic_load_per_primitive_input:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state);
|
2023-10-01 00:21:16 -04:00
|
|
|
|
2024-02-08 11:32:55 +01:00
|
|
|
if (stage == MESA_SHADER_FRAGMENT) {
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent |= !(options & nir_divergence_single_prim_per_subgroup);
|
2024-02-08 11:32:55 +01:00
|
|
|
} else if (stage == MESA_SHADER_TESS_EVAL) {
|
|
|
|
|
/* Patch input loads are uniform between vertices of the same primitive. */
|
2024-02-17 08:48:31 -05:00
|
|
|
if (state->vertex_divergence)
|
2024-02-08 11:32:55 +01:00
|
|
|
is_divergent = false;
|
|
|
|
|
else
|
|
|
|
|
is_divergent |= !(options & nir_divergence_single_patch_per_tes_subgroup);
|
|
|
|
|
} else {
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent = true;
|
2024-02-08 11:32:55 +01:00
|
|
|
}
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
2024-12-12 21:22:11 +00:00
|
|
|
case nir_intrinsic_load_attribute_pan:
|
|
|
|
|
assert(stage == MESA_SHADER_VERTEX);
|
|
|
|
|
is_divergent = src_divergent(instr->src[0], state) ||
|
|
|
|
|
src_divergent(instr->src[1], state) ||
|
|
|
|
|
src_divergent(instr->src[2], state);
|
|
|
|
|
break;
|
2020-05-05 11:03:12 -07:00
|
|
|
case nir_intrinsic_load_per_vertex_input:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state) ||
|
|
|
|
|
src_divergent(instr->src[1], state);
|
2020-05-05 11:03:12 -07:00
|
|
|
if (stage == MESA_SHADER_TESS_CTRL)
|
|
|
|
|
is_divergent |= !(options & nir_divergence_single_patch_per_tcs_subgroup);
|
|
|
|
|
if (stage == MESA_SHADER_TESS_EVAL)
|
|
|
|
|
is_divergent |= !(options & nir_divergence_single_patch_per_tes_subgroup);
|
|
|
|
|
else
|
|
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
2020-01-27 11:34:00 +01:00
|
|
|
case nir_intrinsic_load_input_vertex:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[1], state);
|
2020-01-27 11:34:00 +01:00
|
|
|
assert(stage == MESA_SHADER_FRAGMENT);
|
|
|
|
|
is_divergent |= !(options & nir_divergence_single_prim_per_subgroup);
|
|
|
|
|
break;
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_output:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state);
|
2021-08-26 11:26:04 -07:00
|
|
|
switch (stage) {
|
|
|
|
|
case MESA_SHADER_TESS_CTRL:
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent |= !(options & nir_divergence_single_patch_per_tcs_subgroup);
|
2021-08-26 11:26:04 -07:00
|
|
|
break;
|
|
|
|
|
case MESA_SHADER_FRAGMENT:
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent = true;
|
2021-08-26 11:26:04 -07:00
|
|
|
break;
|
|
|
|
|
case MESA_SHADER_TASK:
|
|
|
|
|
case MESA_SHADER_MESH:
|
2024-02-08 11:29:11 +01:00
|
|
|
/* NV_mesh_shader only (EXT_mesh_shader does not allow loading outputs).
|
|
|
|
|
* Divergent if src[0] is, so nothing else to do.
|
|
|
|
|
*/
|
2021-08-26 11:26:04 -07:00
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
unreachable("Invalid stage for load_output");
|
|
|
|
|
}
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
2024-10-19 21:03:13 -07:00
|
|
|
case nir_intrinsic_load_per_view_output:
|
|
|
|
|
is_divergent = instr->src[0].ssa->divergent ||
|
|
|
|
|
instr->src[1].ssa->divergent ||
|
|
|
|
|
(stage == MESA_SHADER_TESS_CTRL &&
|
|
|
|
|
!(options & nir_divergence_single_patch_per_tcs_subgroup));
|
|
|
|
|
break;
|
2020-05-05 11:03:12 -07:00
|
|
|
case nir_intrinsic_load_per_vertex_output:
|
2024-02-08 11:29:11 +01:00
|
|
|
/* TCS and NV_mesh_shader only (EXT_mesh_shader does not allow loading outputs). */
|
2021-08-26 11:26:04 -07:00
|
|
|
assert(stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_MESH);
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state) ||
|
|
|
|
|
src_divergent(instr->src[1], state) ||
|
2021-08-26 11:26:04 -07:00
|
|
|
(stage == MESA_SHADER_TESS_CTRL &&
|
|
|
|
|
!(options & nir_divergence_single_patch_per_tcs_subgroup));
|
|
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_load_per_primitive_output:
|
2024-02-08 11:29:11 +01:00
|
|
|
/* NV_mesh_shader only (EXT_mesh_shader does not allow loading outputs). */
|
2021-08-26 11:26:04 -07:00
|
|
|
assert(stage == MESA_SHADER_MESH);
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state) ||
|
|
|
|
|
src_divergent(instr->src[1], state);
|
2020-05-05 11:03:12 -07:00
|
|
|
break;
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_layer_id:
|
|
|
|
|
case nir_intrinsic_load_front_face:
|
2024-12-27 18:49:22 +01:00
|
|
|
case nir_intrinsic_load_front_face_fsign:
|
2024-08-01 22:06:17 -04:00
|
|
|
case nir_intrinsic_load_back_face_agx:
|
|
|
|
|
assert(stage == MESA_SHADER_FRAGMENT || state->shader->info.internal);
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent = !(options & nir_divergence_single_prim_per_subgroup);
|
|
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_load_view_index:
|
|
|
|
|
assert(stage != MESA_SHADER_COMPUTE && stage != MESA_SHADER_KERNEL);
|
|
|
|
|
if (options & nir_divergence_view_index_uniform)
|
|
|
|
|
is_divergent = false;
|
|
|
|
|
else if (stage == MESA_SHADER_FRAGMENT)
|
|
|
|
|
is_divergent = !(options & nir_divergence_single_prim_per_subgroup);
|
2023-03-17 13:08:05 +01:00
|
|
|
else
|
|
|
|
|
is_divergent = true;
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_load_fs_input_interp_deltas:
|
|
|
|
|
assert(stage == MESA_SHADER_FRAGMENT);
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state);
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent |= !(options & nir_divergence_single_prim_per_subgroup);
|
|
|
|
|
break;
|
2024-03-07 09:19:08 -05:00
|
|
|
case nir_intrinsic_load_instance_id:
|
|
|
|
|
is_divergent = !state->vertex_divergence;
|
|
|
|
|
break;
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_primitive_id:
|
|
|
|
|
if (stage == MESA_SHADER_FRAGMENT)
|
|
|
|
|
is_divergent = !(options & nir_divergence_single_prim_per_subgroup);
|
|
|
|
|
else if (stage == MESA_SHADER_TESS_CTRL)
|
2024-03-07 08:52:39 -05:00
|
|
|
is_divergent = !state->vertex_divergence &&
|
|
|
|
|
!(options & nir_divergence_single_patch_per_tcs_subgroup);
|
2019-05-20 14:58:23 +02:00
|
|
|
else if (stage == MESA_SHADER_TESS_EVAL)
|
2024-03-07 08:52:39 -05:00
|
|
|
is_divergent = !state->vertex_divergence &&
|
|
|
|
|
!(options & nir_divergence_single_patch_per_tes_subgroup);
|
2021-05-10 22:37:46 +02:00
|
|
|
else if (stage == MESA_SHADER_GEOMETRY || stage == MESA_SHADER_VERTEX)
|
2024-03-07 08:52:39 -05:00
|
|
|
is_divergent = !state->vertex_divergence;
|
2022-10-26 13:54:26 +03:00
|
|
|
else if (stage == MESA_SHADER_ANY_HIT ||
|
|
|
|
|
stage == MESA_SHADER_CLOSEST_HIT ||
|
|
|
|
|
stage == MESA_SHADER_INTERSECTION)
|
|
|
|
|
is_divergent = true;
|
2019-05-20 14:58:23 +02:00
|
|
|
else
|
|
|
|
|
unreachable("Invalid stage for load_primitive_id");
|
|
|
|
|
break;
|
|
|
|
|
case nir_intrinsic_load_tess_level_inner:
|
|
|
|
|
case nir_intrinsic_load_tess_level_outer:
|
|
|
|
|
if (stage == MESA_SHADER_TESS_CTRL)
|
|
|
|
|
is_divergent = !(options & nir_divergence_single_patch_per_tcs_subgroup);
|
|
|
|
|
else if (stage == MESA_SHADER_TESS_EVAL)
|
|
|
|
|
is_divergent = !(options & nir_divergence_single_patch_per_tes_subgroup);
|
|
|
|
|
else
|
|
|
|
|
unreachable("Invalid stage for load_primitive_tess_level_*");
|
|
|
|
|
break;
|
|
|
|
|
|
2022-02-24 10:27:30 +01:00
|
|
|
case nir_intrinsic_load_workgroup_index:
|
2021-06-04 12:04:15 -07:00
|
|
|
case nir_intrinsic_load_workgroup_id:
|
2024-08-01 22:06:17 -04:00
|
|
|
assert(gl_shader_stage_uses_workgroup(stage) || stage == MESA_SHADER_TESS_CTRL);
|
2021-08-26 11:26:04 -07:00
|
|
|
if (stage == MESA_SHADER_COMPUTE)
|
|
|
|
|
is_divergent |= (options & nir_divergence_multiple_workgroup_per_compute_subgroup);
|
2021-04-29 11:10:32 +02:00
|
|
|
break;
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
/* Clustered reductions are uniform if cluster_size == subgroup_size or
|
|
|
|
|
* the source is uniform and the operation is invariant.
|
|
|
|
|
* Inclusive scans are uniform if
|
|
|
|
|
* the source is uniform and the operation is invariant
|
|
|
|
|
*/
|
|
|
|
|
case nir_intrinsic_reduce:
|
2024-02-08 11:14:21 +01:00
|
|
|
if (nir_intrinsic_cluster_size(instr) == 0) {
|
|
|
|
|
/* Cluster size of 0 means the subgroup size.
|
|
|
|
|
* This is uniform within a subgroup, but divergent between
|
|
|
|
|
* vertices of the same primitive because they may be in
|
|
|
|
|
* different subgroups.
|
|
|
|
|
*/
|
2024-02-17 08:48:31 -05:00
|
|
|
is_divergent = state->vertex_divergence;
|
2024-02-08 11:14:21 +01:00
|
|
|
break;
|
|
|
|
|
}
|
2020-11-24 11:02:00 +01:00
|
|
|
FALLTHROUGH;
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_inclusive_scan:
|
|
|
|
|
case nir_intrinsic_inclusive_scan_clusters_ir3: {
|
2019-05-20 14:58:23 +02:00
|
|
|
nir_op op = nir_intrinsic_reduction_op(instr);
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state) ||
|
|
|
|
|
state->vertex_divergence;
|
2019-05-20 14:58:23 +02:00
|
|
|
if (op != nir_op_umin && op != nir_op_imin && op != nir_op_fmin &&
|
|
|
|
|
op != nir_op_umax && op != nir_op_imax && op != nir_op_fmax &&
|
|
|
|
|
op != nir_op_iand && op != nir_op_ior)
|
|
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_reduce_clusters_ir3:
|
|
|
|
|
/* This reduces the last invocations in all 8-wide clusters. It should
|
|
|
|
|
* behave the same as reduce with cluster_size == subgroup_size.
|
|
|
|
|
*/
|
|
|
|
|
is_divergent = state->vertex_divergence;
|
|
|
|
|
break;
|
|
|
|
|
|
2020-05-22 13:39:06 +01:00
|
|
|
case nir_intrinsic_load_ubo:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_ubo_vec4:
|
2024-05-22 14:40:20 -05:00
|
|
|
case nir_intrinsic_ldc_nv:
|
|
|
|
|
case nir_intrinsic_ldcx_nv:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = (src_divergent(instr->src[0], state) &&
|
|
|
|
|
(nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM)) ||
|
|
|
|
|
src_divergent(instr->src[1], state);
|
2023-03-14 19:08:13 +01:00
|
|
|
break;
|
|
|
|
|
|
2020-05-22 13:39:06 +01:00
|
|
|
case nir_intrinsic_load_ssbo:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_ssbo_ir3:
|
2024-03-15 09:54:54 -04:00
|
|
|
case nir_intrinsic_load_uav_ir3:
|
2025-05-29 17:05:10 +03:00
|
|
|
case nir_intrinsic_load_ssbo_intel:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = (src_divergent(instr->src[0], state) &&
|
|
|
|
|
(nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM)) ||
|
|
|
|
|
src_divergent(instr->src[1], state) ||
|
2024-04-09 18:14:12 +01:00
|
|
|
load_may_tear(state, instr);
|
2023-03-14 19:08:13 +01:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case nir_intrinsic_load_shared:
|
|
|
|
|
case nir_intrinsic_load_shared_ir3:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state) ||
|
|
|
|
|
(options & nir_divergence_uniform_load_tears);
|
2020-05-22 13:39:06 +01:00
|
|
|
break;
|
|
|
|
|
|
2023-03-14 19:08:13 +01:00
|
|
|
case nir_intrinsic_load_global:
|
|
|
|
|
case nir_intrinsic_load_global_2x32:
|
|
|
|
|
case nir_intrinsic_load_global_ir3:
|
|
|
|
|
case nir_intrinsic_load_deref: {
|
2024-04-09 18:14:12 +01:00
|
|
|
if (load_may_tear(state, instr)) {
|
2023-03-14 19:08:13 +01:00
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
|
|
|
|
|
for (unsigned i = 0; i < num_srcs; i++) {
|
2024-09-03 16:14:25 +02:00
|
|
|
if (src_divergent(instr->src[i], state)) {
|
2023-03-14 19:08:13 +01:00
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-12 15:03:28 +01:00
|
|
|
case nir_intrinsic_get_ssbo_size:
|
|
|
|
|
case nir_intrinsic_deref_buffer_array_length:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state) &&
|
|
|
|
|
(nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM);
|
2020-10-12 15:03:28 +01:00
|
|
|
break;
|
|
|
|
|
|
2022-07-19 01:23:44 -04:00
|
|
|
case nir_intrinsic_image_samples_identical:
|
|
|
|
|
case nir_intrinsic_image_deref_samples_identical:
|
|
|
|
|
case nir_intrinsic_bindless_image_samples_identical:
|
2022-09-07 17:53:33 +08:00
|
|
|
case nir_intrinsic_image_fragment_mask_load_amd:
|
|
|
|
|
case nir_intrinsic_image_deref_fragment_mask_load_amd:
|
|
|
|
|
case nir_intrinsic_bindless_image_fragment_mask_load_amd:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = (src_divergent(instr->src[0], state) &&
|
|
|
|
|
(nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM)) ||
|
|
|
|
|
src_divergent(instr->src[1], state) ||
|
2024-04-09 18:14:12 +01:00
|
|
|
load_may_tear(state, instr);
|
2022-07-19 01:23:44 -04:00
|
|
|
break;
|
|
|
|
|
|
2023-05-18 23:16:55 -04:00
|
|
|
case nir_intrinsic_image_texel_address:
|
|
|
|
|
case nir_intrinsic_image_deref_texel_address:
|
|
|
|
|
case nir_intrinsic_bindless_image_texel_address:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = (src_divergent(instr->src[0], state) &&
|
|
|
|
|
(nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM)) ||
|
|
|
|
|
src_divergent(instr->src[1], state) ||
|
|
|
|
|
src_divergent(instr->src[2], state);
|
2023-05-18 23:16:55 -04:00
|
|
|
break;
|
|
|
|
|
|
2020-05-22 13:39:06 +01:00
|
|
|
case nir_intrinsic_image_load:
|
|
|
|
|
case nir_intrinsic_image_deref_load:
|
|
|
|
|
case nir_intrinsic_bindless_image_load:
|
2020-11-20 16:14:26 +00:00
|
|
|
case nir_intrinsic_image_sparse_load:
|
|
|
|
|
case nir_intrinsic_image_deref_sparse_load:
|
|
|
|
|
case nir_intrinsic_bindless_image_sparse_load:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = (src_divergent(instr->src[0], state) &&
|
|
|
|
|
(nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM)) ||
|
|
|
|
|
src_divergent(instr->src[1], state) ||
|
|
|
|
|
src_divergent(instr->src[2], state) ||
|
|
|
|
|
src_divergent(instr->src[3], state) ||
|
2024-04-09 18:14:12 +01:00
|
|
|
load_may_tear(state, instr);
|
2020-05-22 13:39:06 +01:00
|
|
|
break;
|
|
|
|
|
|
2025-06-09 23:40:04 -07:00
|
|
|
case nir_intrinsic_load_converted_output_pan:
|
|
|
|
|
case nir_intrinsic_load_readonly_output_pan:
|
|
|
|
|
is_divergent = ((src_divergent(instr->src[0], state) ||
|
|
|
|
|
src_divergent(instr->src[2], state)) &&
|
|
|
|
|
(nir_intrinsic_access(instr) & ACCESS_NON_UNIFORM)) ||
|
|
|
|
|
src_divergent(instr->src[1], state);
|
|
|
|
|
break;
|
|
|
|
|
|
2022-10-21 20:29:31 -04:00
|
|
|
case nir_intrinsic_optimization_barrier_vgpr_amd:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state);
|
2022-10-21 20:29:31 -04:00
|
|
|
break;
|
2020-05-22 13:39:06 +01:00
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
/* Intrinsics with divergence depending on sources */
|
2023-04-10 17:23:30 -05:00
|
|
|
case nir_intrinsic_convert_alu_types:
|
2024-07-24 11:10:30 -04:00
|
|
|
case nir_intrinsic_ddx:
|
|
|
|
|
case nir_intrinsic_ddx_fine:
|
|
|
|
|
case nir_intrinsic_ddx_coarse:
|
|
|
|
|
case nir_intrinsic_ddy:
|
|
|
|
|
case nir_intrinsic_ddy_fine:
|
|
|
|
|
case nir_intrinsic_ddy_coarse:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_ballot_bitfield_extract:
|
|
|
|
|
case nir_intrinsic_ballot_find_lsb:
|
|
|
|
|
case nir_intrinsic_ballot_find_msb:
|
|
|
|
|
case nir_intrinsic_ballot_bit_count_reduce:
|
2022-11-16 23:54:21 -08:00
|
|
|
case nir_intrinsic_rotate:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_shuffle_xor:
|
|
|
|
|
case nir_intrinsic_shuffle_up:
|
|
|
|
|
case nir_intrinsic_shuffle_down:
|
2024-10-16 20:25:32 +02:00
|
|
|
case nir_intrinsic_shuffle_xor_uniform_ir3:
|
|
|
|
|
case nir_intrinsic_shuffle_up_uniform_ir3:
|
|
|
|
|
case nir_intrinsic_shuffle_down_uniform_ir3:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_quad_broadcast:
|
|
|
|
|
case nir_intrinsic_quad_swap_horizontal:
|
|
|
|
|
case nir_intrinsic_quad_swap_vertical:
|
|
|
|
|
case nir_intrinsic_quad_swap_diagonal:
|
2019-02-04 12:47:53 +01:00
|
|
|
case nir_intrinsic_quad_vote_any:
|
|
|
|
|
case nir_intrinsic_quad_vote_all:
|
2021-11-12 10:26:30 +00:00
|
|
|
case nir_intrinsic_load_shared2_amd:
|
2020-08-29 00:59:22 -05:00
|
|
|
case nir_intrinsic_load_global_constant:
|
2021-12-02 14:33:17 +00:00
|
|
|
case nir_intrinsic_load_global_amd:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_uniform:
|
|
|
|
|
case nir_intrinsic_load_constant:
|
|
|
|
|
case nir_intrinsic_load_sample_pos_from_id:
|
|
|
|
|
case nir_intrinsic_load_kernel_input:
|
2022-05-23 16:43:47 +02:00
|
|
|
case nir_intrinsic_load_task_payload:
|
2021-01-25 19:12:18 +01:00
|
|
|
case nir_intrinsic_load_buffer_amd:
|
2023-02-02 10:47:58 +01:00
|
|
|
case nir_intrinsic_load_typed_buffer_amd:
|
2024-08-26 01:46:03 +02:00
|
|
|
case nir_intrinsic_image_levels:
|
|
|
|
|
case nir_intrinsic_image_deref_levels:
|
|
|
|
|
case nir_intrinsic_bindless_image_levels:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_image_samples:
|
|
|
|
|
case nir_intrinsic_image_deref_samples:
|
|
|
|
|
case nir_intrinsic_bindless_image_samples:
|
|
|
|
|
case nir_intrinsic_image_size:
|
|
|
|
|
case nir_intrinsic_image_deref_size:
|
|
|
|
|
case nir_intrinsic_bindless_image_size:
|
2022-07-20 07:55:04 -04:00
|
|
|
case nir_intrinsic_image_descriptor_amd:
|
|
|
|
|
case nir_intrinsic_image_deref_descriptor_amd:
|
|
|
|
|
case nir_intrinsic_bindless_image_descriptor_amd:
|
2023-04-24 12:21:04 +01:00
|
|
|
case nir_intrinsic_strict_wqm_coord_amd:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_copy_deref:
|
|
|
|
|
case nir_intrinsic_vulkan_resource_index:
|
|
|
|
|
case nir_intrinsic_vulkan_resource_reindex:
|
|
|
|
|
case nir_intrinsic_load_vulkan_descriptor:
|
2025-06-09 23:40:04 -07:00
|
|
|
case nir_intrinsic_load_input_attachment_target_pan:
|
|
|
|
|
case nir_intrinsic_load_input_attachment_conv_pan:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_atomic_counter_read:
|
|
|
|
|
case nir_intrinsic_atomic_counter_read_deref:
|
|
|
|
|
case nir_intrinsic_quad_swizzle_amd:
|
2020-11-25 17:07:20 +00:00
|
|
|
case nir_intrinsic_masked_swizzle_amd:
|
2020-12-07 13:36:22 +00:00
|
|
|
case nir_intrinsic_is_sparse_texels_resident:
|
2024-03-27 09:28:26 -04:00
|
|
|
case nir_intrinsic_is_sparse_resident_zink:
|
2021-01-03 20:55:31 -06:00
|
|
|
case nir_intrinsic_sparse_residency_code_and:
|
2021-04-06 11:30:34 +02:00
|
|
|
case nir_intrinsic_bvh64_intersect_ray_amd:
|
2025-03-12 22:43:57 +01:00
|
|
|
case nir_intrinsic_bvh8_intersect_ray_amd:
|
2022-03-16 02:14:52 -07:00
|
|
|
case nir_intrinsic_image_deref_load_param_intel:
|
|
|
|
|
case nir_intrinsic_image_load_raw_intel:
|
2021-04-03 16:05:09 -04:00
|
|
|
case nir_intrinsic_get_ubo_size:
|
2022-10-26 13:54:57 +03:00
|
|
|
case nir_intrinsic_load_ssbo_address:
|
2025-06-24 11:47:51 -04:00
|
|
|
case nir_intrinsic_load_global_bounded:
|
2023-04-21 12:28:21 +03:00
|
|
|
case nir_intrinsic_load_global_constant_bounded:
|
2022-12-22 17:27:58 +02:00
|
|
|
case nir_intrinsic_load_global_constant_offset:
|
2023-11-30 13:30:53 -08:00
|
|
|
case nir_intrinsic_load_reg:
|
2024-08-01 22:06:17 -04:00
|
|
|
case nir_intrinsic_load_constant_agx:
|
2025-05-30 12:31:43 -04:00
|
|
|
case nir_intrinsic_load_texture_handle_agx:
|
2025-07-02 17:35:01 -04:00
|
|
|
case nir_intrinsic_bindless_image_agx:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_reg_indirect:
|
2024-08-15 08:46:36 +02:00
|
|
|
case nir_intrinsic_load_const_ir3:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_frag_size_ir3:
|
|
|
|
|
case nir_intrinsic_load_frag_offset_ir3:
|
2024-03-15 10:41:49 -04:00
|
|
|
case nir_intrinsic_bindless_resource_ir3:
|
2025-04-29 12:50:42 +03:00
|
|
|
case nir_intrinsic_ray_intersection_ir3:
|
|
|
|
|
case nir_intrinsic_read_attribute_payload_intel: {
|
2019-05-20 14:58:23 +02:00
|
|
|
unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
|
|
|
|
|
for (unsigned i = 0; i < num_srcs; i++) {
|
2024-09-03 16:14:25 +02:00
|
|
|
if (src_divergent(instr->src[i], state)) {
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2024-07-08 09:00:17 -07:00
|
|
|
case nir_intrinsic_resource_intel:
|
|
|
|
|
/* Not having the non_uniform flag with divergent sources is undefined
|
|
|
|
|
 * behavior. The Intel driver defines it to pick the lowest numbered live
|
|
|
|
|
* SIMD lane (via emit_uniformize).
|
|
|
|
|
*/
|
|
|
|
|
if ((nir_intrinsic_resource_access_intel(instr) &
|
|
|
|
|
nir_resource_intel_non_uniform) != 0) {
|
|
|
|
|
unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
|
|
|
|
|
for (unsigned i = 0; i < num_srcs; i++) {
|
2024-09-03 16:14:25 +02:00
|
|
|
if (src_divergent(instr->src[i], state)) {
|
2024-07-08 09:00:17 -07:00
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
|
2019-11-12 15:29:45 +00:00
|
|
|
case nir_intrinsic_shuffle:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(instr->src[0], state) &&
|
|
|
|
|
src_divergent(instr->src[1], state);
|
2019-11-12 15:29:45 +00:00
|
|
|
break;
|
|
|
|
|
|
2024-10-19 12:31:21 +02:00
|
|
|
case nir_intrinsic_load_param:
|
|
|
|
|
is_divergent =
|
|
|
|
|
!state->impl->function->params[nir_intrinsic_param_idx(instr)].is_uniform;
|
|
|
|
|
break;
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
/* Intrinsics which are always divergent */
|
nir/spirv: Add inverse_ballot intrinsic
This is actually a no-op on AMD, so we really don't want to lower it to
something more complicated. There may be a more efficient way to do
this on Intel too. In addition, in the future we'll want to use this for
lowering boolean reduce operations, where the inverse ballot will
operate on the backend's "natural" ballot type as indicated by
options->ballot_bit_size, instead of uvec4 as produced by SPIR-V. In
total, there are now three possible lowerings we may have to perform:
- inverse_ballot with source type of uvec4 from SPIR-V to inverse_ballot
with natural source type, when the backend supports inverse_ballot
natively.
- inverse_ballot with source type of uvec4 from SPIR-V to arithmetic,
when the backend doesn't support inverse_ballot.
- inverse_ballot with natural source type from reduce operation, when
the backend doesn't support inverse_ballot.
Previously we just did the second lowering unconditionally in vtn, but
it's just a combination of the first and third. We add support here for
the first and third lowerings in nir_lower_subgroups, instead of simply
moving the second lowering, to avoid unnecessary churn.
Reviewed-by: Ian Romanick <ian.d.romanick@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25123>
2019-02-01 11:37:50 +01:00
|
|
|
case nir_intrinsic_inverse_ballot:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_color0:
|
|
|
|
|
case nir_intrinsic_load_color1:
|
|
|
|
|
case nir_intrinsic_load_sample_id:
|
|
|
|
|
case nir_intrinsic_load_sample_id_no_per_sample:
|
|
|
|
|
case nir_intrinsic_load_sample_mask_in:
|
|
|
|
|
case nir_intrinsic_load_interpolated_input:
|
2022-02-22 21:35:58 +05:30
|
|
|
case nir_intrinsic_load_point_coord_maybe_flipped:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_barycentric_pixel:
|
|
|
|
|
case nir_intrinsic_load_barycentric_centroid:
|
|
|
|
|
case nir_intrinsic_load_barycentric_sample:
|
2020-01-27 11:34:00 +01:00
|
|
|
case nir_intrinsic_load_barycentric_model:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_barycentric_at_sample:
|
|
|
|
|
case nir_intrinsic_load_barycentric_at_offset:
|
2023-09-15 17:22:52 -05:00
|
|
|
case nir_intrinsic_load_barycentric_at_offset_nv:
|
2022-05-12 15:50:04 +02:00
|
|
|
case nir_intrinsic_load_barycentric_coord_pixel:
|
|
|
|
|
case nir_intrinsic_load_barycentric_coord_centroid:
|
|
|
|
|
case nir_intrinsic_load_barycentric_coord_sample:
|
|
|
|
|
case nir_intrinsic_load_barycentric_coord_at_sample:
|
|
|
|
|
case nir_intrinsic_load_barycentric_coord_at_offset:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_persp_center_rhw_ir3:
|
2025-06-16 09:26:03 -04:00
|
|
|
case nir_intrinsic_load_input_attachment_coord:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_interp_deref_at_offset:
|
|
|
|
|
case nir_intrinsic_interp_deref_at_sample:
|
|
|
|
|
case nir_intrinsic_interp_deref_at_centroid:
|
2020-01-24 16:01:04 +01:00
|
|
|
case nir_intrinsic_interp_deref_at_vertex:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_tess_coord:
|
2023-07-14 10:26:47 -04:00
|
|
|
case nir_intrinsic_load_tess_coord_xy:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_point_coord:
|
2020-07-03 12:57:36 +02:00
|
|
|
case nir_intrinsic_load_line_coord:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_frag_coord:
|
2025-02-20 20:31:18 -08:00
|
|
|
case nir_intrinsic_load_frag_coord_z:
|
|
|
|
|
case nir_intrinsic_load_frag_coord_w:
|
2024-12-11 19:07:10 -08:00
|
|
|
case nir_intrinsic_load_frag_coord_zw_pan:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_frag_coord_unscaled_ir3:
|
2023-06-14 18:26:28 -04:00
|
|
|
case nir_intrinsic_load_pixel_coord:
|
2023-02-22 16:31:18 +01:00
|
|
|
case nir_intrinsic_load_fully_covered:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_sample_pos:
|
2021-12-02 14:11:21 -06:00
|
|
|
case nir_intrinsic_load_sample_pos_or_center:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_vertex_id_zero_base:
|
|
|
|
|
case nir_intrinsic_load_vertex_id:
|
|
|
|
|
case nir_intrinsic_load_invocation_id:
|
|
|
|
|
case nir_intrinsic_load_local_invocation_id:
|
|
|
|
|
case nir_intrinsic_load_local_invocation_index:
|
|
|
|
|
case nir_intrinsic_load_global_invocation_id:
|
|
|
|
|
case nir_intrinsic_load_global_invocation_index:
|
|
|
|
|
case nir_intrinsic_load_subgroup_invocation:
|
2024-03-04 14:55:14 +01:00
|
|
|
case nir_intrinsic_load_subgroup_eq_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_ge_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_gt_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_le_mask:
|
|
|
|
|
case nir_intrinsic_load_subgroup_lt_mask:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_load_helper_invocation:
|
|
|
|
|
case nir_intrinsic_is_helper_invocation:
|
|
|
|
|
case nir_intrinsic_load_scratch:
|
nir: Add unified atomics
Currently, we have an atomic intrinsic for each combination of memory type
(global, shared, image, etc) and atomic operation (add, sub, etc). So for m
types of memory supported by the driver and n atomic opcodes, the driver has to
handle O(mn) intrinsics. This makes a total mess in every single backend I've
looked at, without fail.
It would be a lot nicer to unify the intrinsics. There are two obvious ways:
1. Make the memory type a constant index, keep different intrinsics for
different operations. The problem with this is that different memory types
imply different intrinsic signatures (number of sources, etc). As an
example, it doesn't make sense to unify global_atomic_amd with
global_atomic_2x32, as an example. The first takes 3 scalar sources, the
second takes 1 vector and 1 scalar. Also, in any single backend, there are a
lot more operations than there are memory types.
2. Make the opcode a constant index, keep different intrinsics for different
operations. This works well, with one exception: compswap and fcompswap
take an extra argument that other atomics don't, so there's an extra axis of
variation for the intrinsic signatures.
So, the solution is to have 2 intrinsics for each memory type -- for atomics
taking 1 argument and atomics taking 2 respectively. Both of these intrinsics
take an nir_atomic_op enum to describe its operation. We don't use a nir_op for
this purpose, as there are some atomics (cmpxchg, inc_wrap, etc) that don't
cleanly map to any ALU op and it would be weird to force it.
The plan is to transition to these new opcodes gradually. This series adds a
lowering pass producing these opcodes from the existing opcodes, so that
backends can opt-in to the new forms one-by-one. Then we can convert backends
separately without any cross-tree flag day. Once everything is converted, we can
convert the producers and core NIR as a flag day, but we have far fewer
producers than backends so this should be fine. Finally we can drop the old
stuff.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Rob Clark <robclark@freedesktop.org>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22914>
2023-05-08 15:29:31 -04:00
|
|
|
case nir_intrinsic_deref_atomic:
|
|
|
|
|
case nir_intrinsic_deref_atomic_swap:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_swap:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_ssbo_atomic_ir3:
|
|
|
|
|
case nir_intrinsic_ssbo_atomic_swap_ir3:
|
nir: Add unified atomics
Currently, we have an atomic intrinsic for each combination of memory type
(global, shared, image, etc) and atomic operation (add, sub, etc). So for m
types of memory supported by the driver and n atomic opcodes, the driver has to
handle O(mn) intrinsics. This makes a total mess in every single backend I've
looked at, without fail.
It would be a lot nicer to unify the intrinsics. There are two obvious ways:
1. Make the memory type a constant index, keep different intrinsics for
different operations. The problem with this is that different memory types
imply different intrinsic signatures (number of sources, etc). As an
example, it doesn't make sense to unify global_atomic_amd with
global_atomic_2x32, as an example. The first takes 3 scalar sources, the
second takes 1 vector and 1 scalar. Also, in any single backend, there are a
lot more operations than there are memory types.
2. Make the opcode a constant index, keep different intrinsics for different
operations. This works well, with one exception: compswap and fcompswap
take an extra argument that other atomics don't, so there's an extra axis of
variation for the intrinsic signatures.
So, the solution is to have 2 intrinsics for each memory type -- for atomics
taking 1 argument and atomics taking 2 respectively. Both of these intrinsics
take an nir_atomic_op enum to describe its operation. We don't use a nir_op for
this purpose, as there are some atomics (cmpxchg, inc_wrap, etc) that don't
cleanly map to any ALU op and it would be weird to force it.
The plan is to transition to these new opcodes gradually. This series adds a
lowering pass producing these opcodes from the existing opcodes, so that
backends can opt-in to the new forms one-by-one. Then we can convert backends
separately without any cross-tree flag day. Once everything is converted, we can
convert the producers and core NIR as a flag day, but we have far fewer
producers than backends so this should be fine. Finally we can drop the old
stuff.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Rob Clark <robclark@freedesktop.org>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22914>
2023-05-08 15:29:31 -04:00
|
|
|
case nir_intrinsic_image_deref_atomic:
|
|
|
|
|
case nir_intrinsic_image_deref_atomic_swap:
|
|
|
|
|
case nir_intrinsic_image_atomic:
|
|
|
|
|
case nir_intrinsic_image_atomic_swap:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic:
|
|
|
|
|
case nir_intrinsic_bindless_image_atomic_swap:
|
|
|
|
|
case nir_intrinsic_shared_atomic:
|
|
|
|
|
case nir_intrinsic_shared_atomic_swap:
|
|
|
|
|
case nir_intrinsic_task_payload_atomic:
|
|
|
|
|
case nir_intrinsic_task_payload_atomic_swap:
|
|
|
|
|
case nir_intrinsic_global_atomic:
|
|
|
|
|
case nir_intrinsic_global_atomic_swap:
|
2025-04-14 22:53:46 +01:00
|
|
|
case nir_intrinsic_alpha_to_coverage:
|
nir: Add unified atomics
Currently, we have an atomic intrinsic for each combination of memory type
(global, shared, image, etc) and atomic operation (add, sub, etc). So for m
types of memory supported by the driver and n atomic opcodes, the driver has to
handle O(mn) intrinsics. This makes a total mess in every single backend I've
looked at, without fail.
It would be a lot nicer to unify the intrinsics. There are two obvious ways:
1. Make the memory type a constant index, keep different intrinsics for
different operations. The problem with this is that different memory types
imply different intrinsic signatures (number of sources, etc). As an
example, it doesn't make sense to unify global_atomic_amd with
global_atomic_2x32, as an example. The first takes 3 scalar sources, the
second takes 1 vector and 1 scalar. Also, in any single backend, there are a
lot more operations than there are memory types.
2. Make the opcode a constant index, keep different intrinsics for different
operations. This works well, with one exception: compswap and fcompswap
take an extra argument that other atomics don't, so there's an extra axis of
variation for the intrinsic signatures.
So, the solution is to have 2 intrinsics for each memory type -- for atomics
taking 1 argument and atomics taking 2 respectively. Both of these intrinsics
take an nir_atomic_op enum to describe its operation. We don't use a nir_op for
this purpose, as there are some atomics (cmpxchg, inc_wrap, etc) that don't
cleanly map to any ALU op and it would be weird to force it.
The plan is to transition to these new opcodes gradually. This series adds a
lowering pass producing these opcodes from the existing opcodes, so that
backends can opt-in to the new forms one-by-one. Then we can convert backends
separately without any cross-tree flag day. Once everything is converted, we can
convert the producers and core NIR as a flag day, but we have far fewer
producers than backends so this should be fine. Finally we can drop the old
stuff.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Rob Clark <robclark@freedesktop.org>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22914>
2023-05-08 15:29:31 -04:00
|
|
|
case nir_intrinsic_global_atomic_amd:
|
2024-10-28 12:23:25 -04:00
|
|
|
case nir_intrinsic_global_atomic_agx:
|
nir: Add unified atomics
Currently, we have an atomic intrinsic for each combination of memory type
(global, shared, image, etc) and atomic operation (add, sub, etc). So for m
types of memory supported by the driver and n atomic opcodes, the driver has to
handle O(mn) intrinsics. This makes a total mess in every single backend I've
looked at, without fail.
It would be a lot nicer to unify the intrinsics. There are two obvious ways:
1. Make the memory type a constant index, keep different intrinsics for
different operations. The problem with this is that different memory types
imply different intrinsic signatures (number of sources, etc). As an
example, it doesn't make sense to unify global_atomic_amd with
global_atomic_2x32, as an example. The first takes 3 scalar sources, the
second takes 1 vector and 1 scalar. Also, in any single backend, there are a
lot more operations than there are memory types.
2. Make the opcode a constant index, keep different intrinsics for different
operations. This works well, with one exception: compswap and fcompswap
take an extra argument that other atomics don't, so there's an extra axis of
variation for the intrinsic signatures.
So, the solution is to have 2 intrinsics for each memory type -- for atomics
taking 1 argument and atomics taking 2 respectively. Both of these intrinsics
take an nir_atomic_op enum to describe its operation. We don't use a nir_op for
this purpose, as there are some atomics (cmpxchg, inc_wrap, etc) that don't
cleanly map to any ALU op and it would be weird to force it.
The plan is to transition to these new opcodes gradually. This series adds a
lowering pass producing these opcodes from the existing opcodes, so that
backends can opt-in to the new forms one-by-one. Then we can convert backends
separately without any cross-tree flag day. Once everything is converted, we can
convert the producers and core NIR as a flag day, but we have far fewer
producers than backends so this should be fine. Finally we can drop the old
stuff.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Rob Clark <robclark@freedesktop.org>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22914>
2023-05-08 15:29:31 -04:00
|
|
|
case nir_intrinsic_global_atomic_swap_amd:
|
2024-10-28 12:23:25 -04:00
|
|
|
case nir_intrinsic_global_atomic_swap_agx:
|
nir: Add unified atomics
Currently, we have an atomic intrinsic for each combination of memory type
(global, shared, image, etc) and atomic operation (add, sub, etc). So for m
types of memory supported by the driver and n atomic opcodes, the driver has to
handle O(mn) intrinsics. This makes a total mess in every single backend I've
looked at, without fail.
It would be a lot nicer to unify the intrinsics. There are two obvious ways:
1. Make the memory type a constant index, keep different intrinsics for
different operations. The problem with this is that different memory types
imply different intrinsic signatures (number of sources, etc). As an
example, it doesn't make sense to unify global_atomic_amd with
global_atomic_2x32, as an example. The first takes 3 scalar sources, the
second takes 1 vector and 1 scalar. Also, in any single backend, there are a
lot more operations than there are memory types.
2. Make the opcode a constant index, keep different intrinsics for different
operations. This works well, with one exception: compswap and fcompswap
take an extra argument that other atomics don't, so there's an extra axis of
variation for the intrinsic signatures.
So, the solution is to have 2 intrinsics for each memory type -- for atomics
taking 1 argument and atomics taking 2 respectively. Both of these intrinsics
take an nir_atomic_op enum to describe its operation. We don't use a nir_op for
this purpose, as there are some atomics (cmpxchg, inc_wrap, etc) that don't
cleanly map to any ALU op and it would be weird to force it.
The plan is to transition to these new opcodes gradually. This series adds a
lowering pass producing these opcodes from the existing opcodes, so that
backends can opt-in to the new forms one-by-one. Then we can convert backends
separately without any cross-tree flag day. Once everything is converted, we can
convert the producers and core NIR as a flag day, but we have far fewer
producers than backends so this should be fine. Finally we can drop the old
stuff.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Jesse Natalie <jenatali@microsoft.com>
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Rob Clark <robclark@freedesktop.org>
Reviewed-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22914>
2023-05-08 15:29:31 -04:00
|
|
|
case nir_intrinsic_global_atomic_2x32:
|
|
|
|
|
case nir_intrinsic_global_atomic_swap_2x32:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_atomic_counter_add:
|
|
|
|
|
case nir_intrinsic_atomic_counter_min:
|
|
|
|
|
case nir_intrinsic_atomic_counter_max:
|
|
|
|
|
case nir_intrinsic_atomic_counter_and:
|
|
|
|
|
case nir_intrinsic_atomic_counter_or:
|
|
|
|
|
case nir_intrinsic_atomic_counter_xor:
|
|
|
|
|
case nir_intrinsic_atomic_counter_inc:
|
|
|
|
|
case nir_intrinsic_atomic_counter_pre_dec:
|
|
|
|
|
case nir_intrinsic_atomic_counter_post_dec:
|
|
|
|
|
case nir_intrinsic_atomic_counter_exchange:
|
|
|
|
|
case nir_intrinsic_atomic_counter_comp_swap:
|
|
|
|
|
case nir_intrinsic_atomic_counter_add_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_min_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_max_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_and_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_or_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_xor_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_inc_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_pre_dec_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_post_dec_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_exchange_deref:
|
|
|
|
|
case nir_intrinsic_atomic_counter_comp_swap_deref:
|
|
|
|
|
case nir_intrinsic_exclusive_scan:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_exclusive_scan_clusters_ir3:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_intrinsic_ballot_bit_count_exclusive:
|
|
|
|
|
case nir_intrinsic_ballot_bit_count_inclusive:
|
|
|
|
|
case nir_intrinsic_write_invocation_amd:
|
|
|
|
|
case nir_intrinsic_mbcnt_amd:
|
2021-05-28 21:57:19 +02:00
|
|
|
case nir_intrinsic_lane_permute_16_amd:
|
2023-08-12 16:49:00 +02:00
|
|
|
case nir_intrinsic_dpp16_shift_amd:
|
2020-02-06 17:35:25 +01:00
|
|
|
case nir_intrinsic_elect:
|
2024-06-26 07:14:00 -04:00
|
|
|
case nir_intrinsic_elect_any_ir3:
|
2024-05-09 13:03:17 +02:00
|
|
|
case nir_intrinsic_load_tlb_color_brcm:
|
2021-02-15 22:01:02 +01:00
|
|
|
case nir_intrinsic_load_tess_rel_patch_id_amd:
|
2021-02-22 20:16:32 +01:00
|
|
|
case nir_intrinsic_load_gs_vertex_offset_amd:
|
2022-10-18 20:52:53 +01:00
|
|
|
case nir_intrinsic_is_subgroup_invocation_lt_amd:
|
2021-05-04 13:40:28 +02:00
|
|
|
case nir_intrinsic_load_packed_passthrough_primitive_amd:
|
2021-07-15 13:56:18 +02:00
|
|
|
case nir_intrinsic_load_initial_edgeflags_amd:
|
2021-05-04 13:40:28 +02:00
|
|
|
case nir_intrinsic_gds_atomic_add_amd:
|
2021-03-31 02:53:33 +02:00
|
|
|
case nir_intrinsic_load_rt_arg_scratch_offset_amd:
|
|
|
|
|
case nir_intrinsic_load_intersection_opaque_amd:
|
2020-09-21 13:54:54 +01:00
|
|
|
case nir_intrinsic_load_vector_arg_amd:
|
2022-04-21 02:48:58 -07:00
|
|
|
case nir_intrinsic_load_btd_stack_id_intel:
|
|
|
|
|
case nir_intrinsic_load_topology_id_intel:
|
2022-05-02 16:32:07 +03:00
|
|
|
case nir_intrinsic_load_scratch_base_ptr:
|
2023-11-10 19:02:49 -05:00
|
|
|
case nir_intrinsic_ordered_xfb_counter_add_gfx11_amd:
|
2024-06-22 23:07:37 -04:00
|
|
|
case nir_intrinsic_ordered_add_loop_gfx12_amd:
|
2023-11-10 19:02:49 -05:00
|
|
|
case nir_intrinsic_xfb_counter_sub_gfx11_amd:
|
2024-03-20 12:30:45 +00:00
|
|
|
case nir_intrinsic_unit_test_divergent_amd:
|
2022-05-18 18:29:10 +03:00
|
|
|
case nir_intrinsic_load_stack:
|
2022-10-26 13:54:26 +03:00
|
|
|
case nir_intrinsic_load_ray_launch_id:
|
|
|
|
|
case nir_intrinsic_load_ray_instance_custom_index:
|
|
|
|
|
case nir_intrinsic_load_ray_geometry_index:
|
|
|
|
|
case nir_intrinsic_load_ray_world_direction:
|
|
|
|
|
case nir_intrinsic_load_ray_world_origin:
|
|
|
|
|
case nir_intrinsic_load_ray_object_origin:
|
|
|
|
|
case nir_intrinsic_load_ray_object_direction:
|
|
|
|
|
case nir_intrinsic_load_ray_t_min:
|
|
|
|
|
case nir_intrinsic_load_ray_t_max:
|
|
|
|
|
case nir_intrinsic_load_ray_object_to_world:
|
|
|
|
|
case nir_intrinsic_load_ray_world_to_object:
|
|
|
|
|
case nir_intrinsic_load_ray_hit_kind:
|
|
|
|
|
case nir_intrinsic_load_ray_flags:
|
|
|
|
|
case nir_intrinsic_load_cull_mask:
|
2023-09-18 21:14:35 -05:00
|
|
|
case nir_intrinsic_load_sysval_nv:
|
2023-09-22 18:30:51 +02:00
|
|
|
case nir_intrinsic_emit_vertex_nv:
|
|
|
|
|
case nir_intrinsic_end_primitive_nv:
|
2023-01-18 10:37:45 +02:00
|
|
|
case nir_intrinsic_report_ray_intersection:
|
|
|
|
|
case nir_intrinsic_rq_proceed:
|
|
|
|
|
case nir_intrinsic_rq_load:
|
2022-12-01 17:09:22 +02:00
|
|
|
case nir_intrinsic_load_ray_triangle_vertex_positions:
|
2023-08-08 11:02:33 -07:00
|
|
|
case nir_intrinsic_cmat_extract:
|
2023-07-15 19:24:07 +02:00
|
|
|
case nir_intrinsic_cmat_muladd_amd:
|
2023-10-09 13:54:38 -07:00
|
|
|
case nir_intrinsic_dpas_intel:
|
2025-06-09 16:04:47 -04:00
|
|
|
case nir_intrinsic_convert_cmat_intel:
|
2023-09-30 02:16:51 -05:00
|
|
|
case nir_intrinsic_isberd_nv:
|
2025-05-07 17:31:11 +02:00
|
|
|
case nir_intrinsic_vild_nv:
|
2023-09-30 02:16:51 -05:00
|
|
|
case nir_intrinsic_al2p_nv:
|
|
|
|
|
case nir_intrinsic_ald_nv:
|
2025-04-18 11:27:10 -05:00
|
|
|
case nir_intrinsic_suclamp_nv:
|
|
|
|
|
case nir_intrinsic_subfm_nv:
|
|
|
|
|
case nir_intrinsic_sueau_nv:
|
|
|
|
|
case nir_intrinsic_imadsp_nv:
|
|
|
|
|
case nir_intrinsic_suldga_nv:
|
|
|
|
|
case nir_intrinsic_sustga_nv:
|
2023-11-10 11:51:11 -06:00
|
|
|
case nir_intrinsic_ipa_nv:
|
2023-10-13 12:42:53 +02:00
|
|
|
case nir_intrinsic_ldtram_nv:
|
2023-11-30 19:13:07 +02:00
|
|
|
case nir_intrinsic_printf:
|
2023-02-13 17:33:45 +01:00
|
|
|
case nir_intrinsic_load_gs_header_ir3:
|
|
|
|
|
case nir_intrinsic_load_tcs_header_ir3:
|
|
|
|
|
case nir_intrinsic_load_rel_patch_id_ir3:
|
|
|
|
|
case nir_intrinsic_brcst_active_ir3:
|
2024-08-01 22:06:17 -04:00
|
|
|
case nir_intrinsic_load_helper_op_id_agx:
|
|
|
|
|
case nir_intrinsic_load_helper_arg_lo_agx:
|
|
|
|
|
case nir_intrinsic_load_helper_arg_hi_agx:
|
|
|
|
|
case nir_intrinsic_stack_map_agx:
|
|
|
|
|
case nir_intrinsic_stack_unmap_agx:
|
|
|
|
|
case nir_intrinsic_load_exported_agx:
|
|
|
|
|
case nir_intrinsic_load_local_pixel_agx:
|
|
|
|
|
case nir_intrinsic_load_coefficients_agx:
|
|
|
|
|
case nir_intrinsic_load_active_subgroup_invocation_agx:
|
|
|
|
|
case nir_intrinsic_load_sample_mask:
|
|
|
|
|
case nir_intrinsic_quad_ballot_agx:
|
2024-08-17 16:26:30 -04:00
|
|
|
case nir_intrinsic_load_agx:
|
2025-05-19 20:58:41 +02:00
|
|
|
case nir_intrinsic_load_shared_lock_nv:
|
|
|
|
|
case nir_intrinsic_store_shared_unlock_nv:
|
2019-05-20 14:58:23 +02:00
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
default:
|
|
|
|
|
#ifdef NDEBUG
|
|
|
|
|
is_divergent = true;
|
|
|
|
|
break;
|
|
|
|
|
#else
|
|
|
|
|
nir_print_instr(&instr->instr, stderr);
|
|
|
|
|
unreachable("\nNIR divergence analysis: Unhandled intrinsic.");
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-14 11:56:00 -05:00
|
|
|
instr->def.divergent = is_divergent;
|
2019-05-20 14:58:23 +02:00
|
|
|
return is_divergent;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2024-02-17 08:48:31 -05:00
|
|
|
visit_tex(nir_tex_instr *instr, struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
2023-08-14 11:56:00 -05:00
|
|
|
if (instr->def.divergent)
|
2019-05-20 14:58:23 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
bool is_divergent = false;
|
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < instr->num_srcs; i++) {
|
|
|
|
|
switch (instr->src[i].src_type) {
|
|
|
|
|
case nir_tex_src_sampler_deref:
|
|
|
|
|
case nir_tex_src_sampler_handle:
|
|
|
|
|
case nir_tex_src_sampler_offset:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent |= src_divergent(instr->src[i].src, state) &&
|
2019-05-20 14:58:23 +02:00
|
|
|
instr->sampler_non_uniform;
|
|
|
|
|
break;
|
|
|
|
|
case nir_tex_src_texture_deref:
|
|
|
|
|
case nir_tex_src_texture_handle:
|
|
|
|
|
case nir_tex_src_texture_offset:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent |= src_divergent(instr->src[i].src, state) &&
|
2019-05-20 14:58:23 +02:00
|
|
|
instr->texture_non_uniform;
|
|
|
|
|
break;
|
2025-01-21 15:52:59 +02:00
|
|
|
case nir_tex_src_offset:
|
|
|
|
|
instr->offset_non_uniform = src_divergent(instr->src[i].src, state);
|
|
|
|
|
is_divergent |= instr->offset_non_uniform;
|
|
|
|
|
break;
|
2019-05-20 14:58:23 +02:00
|
|
|
default:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent |= src_divergent(instr->src[i].src, state);
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-02-12 04:05:34 -06:00
|
|
|
/* If the texture instruction skips helpers, that may add divergence even
|
|
|
|
|
* if none of the sources of the texture op diverge.
|
|
|
|
|
*/
|
|
|
|
|
if (instr->skip_helpers)
|
|
|
|
|
is_divergent = true;
|
|
|
|
|
|
2023-08-14 11:56:00 -05:00
|
|
|
instr->def.divergent = is_divergent;
|
2019-05-20 14:58:23 +02:00
|
|
|
return is_divergent;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2024-02-17 08:48:31 -05:00
|
|
|
visit_def(nir_def *def, struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2023-08-08 12:00:35 -05:00
|
|
|
nir_variable_mode_is_uniform(nir_variable_mode mode)
|
|
|
|
|
{
|
2019-05-20 14:58:23 +02:00
|
|
|
switch (mode) {
|
|
|
|
|
case nir_var_uniform:
|
|
|
|
|
case nir_var_mem_ubo:
|
|
|
|
|
case nir_var_mem_ssbo:
|
|
|
|
|
case nir_var_mem_shared:
|
2022-05-23 07:54:58 +02:00
|
|
|
case nir_var_mem_task_payload:
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_var_mem_global:
|
2021-10-15 12:58:22 -05:00
|
|
|
case nir_var_image:
|
2019-05-20 14:58:23 +02:00
|
|
|
return true;
|
|
|
|
|
default:
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2024-03-07 09:51:46 -05:00
|
|
|
nir_variable_is_uniform(nir_shader *shader, nir_variable *var,
|
|
|
|
|
struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
|
|
|
|
if (nir_variable_mode_is_uniform(var->data.mode))
|
|
|
|
|
return true;
|
|
|
|
|
|
2024-03-07 09:51:46 -05:00
|
|
|
/* Handle system value variables. */
|
|
|
|
|
if (var->data.mode == nir_var_system_value) {
|
|
|
|
|
/* Fake the instruction to reuse visit_intrinsic for all sysvals. */
|
|
|
|
|
nir_intrinsic_instr fake_instr;
|
|
|
|
|
|
|
|
|
|
memset(&fake_instr, 0, sizeof(fake_instr));
|
|
|
|
|
fake_instr.intrinsic =
|
|
|
|
|
nir_intrinsic_from_system_value(var->data.location);
|
|
|
|
|
|
|
|
|
|
visit_intrinsic(&fake_instr, state);
|
|
|
|
|
return !fake_instr.def.divergent;
|
|
|
|
|
}
|
|
|
|
|
|
2024-04-09 18:14:12 +01:00
|
|
|
nir_divergence_options options = state->options;
|
2020-09-02 11:45:46 +01:00
|
|
|
gl_shader_stage stage = shader->info.stage;
|
|
|
|
|
|
|
|
|
|
if (stage == MESA_SHADER_FRAGMENT &&
|
|
|
|
|
(options & nir_divergence_single_prim_per_subgroup) &&
|
2019-05-20 14:58:23 +02:00
|
|
|
var->data.mode == nir_var_shader_in &&
|
|
|
|
|
var->data.interpolation == INTERP_MODE_FLAT)
|
|
|
|
|
return true;
|
|
|
|
|
|
2020-09-02 11:45:46 +01:00
|
|
|
if (stage == MESA_SHADER_TESS_CTRL &&
|
|
|
|
|
(options & nir_divergence_single_patch_per_tcs_subgroup) &&
|
2019-05-20 14:58:23 +02:00
|
|
|
var->data.mode == nir_var_shader_out && var->data.patch)
|
|
|
|
|
return true;
|
|
|
|
|
|
2020-09-02 11:45:46 +01:00
|
|
|
if (stage == MESA_SHADER_TESS_EVAL &&
|
|
|
|
|
(options & nir_divergence_single_patch_per_tes_subgroup) &&
|
2019-05-20 14:58:23 +02:00
|
|
|
var->data.mode == nir_var_shader_in && var->data.patch)
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2024-03-07 09:51:46 -05:00
|
|
|
visit_deref(nir_shader *shader, nir_deref_instr *deref,
|
|
|
|
|
struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
2023-08-14 11:56:00 -05:00
|
|
|
if (deref->def.divergent)
|
2019-05-20 14:58:23 +02:00
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
bool is_divergent = false;
|
|
|
|
|
switch (deref->deref_type) {
|
|
|
|
|
case nir_deref_type_var:
|
2024-03-07 09:51:46 -05:00
|
|
|
is_divergent = !nir_variable_is_uniform(shader, deref->var, state);
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
case nir_deref_type_array:
|
|
|
|
|
case nir_deref_type_ptr_as_array:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent = src_divergent(deref->arr.index, state);
|
2020-11-24 11:02:00 +01:00
|
|
|
FALLTHROUGH;
|
2019-05-20 14:58:23 +02:00
|
|
|
case nir_deref_type_struct:
|
|
|
|
|
case nir_deref_type_array_wildcard:
|
2024-09-03 16:14:25 +02:00
|
|
|
is_divergent |= src_divergent(deref->parent, state);
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
case nir_deref_type_cast:
|
|
|
|
|
is_divergent = !nir_variable_mode_is_uniform(deref->var->data.mode) ||
|
2024-09-03 16:14:25 +02:00
|
|
|
src_divergent(deref->parent, state);
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-14 11:56:00 -05:00
|
|
|
deref->def.divergent = is_divergent;
|
2019-05-20 14:58:23 +02:00
|
|
|
return is_divergent;
|
|
|
|
|
}
|
|
|
|
|
|
2020-02-05 18:36:34 +01:00
|
|
|
static bool
|
|
|
|
|
visit_jump(nir_jump_instr *jump, struct divergence_state *state)
|
|
|
|
|
{
|
|
|
|
|
switch (jump->type) {
|
|
|
|
|
case nir_jump_continue:
|
|
|
|
|
if (state->divergent_loop_continue)
|
|
|
|
|
return false;
|
|
|
|
|
if (state->divergent_loop_cf)
|
|
|
|
|
state->divergent_loop_continue = true;
|
|
|
|
|
return state->divergent_loop_continue;
|
|
|
|
|
case nir_jump_break:
|
|
|
|
|
if (state->divergent_loop_break)
|
|
|
|
|
return false;
|
|
|
|
|
if (state->divergent_loop_cf)
|
|
|
|
|
state->divergent_loop_break = true;
|
|
|
|
|
return state->divergent_loop_break;
|
2020-05-15 15:46:08 -05:00
|
|
|
case nir_jump_halt:
|
|
|
|
|
/* This totally kills invocations so it doesn't add divergence */
|
|
|
|
|
break;
|
2020-02-05 18:36:34 +01:00
|
|
|
case nir_jump_return:
|
|
|
|
|
unreachable("NIR divergence analysis: Unsupported return instruction.");
|
2020-07-02 14:32:04 +02:00
|
|
|
break;
|
|
|
|
|
case nir_jump_goto:
|
|
|
|
|
case nir_jump_goto_if:
|
|
|
|
|
unreachable("NIR divergence analysis: Unsupported goto_if instruction.");
|
|
|
|
|
break;
|
2020-02-05 18:36:34 +01:00
|
|
|
}
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-21 17:07:56 +01:00
|
|
|
static bool
|
2024-09-03 15:59:36 +02:00
|
|
|
set_ssa_def_not_divergent(nir_def *def, void *invariant)
|
2020-04-21 17:07:56 +01:00
|
|
|
{
|
|
|
|
|
def->divergent = false;
|
2024-09-03 15:59:36 +02:00
|
|
|
def->loop_invariant = *(bool *)invariant;
|
2020-04-21 17:07:56 +01:00
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2024-09-03 15:59:36 +02:00
|
|
|
static bool
|
|
|
|
|
instr_is_loop_invariant(nir_instr *instr, struct divergence_state *state)
|
|
|
|
|
{
|
|
|
|
|
if (!state->loop)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
switch (instr->type) {
|
|
|
|
|
case nir_instr_type_load_const:
|
|
|
|
|
case nir_instr_type_undef:
|
|
|
|
|
case nir_instr_type_jump:
|
|
|
|
|
return true;
|
|
|
|
|
case nir_instr_type_intrinsic:
|
|
|
|
|
if (!nir_intrinsic_can_reorder(nir_instr_as_intrinsic(instr)))
|
|
|
|
|
return false;
|
|
|
|
|
FALLTHROUGH;
|
|
|
|
|
case nir_instr_type_alu:
|
|
|
|
|
case nir_instr_type_deref:
|
|
|
|
|
case nir_instr_type_tex:
|
|
|
|
|
return nir_foreach_src(instr, src_invariant, state->loop);
|
|
|
|
|
case nir_instr_type_call:
|
2024-01-07 22:14:51 +01:00
|
|
|
return false;
|
|
|
|
|
case nir_instr_type_phi:
|
2024-09-03 15:59:36 +02:00
|
|
|
case nir_instr_type_parallel_copy:
|
|
|
|
|
default:
|
|
|
|
|
unreachable("NIR divergence analysis: Unsupported instruction type.");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-09-02 11:48:20 +01:00
|
|
|
static bool
|
2024-02-17 08:48:31 -05:00
|
|
|
update_instr_divergence(nir_instr *instr, struct divergence_state *state)
|
2020-09-02 11:48:20 +01:00
|
|
|
{
|
|
|
|
|
switch (instr->type) {
|
|
|
|
|
case nir_instr_type_alu:
|
2024-02-17 08:48:31 -05:00
|
|
|
return visit_alu(nir_instr_as_alu(instr), state);
|
2020-09-02 11:48:20 +01:00
|
|
|
case nir_instr_type_intrinsic:
|
2024-02-17 08:48:31 -05:00
|
|
|
return visit_intrinsic(nir_instr_as_intrinsic(instr), state);
|
2020-09-02 11:48:20 +01:00
|
|
|
case nir_instr_type_tex:
|
2024-02-17 08:48:31 -05:00
|
|
|
return visit_tex(nir_instr_as_tex(instr), state);
|
2020-09-02 11:48:20 +01:00
|
|
|
case nir_instr_type_load_const:
|
2024-02-17 08:48:31 -05:00
|
|
|
return visit_def(&nir_instr_as_load_const(instr)->def, state);
|
2023-08-15 09:59:06 -05:00
|
|
|
case nir_instr_type_undef:
|
2024-02-17 08:48:31 -05:00
|
|
|
return visit_def(&nir_instr_as_undef(instr)->def, state);
|
2020-09-02 11:48:20 +01:00
|
|
|
case nir_instr_type_deref:
|
2024-03-07 09:51:46 -05:00
|
|
|
return visit_deref(state->shader, nir_instr_as_deref(instr), state);
|
2024-01-07 22:14:51 +01:00
|
|
|
case nir_instr_type_call:
|
|
|
|
|
return false;
|
2020-09-02 11:48:20 +01:00
|
|
|
case nir_instr_type_jump:
|
|
|
|
|
case nir_instr_type_phi:
|
|
|
|
|
case nir_instr_type_parallel_copy:
|
|
|
|
|
default:
|
|
|
|
|
unreachable("NIR divergence analysis: Unsupported instruction type.");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
static bool
|
2020-02-05 11:53:04 +01:00
|
|
|
visit_block(nir_block *block, struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
|
|
|
|
bool has_changed = false;
|
|
|
|
|
|
|
|
|
|
nir_foreach_instr(instr, block) {
|
2020-04-21 17:07:56 +01:00
|
|
|
/* phis are handled when processing the branches */
|
|
|
|
|
if (instr->type == nir_instr_type_phi)
|
|
|
|
|
continue;
|
|
|
|
|
|
2024-09-03 15:59:36 +02:00
|
|
|
if (state->first_visit) {
|
2025-01-13 15:01:51 +00:00
|
|
|
bool invariant = state->loop_all_invariant || instr_is_loop_invariant(instr, state);
|
2024-09-03 15:59:36 +02:00
|
|
|
nir_foreach_def(instr, set_ssa_def_not_divergent, &invariant);
|
|
|
|
|
}
|
2020-04-21 17:07:56 +01:00
|
|
|
|
2023-10-01 00:21:16 -04:00
|
|
|
if (instr->type == nir_instr_type_jump) {
|
2020-02-05 18:36:34 +01:00
|
|
|
has_changed |= visit_jump(nir_instr_as_jump(instr), state);
|
2023-10-01 00:21:16 -04:00
|
|
|
} else {
|
2024-02-17 08:48:31 -05:00
|
|
|
has_changed |= update_instr_divergence(instr, state);
|
2023-10-01 00:21:16 -04:00
|
|
|
}
|
2019-05-20 14:58:23 +02:00
|
|
|
}
|
|
|
|
|
|
2024-03-09 12:59:21 -05:00
|
|
|
bool divergent = state->divergent_loop_cf ||
|
|
|
|
|
state->divergent_loop_continue ||
|
|
|
|
|
state->divergent_loop_break;
|
|
|
|
|
if (divergent != block->divergent) {
|
|
|
|
|
block->divergent = divergent;
|
|
|
|
|
has_changed = true;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
return has_changed;
|
|
|
|
|
}
|
|
|
|
|
|
2020-02-05 13:08:27 +01:00
|
|
|
/* There are 3 types of phi instructions:
|
|
|
|
|
* (1) gamma: represent the joining point of different paths
|
|
|
|
|
* created by an “if-then-else” branch.
|
|
|
|
|
* The resulting value is divergent if the branch condition
|
|
|
|
|
* or any of the source values is divergent. */
|
|
|
|
|
static bool
|
2024-04-09 18:14:12 +01:00
|
|
|
visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent, bool ignore_undef)
|
2020-02-05 13:08:27 +01:00
|
|
|
{
|
2023-08-14 11:56:00 -05:00
|
|
|
if (phi->def.divergent)
|
2020-02-05 13:08:27 +01:00
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
unsigned defined_srcs = 0;
|
|
|
|
|
nir_foreach_phi_src(src, phi) {
|
|
|
|
|
/* if any source value is divergent, the resulting value is divergent */
|
2024-09-03 16:14:25 +02:00
|
|
|
if (nir_src_is_divergent(&src->src)) {
|
2023-08-14 11:56:00 -05:00
|
|
|
phi->def.divergent = true;
|
2020-02-05 13:08:27 +01:00
|
|
|
return true;
|
|
|
|
|
}
|
2023-08-15 09:59:06 -05:00
|
|
|
if (src->src.ssa->parent_instr->type != nir_instr_type_undef) {
|
2020-02-05 13:08:27 +01:00
|
|
|
defined_srcs++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-04-09 18:14:12 +01:00
|
|
|
if (!(ignore_undef && defined_srcs <= 1) && if_cond_divergent) {
|
2023-08-14 11:56:00 -05:00
|
|
|
phi->def.divergent = true;
|
2020-02-05 13:08:27 +01:00
|
|
|
return true;
|
|
|
|
|
}
|
2020-02-05 18:36:34 +01:00
|
|
|
|
2020-02-05 13:08:27 +01:00
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* There are 3 types of phi instructions:
|
|
|
|
|
* (2) mu: which only exist at loop headers,
|
|
|
|
|
* merge initial and loop-carried values.
|
|
|
|
|
* The resulting value is divergent if any source value
|
|
|
|
|
* is divergent or a divergent loop continue condition
|
|
|
|
|
* is associated with a different ssa-def. */
|
|
|
|
|
static bool
|
2020-02-05 18:36:34 +01:00
|
|
|
visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_continue)
|
2020-02-05 13:08:27 +01:00
|
|
|
{
|
2023-08-14 11:56:00 -05:00
|
|
|
if (phi->def.divergent)
|
2020-02-05 13:08:27 +01:00
|
|
|
return false;
|
|
|
|
|
|
2023-08-12 16:17:15 -04:00
|
|
|
nir_def *same = NULL;
|
2020-02-05 13:08:27 +01:00
|
|
|
nir_foreach_phi_src(src, phi) {
|
|
|
|
|
/* if any source value is divergent, the resulting value is divergent */
|
2024-09-03 16:14:25 +02:00
|
|
|
if (nir_src_is_divergent(&src->src)) {
|
2023-08-14 11:56:00 -05:00
|
|
|
phi->def.divergent = true;
|
2020-02-05 13:08:27 +01:00
|
|
|
return true;
|
|
|
|
|
}
|
2020-02-05 18:36:34 +01:00
|
|
|
/* if this loop is uniform, we're done here */
|
|
|
|
|
if (!divergent_continue)
|
2020-02-05 13:08:27 +01:00
|
|
|
continue;
|
|
|
|
|
/* skip the loop preheader */
|
2020-02-05 18:36:34 +01:00
|
|
|
if (src->pred == preheader)
|
2020-02-05 13:08:27 +01:00
|
|
|
continue;
|
|
|
|
|
|
2020-02-05 18:36:34 +01:00
|
|
|
/* check if all loop-carried values are from the same ssa-def */
|
|
|
|
|
if (!same)
|
|
|
|
|
same = src->src.ssa;
|
|
|
|
|
else if (same != src->src.ssa) {
|
2023-08-14 11:56:00 -05:00
|
|
|
phi->def.divergent = true;
|
2020-02-05 18:36:34 +01:00
|
|
|
return true;
|
2020-02-05 13:08:27 +01:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* There are 3 types of phi instructions:
|
|
|
|
|
* (3) eta: represent values that leave a loop.
|
|
|
|
|
* The resulting value is divergent if the source value is divergent
|
|
|
|
|
* or any loop exit condition is divergent for a value which is
|
2024-09-04 10:25:27 +02:00
|
|
|
* not loop-invariant (see nir_src_is_divergent()).
|
|
|
|
|
*/
|
2020-02-05 13:08:27 +01:00
|
|
|
static bool
|
2024-09-04 10:25:27 +02:00
|
|
|
visit_loop_exit_phi(nir_phi_instr *phi, nir_loop *loop)
|
2020-02-05 13:08:27 +01:00
|
|
|
{
|
2023-08-14 11:56:00 -05:00
|
|
|
if (phi->def.divergent)
|
2020-02-05 13:08:27 +01:00
|
|
|
return false;
|
|
|
|
|
|
2024-09-04 10:25:27 +02:00
|
|
|
nir_def *same = NULL;
|
2020-02-05 13:08:27 +01:00
|
|
|
nir_foreach_phi_src(src, phi) {
|
2024-09-04 10:25:27 +02:00
|
|
|
/* If any loop exit condition is divergent and this value is not loop
|
|
|
|
|
* invariant, or if the source value is divergent, then the resulting
|
|
|
|
|
* value is divergent.
|
|
|
|
|
*/
|
|
|
|
|
if ((loop->divergent_break && !src_invariant(&src->src, loop)) ||
|
|
|
|
|
nir_src_is_divergent(&src->src)) {
|
|
|
|
|
phi->def.divergent = true;
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* if this loop is uniform, we're done here */
|
|
|
|
|
if (!loop->divergent_break)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
/* check if all loop-exit values are from the same ssa-def */
|
|
|
|
|
if (!same)
|
|
|
|
|
same = src->src.ssa;
|
|
|
|
|
else if (same != src->src.ssa) {
|
2023-08-14 11:56:00 -05:00
|
|
|
phi->def.divergent = true;
|
2020-02-05 13:08:27 +01:00
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
2020-02-05 18:36:34 +01:00
|
|
|
|
2020-02-05 13:08:27 +01:00
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-20 14:58:23 +02:00
|
|
|
static bool
|
2020-02-05 11:53:04 +01:00
|
|
|
visit_if(nir_if *if_stmt, struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
2020-02-05 18:36:34 +01:00
|
|
|
bool progress = false;
|
2024-09-03 16:14:25 +02:00
|
|
|
bool cond_divergent = src_divergent(if_stmt->condition, state);
|
2020-02-05 18:36:34 +01:00
|
|
|
|
|
|
|
|
struct divergence_state then_state = *state;
|
2024-09-03 16:14:25 +02:00
|
|
|
then_state.divergent_loop_cf |= cond_divergent;
|
2020-02-05 18:36:34 +01:00
|
|
|
progress |= visit_cf_list(&if_stmt->then_list, &then_state);
|
|
|
|
|
|
|
|
|
|
struct divergence_state else_state = *state;
|
2024-09-03 16:14:25 +02:00
|
|
|
else_state.divergent_loop_cf |= cond_divergent;
|
2020-02-05 18:36:34 +01:00
|
|
|
progress |= visit_cf_list(&if_stmt->else_list, &else_state);
|
2020-02-05 13:08:27 +01:00
|
|
|
|
|
|
|
|
/* handle phis after the IF */
|
2024-09-03 15:59:36 +02:00
|
|
|
bool invariant = state->loop && src_invariant(&if_stmt->condition, state->loop);
|
2023-05-11 13:20:43 -04:00
|
|
|
nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&if_stmt->cf_node)) {
|
2024-09-03 15:59:36 +02:00
|
|
|
if (state->first_visit) {
|
2023-08-14 11:56:00 -05:00
|
|
|
phi->def.divergent = false;
|
2024-09-03 15:59:36 +02:00
|
|
|
phi->def.loop_invariant =
|
|
|
|
|
invariant && nir_foreach_src(&phi->instr, src_invariant, state->loop);
|
|
|
|
|
}
|
2024-04-09 18:14:12 +01:00
|
|
|
bool ignore_undef = state->options & nir_divergence_ignore_undef_if_phi_srcs;
|
2024-09-03 16:14:25 +02:00
|
|
|
progress |= visit_if_merge_phi(phi, cond_divergent, ignore_undef);
|
2020-02-05 13:08:27 +01:00
|
|
|
}
|
|
|
|
|
|
2020-02-05 18:36:34 +01:00
|
|
|
/* join loop divergence information from both branch legs */
|
|
|
|
|
state->divergent_loop_continue |= then_state.divergent_loop_continue ||
|
|
|
|
|
else_state.divergent_loop_continue;
|
|
|
|
|
state->divergent_loop_break |= then_state.divergent_loop_break ||
|
|
|
|
|
else_state.divergent_loop_break;
|
|
|
|
|
|
|
|
|
|
/* A divergent continue makes succeeding loop CF divergent:
|
|
|
|
|
* not all loop-active invocations participate in the remaining loop-body
|
|
|
|
|
* which means that a following break might be taken by some invocations, only */
|
|
|
|
|
state->divergent_loop_cf |= state->divergent_loop_continue;
|
|
|
|
|
|
2024-09-04 10:54:50 +02:00
|
|
|
state->consider_loop_invariance |= then_state.consider_loop_invariance ||
|
|
|
|
|
else_state.consider_loop_invariance;
|
|
|
|
|
|
2020-02-05 13:08:27 +01:00
|
|
|
return progress;
|
2019-05-20 14:58:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Visit a loop: seed the header phis, iterate the body to a fixed point,
 * then resolve the exit phis. Returns true if any divergence changed.
 */
static bool
visit_loop(nir_loop *loop, struct divergence_state *state)
{
   assert(!nir_loop_has_continue_construct(loop));
   bool progress = false;
   nir_block *loop_header = nir_loop_first_block(loop);
   nir_block *loop_preheader = nir_block_cf_tree_prev(loop_header);

   /* handle loop header phis first: we have no knowledge yet about
    * the loop's control flow or any loop-carried sources. */
   nir_foreach_phi(phi, loop_header) {
      /* On revisits, already-divergent phis can't change back. */
      if (!state->first_visit && phi->def.divergent)
         continue;

      phi->def.loop_invariant = false;
      /* Seed from the preheader source only; loop-carried sources are not
       * known yet and are handled by the fixed-point iteration below. */
      nir_foreach_phi_src(src, phi) {
         if (src->pred == loop_preheader) {
            phi->def.divergent = nir_src_is_divergent(&src->src);
            break;
         }
      }
      progress |= phi->def.divergent;
   }

   /* setup loop state */
   struct divergence_state loop_state = *state;
   loop_state.loop = loop;
   /* A single-predecessor header means the loop body runs at most once, so
    * everything inside is trivially loop-invariant. */
   loop_state.loop_all_invariant = loop_header->predecessors->entries == 1;
   loop_state.divergent_loop_cf = false;
   loop_state.divergent_loop_continue = false;
   loop_state.divergent_loop_break = false;

   /* process loop body until no further changes are made */
   bool repeat;
   do {
      progress |= visit_cf_list(&loop->body, &loop_state);
      repeat = false;

      /* revisit loop header phis to see if something has changed */
      nir_foreach_phi(phi, loop_header) {
         repeat |= visit_loop_header_phi(phi, loop_preheader,
                                         loop_state.divergent_loop_continue);
      }

      /* Reset per-iteration CF divergence; continue/break flags persist. */
      loop_state.divergent_loop_cf = false;
      loop_state.first_visit = false;
   } while (repeat);

   /* Publish the converged loop divergence on the loop itself. */
   loop->divergent_continue = loop_state.divergent_loop_continue;
   loop->divergent_break = loop_state.divergent_loop_break;

   /* handle phis after the loop */
   nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&loop->cf_node)) {
      if (state->first_visit) {
         phi->def.divergent = false;
         phi->def.loop_invariant = false;
      }
      progress |= visit_loop_exit_phi(phi, loop);
   }

   state->consider_loop_invariance |= loop_state.consider_loop_invariance ||
                                      loop->divergent_break;
   return progress;
}
|
|
|
|
|
|
|
|
|
|
static bool
|
2020-02-05 11:53:04 +01:00
|
|
|
visit_cf_list(struct exec_list *list, struct divergence_state *state)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
|
|
|
|
bool has_changed = false;
|
|
|
|
|
|
|
|
|
|
foreach_list_typed(nir_cf_node, node, node, list) {
|
|
|
|
|
switch (node->type) {
|
|
|
|
|
case nir_cf_node_block:
|
2020-02-05 11:53:04 +01:00
|
|
|
has_changed |= visit_block(nir_cf_node_as_block(node), state);
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
case nir_cf_node_if:
|
2020-02-05 11:53:04 +01:00
|
|
|
has_changed |= visit_if(nir_cf_node_as_if(node), state);
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
case nir_cf_node_loop:
|
2020-02-05 11:53:04 +01:00
|
|
|
has_changed |= visit_loop(nir_cf_node_as_loop(node), state);
|
2019-05-20 14:58:23 +02:00
|
|
|
break;
|
|
|
|
|
case nir_cf_node_function:
|
|
|
|
|
unreachable("NIR divergence analysis: Unsupported cf_node type.");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return has_changed;
|
|
|
|
|
}
|
|
|
|
|
|
2019-10-15 14:48:10 -05:00
|
|
|
void
|
2024-04-09 18:14:12 +01:00
|
|
|
nir_divergence_analysis_impl(nir_function_impl *impl, nir_divergence_options options)
|
2019-05-20 14:58:23 +02:00
|
|
|
{
|
2024-09-04 10:04:41 +02:00
|
|
|
nir_metadata_require(impl, nir_metadata_block_index);
|
|
|
|
|
|
2020-02-05 11:53:04 +01:00
|
|
|
struct divergence_state state = {
|
2024-04-09 18:14:12 +01:00
|
|
|
.stage = impl->function->shader->info.stage,
|
|
|
|
|
.shader = impl->function->shader,
|
2024-10-19 12:31:21 +02:00
|
|
|
.impl = impl,
|
2024-04-09 18:14:12 +01:00
|
|
|
.options = options,
|
2024-09-03 15:59:36 +02:00
|
|
|
.loop = NULL,
|
2025-01-13 15:01:51 +00:00
|
|
|
.loop_all_invariant = false,
|
2020-02-05 18:36:34 +01:00
|
|
|
.divergent_loop_cf = false,
|
|
|
|
|
.divergent_loop_continue = false,
|
|
|
|
|
.divergent_loop_break = false,
|
2020-04-21 17:07:56 +01:00
|
|
|
.first_visit = true,
|
2020-02-05 11:53:04 +01:00
|
|
|
};
|
|
|
|
|
|
2024-04-09 18:14:12 +01:00
|
|
|
visit_cf_list(&impl->body, &state);
|
2024-09-04 10:04:41 +02:00
|
|
|
|
2024-08-23 11:42:37 +02:00
|
|
|
/* Unless this pass is called with shader->options->divergence_analysis_options,
|
|
|
|
|
* it invalidates nir_metadata_divergence.
|
|
|
|
|
*/
|
treewide: Switch to nir_progress
Via the Coccinelle patch at the end of the commit message, followed by
sed -ie 's/progress = progress | /progress |=/g' $(git grep -l 'progress = prog')
ninja -C ~/mesa/build clang-format
cd ~/mesa/src/compiler/nir && clang-format -i *.c
agxfmt
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
-return prog;
+return nir_progress(prog, impl, metadata);
@@
expression prog_expr, impl, metadata;
@@
-if (prog_expr) {
-nir_metadata_preserve(impl, metadata);
-return true;
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-return false;
-}
+bool progress = prog_expr;
+return nir_progress(progress, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-nir_metadata_preserve(impl, prog ? (metadata) : nir_metadata_all);
-return prog;
+return nir_progress(prog, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-nir_metadata_preserve(impl, prog ? (metadata) : nir_metadata_all);
+nir_progress(prog, impl, metadata);
@@
expression impl, metadata;
@@
-nir_metadata_preserve(impl, metadata);
-return true;
+return nir_progress(true, impl, metadata);
@@
expression impl;
@@
-nir_metadata_preserve(impl, nir_metadata_all);
-return false;
+return nir_no_progress(impl);
@@
identifier other_prog, prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
-other_prog |= prog;
+other_prog = other_prog | nir_progress(prog, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+nir_progress(prog, impl, metadata);
@@
identifier other_prog, prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-other_prog = true;
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+other_prog = other_prog | nir_progress(prog, impl, metadata);
@@
expression prog_expr, impl, metadata;
identifier prog;
@@
-if (prog_expr) {
-nir_metadata_preserve(impl, metadata);
-prog = true;
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+bool impl_progress = prog_expr;
+prog = prog | nir_progress(impl_progress, impl, metadata);
@@
identifier other_prog, prog;
expression impl, metadata;
@@
-if (prog) {
-other_prog = true;
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+other_prog = other_prog | nir_progress(prog, impl, metadata);
@@
expression prog_expr, impl, metadata;
identifier prog;
@@
-if (prog_expr) {
-prog = true;
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+bool impl_progress = prog_expr;
+prog = prog | nir_progress(impl_progress, impl, metadata);
@@
expression prog_expr, impl, metadata;
@@
-if (prog_expr) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+bool impl_progress = prog_expr;
+nir_progress(impl_progress, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-nir_metadata_preserve(impl, metadata);
-prog = true;
+prog = nir_progress(true, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-}
-return prog;
+return nir_progress(prog, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-}
+nir_progress(prog, impl, metadata);
@@
expression impl;
@@
-nir_metadata_preserve(impl, nir_metadata_all);
+nir_no_progress(impl);
@@
expression impl, metadata;
@@
-nir_metadata_preserve(impl, metadata);
+nir_progress(true, impl, metadata);
squashme! sed -ie 's/progress = progress | /progress |=/g' $(git grep -l 'progress = prog')
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Georg Lehmann <dadschoorse@gmail.com>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33722>
2025-02-24 15:10:33 -05:00
|
|
|
nir_progress(true, impl, ~nir_metadata_divergence);
|
2024-04-09 18:14:12 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
|
nir_divergence_analysis(nir_shader *shader)
|
|
|
|
|
{
|
2024-01-07 22:41:33 +01:00
|
|
|
nir_foreach_function_impl(impl, shader) {
|
2024-08-23 11:42:37 +02:00
|
|
|
nir_metadata_require(impl, nir_metadata_divergence);
|
2024-01-07 22:41:33 +01:00
|
|
|
}
|
2019-05-20 14:58:23 +02:00
|
|
|
}
|
2020-02-05 11:53:04 +01:00
|
|
|
|
2023-10-01 00:21:16 -04:00
|
|
|
/* Compute divergence between vertices of the same primitive. This uses
|
|
|
|
|
* the same divergent field in nir_def and nir_loop as the regular divergence
|
|
|
|
|
* pass.
|
|
|
|
|
*/
|
|
|
|
|
void
|
|
|
|
|
nir_vertex_divergence_analysis(nir_shader *shader)
|
|
|
|
|
{
|
|
|
|
|
struct divergence_state state = {
|
|
|
|
|
.stage = shader->info.stage,
|
|
|
|
|
.shader = shader,
|
2024-04-09 18:14:12 +01:00
|
|
|
.options = shader->options->divergence_analysis_options,
|
2024-09-03 15:59:36 +02:00
|
|
|
.loop = NULL,
|
2025-01-13 15:01:51 +00:00
|
|
|
.loop_all_invariant = false,
|
2023-10-01 00:21:16 -04:00
|
|
|
.vertex_divergence = true,
|
|
|
|
|
.first_visit = true,
|
|
|
|
|
};
|
|
|
|
|
|
2024-01-07 22:41:33 +01:00
|
|
|
nir_foreach_function_impl(impl, shader) {
|
|
|
|
|
nir_metadata_require(impl, nir_metadata_block_index);
|
|
|
|
|
state.impl = impl;
|
|
|
|
|
visit_cf_list(&impl->body, &state);
|
treewide: Switch to nir_progress
Via the Coccinelle patch at the end of the commit message, followed by
sed -ie 's/progress = progress | /progress |=/g' $(git grep -l 'progress = prog')
ninja -C ~/mesa/build clang-format
cd ~/mesa/src/compiler/nir && clang-format -i *.c
agxfmt
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
-return prog;
+return nir_progress(prog, impl, metadata);
@@
expression prog_expr, impl, metadata;
@@
-if (prog_expr) {
-nir_metadata_preserve(impl, metadata);
-return true;
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-return false;
-}
+bool progress = prog_expr;
+return nir_progress(progress, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-nir_metadata_preserve(impl, prog ? (metadata) : nir_metadata_all);
-return prog;
+return nir_progress(prog, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-nir_metadata_preserve(impl, prog ? (metadata) : nir_metadata_all);
+nir_progress(prog, impl, metadata);
@@
expression impl, metadata;
@@
-nir_metadata_preserve(impl, metadata);
-return true;
+return nir_progress(true, impl, metadata);
@@
expression impl;
@@
-nir_metadata_preserve(impl, nir_metadata_all);
-return false;
+return nir_no_progress(impl);
@@
identifier other_prog, prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
-other_prog |= prog;
+other_prog = other_prog | nir_progress(prog, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+nir_progress(prog, impl, metadata);
@@
identifier other_prog, prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-other_prog = true;
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+other_prog = other_prog | nir_progress(prog, impl, metadata);
@@
expression prog_expr, impl, metadata;
identifier prog;
@@
-if (prog_expr) {
-nir_metadata_preserve(impl, metadata);
-prog = true;
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+bool impl_progress = prog_expr;
+prog = prog | nir_progress(impl_progress, impl, metadata);
@@
identifier other_prog, prog;
expression impl, metadata;
@@
-if (prog) {
-other_prog = true;
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+other_prog = other_prog | nir_progress(prog, impl, metadata);
@@
expression prog_expr, impl, metadata;
identifier prog;
@@
-if (prog_expr) {
-prog = true;
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+bool impl_progress = prog_expr;
+prog = prog | nir_progress(impl_progress, impl, metadata);
@@
expression prog_expr, impl, metadata;
@@
-if (prog_expr) {
-nir_metadata_preserve(impl, metadata);
-} else {
-nir_metadata_preserve(impl, nir_metadata_all);
-}
+bool impl_progress = prog_expr;
+nir_progress(impl_progress, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-nir_metadata_preserve(impl, metadata);
-prog = true;
+prog = nir_progress(true, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-}
-return prog;
+return nir_progress(prog, impl, metadata);
@@
identifier prog;
expression impl, metadata;
@@
-if (prog) {
-nir_metadata_preserve(impl, metadata);
-}
+nir_progress(prog, impl, metadata);
@@
expression impl;
@@
-nir_metadata_preserve(impl, nir_metadata_all);
+nir_no_progress(impl);
@@
expression impl, metadata;
@@
-nir_metadata_preserve(impl, metadata);
+nir_progress(true, impl, metadata);
squashme! sed -ie 's/progress = progress | /progress |=/g' $(git grep -l 'progress = prog')
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Georg Lehmann <dadschoorse@gmail.com>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33722>
2025-02-24 15:10:33 -05:00
|
|
|
nir_progress(true, impl, nir_metadata_all & ~nir_metadata_divergence);
|
2024-01-07 22:41:33 +01:00
|
|
|
}
|
2023-10-01 00:21:16 -04:00
|
|
|
}
|
|
|
|
|
|
2021-11-18 23:14:26 -05:00
|
|
|
bool
|
|
|
|
|
nir_has_divergent_loop(nir_shader *shader)
|
|
|
|
|
{
|
|
|
|
|
nir_function_impl *func = nir_shader_get_entrypoint(shader);
|
|
|
|
|
|
|
|
|
|
foreach_list_typed(nir_cf_node, node, node, &func->body) {
|
2024-04-05 17:00:33 +02:00
|
|
|
if (node->type == nir_cf_node_loop) {
|
2024-04-05 17:02:46 +02:00
|
|
|
if (nir_cf_node_as_loop(node)->divergent_break)
|
2024-04-05 17:00:33 +02:00
|
|
|
return true;
|
2021-11-18 23:14:26 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-04-05 17:00:33 +02:00
|
|
|
return false;
|
2021-11-18 23:14:26 -05:00
|
|
|
}
|