nir: Drop most uses of nir_instr_rewrite_src()

Generated by the following semantic patch:

    @@
    expression I, S, D;
    @@

    -nir_instr_rewrite_src(I, S, nir_src_for_ssa(D));
    +nir_src_rewrite(S, D);

Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24729>
This commit is contained in:
Faith Ekstrand 2023-08-17 16:27:15 -05:00 committed by Marge Bot
parent 71ddaca2e2
commit b5d6b7c402
82 changed files with 151 additions and 260 deletions

View file

@ -91,7 +91,7 @@ radv_nir_lower_primitive_shading_rate(nir_shader *nir, enum amd_gfx_level gfx_le
out = nir_ior(&b, nir_ishl_imm(&b, x_rate, x_rate_shift), nir_ishl_imm(&b, y_rate, y_rate_shift));
nir_instr_rewrite_src(&intr->instr, &intr->src[1], nir_src_for_ssa(out));
nir_src_rewrite(&intr->src[1], out);
progress = true;
if (nir->info.stage == MESA_SHADER_VERTEX)

View file

@ -164,8 +164,7 @@ v3d_nir_lower_image_store(nir_builder *b, nir_intrinsic_instr *instr)
pack_mask);
}
nir_instr_rewrite_src(&instr->instr, &instr->src[3],
nir_src_for_ssa(formatted));
nir_src_rewrite(&instr->src[3], formatted);
instr->num_components = formatted->num_components;
return true;

View file

@ -49,9 +49,7 @@ lower_line_smooth_intrinsic(struct lower_line_smooth_state *state,
nir_def *new_val = nir_fmul(b, nir_vec4(b, one, one, one, coverage),
intr->src[0].ssa);
nir_instr_rewrite_src(&intr->instr,
&intr->src[0],
nir_src_for_ssa(new_val));
nir_src_rewrite(&intr->src[0], new_val);
}
static bool

View file

@ -339,8 +339,7 @@ v3d_nir_lower_logic_op_instr(struct v3d_compile *c,
nir_def *result =
v3d_nir_emit_logic_op(c, b, frag_color, rt, 0);
nir_instr_rewrite_src(&intr->instr, &intr->src[0],
nir_src_for_ssa(result));
nir_src_rewrite(&intr->src[0], result);
intr->num_components = result->num_components;
}
}

View file

@ -637,8 +637,7 @@ lower_tex_src(nir_builder *b,
* instr if needed
*/
if (index) {
nir_instr_rewrite_src(&instr->instr, &src->src,
nir_src_for_ssa(index));
nir_src_rewrite(&src->src, index);
src->src_type = is_sampler ?
nir_tex_src_sampler_offset :

View file

@ -138,8 +138,7 @@ lower_deref_instr(nir_builder *b, nir_intrinsic_instr *instr,
instr->intrinsic = op;
nir_intrinsic_set_range_base(instr, range_base);
nir_instr_rewrite_src(&instr->instr, &instr->src[0],
nir_src_for_ssa(offset));
nir_src_rewrite(&instr->src[0], offset);
nir_intrinsic_set_base(instr, idx);
nir_deref_instr_remove_if_unused(deref);

View file

@ -289,8 +289,7 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
if (glsl_type_is_boolean(deref->type)) {
b.cursor = nir_before_instr(&intrin->instr);
nir_def *ival = nir_b2i32(&b, intrin->src[1].ssa);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
nir_src_for_ssa(ival));
nir_src_rewrite(&intrin->src[1], ival);
progress = true;
}
break;

View file

@ -285,8 +285,7 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
lower_deref(b, state, nir_src_as_deref(instr->src[texture_idx].src));
/* only lower non-bindless: */
if (texture_deref) {
nir_instr_rewrite_src(&instr->instr, &instr->src[texture_idx].src,
nir_src_for_ssa(&texture_deref->def));
nir_src_rewrite(&instr->src[texture_idx].src, &texture_deref->def);
record_textures_used(&b->shader->info, texture_deref, instr->op);
}
}
@ -296,8 +295,7 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
lower_deref(b, state, nir_src_as_deref(instr->src[sampler_idx].src));
/* only lower non-bindless: */
if (sampler_deref) {
nir_instr_rewrite_src(&instr->instr, &instr->src[sampler_idx].src,
nir_src_for_ssa(&sampler_deref->def));
nir_src_rewrite(&instr->src[sampler_idx].src, &sampler_deref->def);
record_samplers_used(&b->shader->info, sampler_deref, instr->op);
}
}
@ -328,8 +326,7 @@ lower_intrinsic(nir_intrinsic_instr *instr,
/* don't lower bindless: */
if (!deref)
return false;
nir_instr_rewrite_src(&instr->instr, &instr->src[0],
nir_src_for_ssa(&deref->def));
nir_src_rewrite(&instr->src[0], &deref->def);
return true;
}
if (instr->intrinsic == nir_intrinsic_image_deref_order ||

View file

@ -2536,8 +2536,7 @@ nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin, nir_def *src,
if (nir_intrinsic_has_atomic_op(intrin))
nir_intrinsic_set_atomic_op(intrin, atomic_op);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(src));
nir_src_rewrite(&intrin->src[0], src);
}
unsigned

View file

@ -824,8 +824,7 @@ rematerialize_deref_src(nir_src *src, void *_state)
nir_deref_instr *block_deref = rematerialize_deref_in_block(deref, state);
if (block_deref != deref) {
nir_instr_rewrite_src(src->parent_instr, src,
nir_src_for_ssa(&block_deref->def));
nir_src_rewrite(src, &block_deref->def);
nir_deref_instr_remove_if_unused(deref);
state->progress = true;
}
@ -1060,8 +1059,7 @@ opt_remove_cast_cast(nir_deref_instr *cast)
cast->cast.align_offset = parent->cast.align_offset;
}
nir_instr_rewrite_src(&cast->instr, &cast->parent,
nir_src_for_ssa(parent->parent.ssa));
nir_src_rewrite(&cast->parent, parent->parent.ssa);
return true;
}
@ -1258,8 +1256,7 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref)
deref->deref_type = parent->deref_type;
nir_instr_rewrite_src(&deref->instr, &deref->parent, parent->parent);
nir_instr_rewrite_src(&deref->instr, &deref->arr.index,
nir_src_for_ssa(new_idx));
nir_src_rewrite(&deref->arr.index, new_idx);
return true;
}
@ -1345,8 +1342,7 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load)
const unsigned new_bit_size = glsl_get_bit_size(parent->type);
/* Stomp it to reference the parent */
nir_instr_rewrite_src(&load->instr, &load->src[0],
nir_src_for_ssa(&parent->def));
nir_src_rewrite(&load->src[0], &parent->def);
load->def.bit_size = new_bit_size;
load->def.num_components = new_num_comps;
load->num_components = new_num_comps;
@ -1384,16 +1380,14 @@ opt_store_vec_deref(nir_builder *b, nir_intrinsic_instr *store)
const unsigned new_num_comps = glsl_get_vector_elements(parent->type);
const unsigned new_bit_size = glsl_get_bit_size(parent->type);
nir_instr_rewrite_src(&store->instr, &store->src[0],
nir_src_for_ssa(&parent->def));
nir_src_rewrite(&store->src[0], &parent->def);
/* Restrict things down as needed so the bitcast doesn't fail */
data = nir_trim_vector(b, data, util_last_bit(write_mask));
if (old_bit_size != new_bit_size)
data = nir_bitcast_vector(b, data, new_bit_size);
data = resize_vector(b, data, new_num_comps);
nir_instr_rewrite_src(&store->instr, &store->src[1],
nir_src_for_ssa(data));
nir_src_rewrite(&store->src[1], data);
store->num_components = new_num_comps;
/* Adjust the write mask */

View file

@ -413,8 +413,7 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
exec_list_push_tail(&pcopy->entries, &entry->node);
nir_instr_rewrite_src(&phi->instr, &src->src,
nir_src_for_ssa(&entry->dest.def));
nir_src_rewrite(&src->src, &entry->dest.def);
}
nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
@ -748,8 +747,7 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
continue;
entry->src_is_reg = true;
nir_instr_rewrite_src(&pcopy->instr, &entry->src,
nir_src_for_ssa(reg));
nir_src_rewrite(&entry->src, reg);
}
break;
}

View file

@ -139,8 +139,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
}
/* Turn the load into a vector load */
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(&vec_deref->def));
nir_src_rewrite(&intrin->src[0], &vec_deref->def);
intrin->def.num_components = num_components;
intrin->num_components = num_components;

View file

@ -225,7 +225,7 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
b->cursor = nir_after_block_before_jump(src->pred);
nir_def *new_src = nir_u2uN(b, src->src.ssa, bit_size);
nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
nir_src_rewrite(&src->src, new_src);
}
phi->def.bit_size = bit_size;

View file

@ -81,8 +81,7 @@ make_sources_canonical(nir_builder *b, nir_alu_instr *alu, uint32_t start_idx)
memcpy(conv_instr->src[0].swizzle,
alu->src[i].swizzle,
sizeof(conv_instr->src[0].swizzle));
nir_instr_rewrite_src(&alu->instr,
&alu->src[i].src, nir_src_for_ssa(new_src));
nir_src_rewrite(&alu->src[i].src, new_src);
/* The swizzle will have been handled by the conversion instruction
* so we can reset it back to the default
*/
@ -358,8 +357,7 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi)
nir_op convert_op = get_bool_convert_opcode(dst_bit_size);
nir_def *new_src =
nir_build_alu(b, convert_op, phi_src->src.ssa, NULL, NULL, NULL);
nir_instr_rewrite_src(&phi->instr, &phi_src->src,
nir_src_for_ssa(new_src));
nir_src_rewrite(&phi_src->src, new_src);
}
}

View file

@ -221,8 +221,7 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp
tex->src[count].src_type = nir_tex_src_texture_offset;
else
tex->src[count].src_type = nir_tex_src_sampler_offset;
nir_instr_rewrite_src(&tex->instr, &tex->src[count].src,
nir_src_for_ssa(offset));
nir_src_rewrite(&tex->src[count].src, offset);
}
} else {
/* If we've removed a source, move this one down */

View file

@ -82,7 +82,7 @@ lower_intrinsic(nir_builder *b, nir_intrinsic_instr *intr, nir_shader *shader)
int src = intr->intrinsic == nir_intrinsic_store_deref ? 1 : 0;
s = nir_ssa_for_src(b, intr->src[src], intr->num_components);
s = nir_fsat(b, s);
nir_instr_rewrite_src(&intr->instr, &intr->src[src], nir_src_for_ssa(s));
nir_src_rewrite(&intr->src[src], s);
}
return true;

View file

@ -50,7 +50,7 @@ lower_pos_write(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
nir_channel(b, pos, 3)),
0.5),
nir_channel(b, pos, 3));
nir_instr_rewrite_src(&intr->instr, intr->src + 1, nir_src_for_ssa(def));
nir_src_rewrite(intr->src + 1, def);
return true;
}

View file

@ -173,7 +173,7 @@ try_lower_input_texop(nir_builder *b, nir_tex_instr *tex,
tex->coord_components = 3;
nir_instr_rewrite_src(&tex->instr, &tex->src[1].src, nir_src_for_ssa(coord));
nir_src_rewrite(&tex->src[1].src, coord);
return true;
}

View file

@ -2894,8 +2894,7 @@ add_const_offset_to_base_block(nir_block *block, nir_builder *b,
nir_intrinsic_set_io_semantics(intrin, sem);
b->cursor = nir_before_instr(&intrin->instr);
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(nir_imm_int(b, 0)));
nir_src_rewrite(offset, nir_imm_int(b, 0));
progress = true;
}
}

View file

@ -504,8 +504,7 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
assert(glsl_type_is_vector(new_deref->type));
}
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(&new_deref->def));
nir_src_rewrite(&intrin->src[0], &new_deref->def);
intrin->num_components =
glsl_get_components(new_deref->type);
@ -551,8 +550,7 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
assert(glsl_type_is_vector(new_deref->type));
}
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(&new_deref->def));
nir_src_rewrite(&intrin->src[0], &new_deref->def);
intrin->num_components =
glsl_get_components(new_deref->type);
@ -573,8 +571,7 @@ nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
}
}
nir_def *new_value = nir_vec_scalars(&b, comps, intrin->num_components);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
nir_src_for_ssa(new_value));
nir_src_rewrite(&intrin->src[1], new_value);
nir_intrinsic_set_write_mask(intrin,
old_wrmask << (old_frac - new_frac));
@ -650,7 +647,7 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
b.cursor = nir_before_instr(instr);
nir_def *new_deref = &nir_build_deref_var(&b, var)->def;
nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(new_deref));
nir_src_rewrite(&intrin->src[0], new_deref);
nir_deref_instr_remove_if_unused(deref);
@ -676,7 +673,7 @@ nir_vectorize_tess_levels_impl(nir_function_impl *impl)
nir_intrinsic_set_write_mask(intrin, 1 << index);
nir_def *new_val = nir_undef(&b, intrin->num_components, 32);
new_val = nir_vector_insert_imm(&b, new_val, intrin->src[1].ssa, index);
nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(new_val));
nir_src_rewrite(&intrin->src[1], new_val);
} else {
b.cursor = nir_after_instr(instr);
nir_def *val = &intrin->def;

View file

@ -506,8 +506,7 @@ nir_lower_mediump_vars_impl(nir_function_impl *impl, nir_variable_mode modes,
unreachable("Invalid 16-bit type");
}
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
nir_src_for_ssa(replace));
nir_src_rewrite(&intrin->src[1], replace);
progress = true;
break;
}

View file

@ -284,8 +284,7 @@ nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
if (var == pos_var) {
nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
nir_instr_rewrite_src(instr, &intrin->src[0],
nir_src_for_ssa(&pos_deref->def));
nir_src_rewrite(&intrin->src[0], &pos_deref->def);
/* Remove old deref since it has the wrong type. */
nir_deref_instr_remove_if_unused(old_deref);

View file

@ -60,7 +60,7 @@ lower_point_size_instr(nir_builder *b, nir_instr *instr, void *data)
if (minmax[1] > 0.0f)
psiz = nir_fmin(b, psiz, nir_imm_float(b, minmax[1]));
nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(psiz));
nir_src_rewrite(&intr->src[1], psiz);
return true;
}

View file

@ -90,7 +90,7 @@ lower_point_smooth(nir_builder *b, nir_instr *instr, UNUSED void *_state)
nir_def *one = nir_imm_float(b, 1.0f);
nir_def *new_val = nir_fmul(b, nir_vec4(b, one, one, one, coverage),
intr->src[out_src_idx].ssa);
nir_instr_rewrite_src(instr, &intr->src[out_src_idx], nir_src_for_ssa(new_val));
nir_src_rewrite(&intr->src[out_src_idx], new_val);
return true;
}

View file

@ -74,7 +74,7 @@ lower_polylinesmooth(nir_builder *b, nir_instr *instr, void *data)
nir_def *new_dest = nir_if_phi(b, res1, res2);
nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(new_dest));
nir_src_rewrite(&intr->src[0], new_dest);
return true;
}

View file

@ -97,8 +97,7 @@ lower_tex_src_to_offset(nir_builder *b,
* instr if needed
*/
if (index) {
nir_instr_rewrite_src(&instr->instr, &src->src,
nir_src_for_ssa(index));
nir_src_rewrite(&src->src, index);
src->src_type = is_sampler ? nir_tex_src_sampler_offset : nir_tex_src_texture_offset;
} else {

View file

@ -392,7 +392,7 @@ rewrite_instr_src_from_phi_builder(nir_src *src, void *_pbv_arr)
nir_def *new_def = get_phi_builder_def_for_src(src, _pbv_arr, block);
if (new_def != NULL)
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new_def));
nir_src_rewrite(src, new_def);
return true;
}

View file

@ -148,9 +148,7 @@ project_src(nir_builder *b, nir_tex_instr *tex)
}
}
nir_instr_rewrite_src(&tex->instr,
&tex->src[i].src,
nir_src_for_ssa(projected));
nir_src_rewrite(&tex->src[i].src, projected);
}
return true;
@ -208,8 +206,7 @@ lower_offset(nir_builder *b, nir_tex_instr *tex)
}
}
nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src,
nir_src_for_ssa(offset_coord));
nir_src_rewrite(&tex->src[coord_index].src, offset_coord);
return true;
}
@ -229,9 +226,7 @@ lower_rect(nir_builder *b, nir_tex_instr *tex)
if (coord_index != -1) {
nir_def *coords =
nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
nir_instr_rewrite_src(&tex->instr,
&tex->src[coord_index].src,
nir_src_for_ssa(nir_fmul(b, coords, scale)));
nir_src_rewrite(&tex->src[coord_index].src, nir_fmul(b, coords, scale));
}
}
@ -247,9 +242,7 @@ lower_rect_tex_scale(nir_builder *b, nir_tex_instr *tex)
if (coord_index != -1) {
nir_def *coords =
nir_ssa_for_src(b, tex->src[coord_index].src, tex->coord_components);
nir_instr_rewrite_src(&tex->instr,
&tex->src[coord_index].src,
nir_src_for_ssa(nir_fmul(b, coords, scale)));
nir_src_rewrite(&tex->src[coord_index].src, nir_fmul(b, coords, scale));
}
}
@ -981,9 +974,7 @@ saturate_src(nir_builder *b, nir_tex_instr *tex, unsigned sat_mask)
/* and move the result back into a single vecN: */
src = nir_vec(b, comp, tex->coord_components);
nir_instr_rewrite_src(&tex->instr,
&tex->src[coord_index].src,
nir_src_for_ssa(src));
nir_src_rewrite(&tex->src[coord_index].src, src);
}
return tex;
}
@ -1257,8 +1248,7 @@ nir_lower_txs_lod(nir_builder *b, nir_tex_instr *tex)
nir_def *lod = nir_ssa_for_src(b, tex->src[lod_idx].src, 1);
/* Replace the non-0-LOD in the initial TXS operation by a 0-LOD. */
nir_instr_rewrite_src(&tex->instr, &tex->src[lod_idx].src,
nir_src_for_ssa(nir_imm_int(b, 0)));
nir_src_rewrite(&tex->src[lod_idx].src, nir_imm_int(b, 0));
/* TXS(LOD) = max(TXS(0) >> LOD, 1)
* But we do min(TXS(0), TXS(LOD)) to catch the case of a null surface,
@ -1437,8 +1427,7 @@ lower_index_to_offset(nir_builder *b, nir_tex_instr *tex)
continue;
nir_def *sum = nir_iadd_imm(b, tex->src[i].src.ssa, *index);
nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
nir_src_for_ssa(sum));
nir_src_rewrite(&tex->src[i].src, sum);
*index = 0;
progress = true;
}

View file

@ -60,8 +60,7 @@ nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
!b->shader->info.first_ubo_is_default_ubo) {
nir_def *old_idx = nir_ssa_for_src(b, intr->src[0], 1);
nir_def *new_idx = nir_iadd_imm(b, old_idx, 1);
nir_instr_rewrite_src(&intr->instr, &intr->src[0],
nir_src_for_ssa(new_idx));
nir_src_rewrite(&intr->src[0], new_idx);
return true;
}

View file

@ -80,8 +80,7 @@ lower_vec3_to_vec4_instr(nir_builder *b, nir_instr *instr, void *data)
data = nir_swizzle(b, data, swiz, 4);
intrin->num_components = 4;
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
nir_src_for_ssa(data));
nir_src_rewrite(&intrin->src[1], data);
return true;
}

View file

@ -86,8 +86,7 @@ lower_viewport_transform_instr(nir_builder *b, nir_instr *instr,
nir_channel(b, screen, 2),
w_recip);
nir_instr_rewrite_src(instr, &intr->src[1],
nir_src_for_ssa(screen_space));
nir_src_rewrite(&intr->src[1], screen_space);
return true;
}

View file

@ -237,9 +237,7 @@ lower_fddy(lower_wpos_ytransform_state *state, nir_alu_instr *fddy)
pt = nir_fmul(b, p, trans);
nir_instr_rewrite_src(&fddy->instr,
&fddy->src[0].src,
nir_src_for_ssa(pt));
nir_src_rewrite(&fddy->src[0].src, pt);
for (unsigned i = 0; i < 4; i++)
fddy->src[0].swizzle[i] = MIN2(i, pt->num_components - 1);
@ -262,9 +260,8 @@ lower_interp_deref_or_load_baryc_at_offset(lower_wpos_ytransform_state *state,
offset = nir_ssa_for_src(b, intr->src[offset_src], 2);
flip_y = nir_fmul(b, nir_channel(b, offset, 1),
nir_channel(b, get_transform(state), 0));
nir_instr_rewrite_src(&intr->instr, &intr->src[offset_src],
nir_src_for_ssa(nir_vec2(b, nir_channel(b, offset, 0),
flip_y)));
nir_src_rewrite(&intr->src[offset_src],
nir_vec2(b, nir_channel(b, offset, 0), flip_y));
}
static void

View file

@ -142,8 +142,7 @@ move_vec_src_uses_to_dest_block(nir_block *block)
* reswizzled to actually use the destination of the vecN operation.
* Go ahead and rewrite it as needed.
*/
nir_instr_rewrite_src(use->parent_instr, use,
nir_src_for_ssa(&vec->def));
nir_src_rewrite(use, &vec->def);
for (unsigned j = 0; j < 4; j++) {
if (!nir_alu_instr_channel_used(use_alu, src_idx, j))
continue;

View file

@ -155,14 +155,12 @@ combine_stores(struct combine_stores_state *state,
*/
if (store->num_components == 1) {
store->num_components = num_components;
nir_instr_rewrite_src(&store->instr, &store->src[0],
nir_src_for_ssa(&combo->dst->def));
nir_src_rewrite(&store->src[0], &combo->dst->def);
}
assert(store->num_components == num_components);
nir_intrinsic_set_write_mask(store, combo->write_mask);
nir_instr_rewrite_src(&store->instr, &store->src[1],
nir_src_for_ssa(vec));
nir_src_rewrite(&store->src[1], vec);
state->progress = true;
}

View file

@ -731,8 +731,7 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
nir_def *new_base = first->intrin->src[info->base_src].ssa;
new_base = nir_iadd_imm(b, new_base, -(int)(high_start / 8u));
nir_instr_rewrite_src(first->instr, &first->intrin->src[info->base_src],
nir_src_for_ssa(new_base));
nir_src_rewrite(&first->intrin->src[info->base_src], new_base);
}
/* update the deref */
@ -744,8 +743,8 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
deref = subtract_deref(b, deref, high_start / 8u);
first->deref = cast_deref(b, new_num_components, new_bit_size, deref);
nir_instr_rewrite_src(first->instr, &first->intrin->src[info->deref_src],
nir_src_for_ssa(&first->deref->def));
nir_src_rewrite(&first->intrin->src[info->deref_src],
&first->deref->def);
}
/* update align */

View file

@ -46,8 +46,7 @@ opt_memcpy_deref_cast(nir_intrinsic_instr *cpy, nir_src *deref_src)
/* Casts to uint8 or int8 never do us any good; get rid of them */
if (cast->type == glsl_int8_t_type() ||
cast->type == glsl_uint8_t_type()) {
nir_instr_rewrite_src(&cpy->instr, deref_src,
nir_src_for_ssa(&parent->def));
nir_src_rewrite(deref_src, &parent->def);
return true;
}
@ -64,8 +63,7 @@ opt_memcpy_deref_cast(nir_intrinsic_instr *cpy, nir_src *deref_src)
if (nir_src_as_uint(cpy->src[2]) < (uint64_t)parent_type_size)
return false;
nir_instr_rewrite_src(&cpy->instr, deref_src,
nir_src_for_ssa(&parent->def));
nir_src_rewrite(deref_src, &parent->def);
return true;
}

View file

@ -134,7 +134,7 @@ try_fold_load_store(nir_builder *b,
if (!replace_src)
return false;
nir_instr_rewrite_src(&intrin->instr, &intrin->src[offset_src_idx], nir_src_for_ssa(replace_src));
nir_src_rewrite(&intrin->src[offset_src_idx], replace_src);
assert(off_const <= max);
nir_intrinsic_set_base(intrin, off_const);
@ -165,7 +165,7 @@ try_fold_shared2(nir_builder *b,
return false;
b->cursor = nir_before_instr(&intrin->instr);
nir_instr_rewrite_src(&intrin->instr, off_src, nir_src_for_ssa(nir_imm_zero(b, 1, 32)));
nir_src_rewrite(off_src, nir_imm_zero(b, 1, 32));
nir_intrinsic_set_offset0(intrin, offset0 / stride);
nir_intrinsic_set_offset1(intrin, offset1 / stride);
nir_intrinsic_set_st64(intrin, st64);

View file

@ -342,8 +342,7 @@ nir_opt_collapse_if(nir_if *if_stmt, nir_shader *shader, unsigned limit,
nir_phi_get_src_from_block(nir_instr_as_phi(src->parent_instr),
nir_if_first_else_block(parent_if));
if (phi_src->src.ssa == else_src->src.ssa)
nir_instr_rewrite_src(src->parent_instr, &phi_src->src,
nir_src_for_ssa(&phi->def));
nir_src_rewrite(&phi_src->src, &phi->def);
}
}

View file

@ -141,9 +141,7 @@ nir_opt_rematerialize_compares_impl(nir_shader *shader, nir_function_impl *impl)
nir_alu_instr *const use_alu = nir_instr_as_alu(use_instr);
for (unsigned i = 0; i < nir_op_infos[use_alu->op].num_inputs; i++) {
if (use_alu->src[i].src.ssa == &alu->def) {
nir_instr_rewrite_src(&use_alu->instr,
&use_alu->src[i].src,
nir_src_for_ssa(&clone->def));
nir_src_rewrite(&use_alu->src[i].src, &clone->def);
progress = true;
}
}

View file

@ -49,7 +49,7 @@ opt_shrink_vectors_image_store(nir_builder *b, nir_intrinsic_instr *instr)
return false;
nir_def *data = nir_trim_vector(b, instr->src[3].ssa, components);
nir_instr_rewrite_src(&instr->instr, &instr->src[3], nir_src_for_ssa(data));
nir_src_rewrite(&instr->src[3], data);
instr->num_components = components;
return true;
@ -84,9 +84,7 @@ opt_shrink_store_instr(nir_builder *b, nir_intrinsic_instr *instr, bool shrink_i
unsigned last_bit = util_last_bit(write_mask);
if (last_bit < instr->num_components) {
nir_def *def = nir_trim_vector(b, instr->src[0].ssa, last_bit);
nir_instr_rewrite_src(&instr->instr,
&instr->src[0],
nir_src_for_ssa(def));
nir_src_rewrite(&instr->src[0], def);
instr->num_components = last_bit;
return true;

View file

@ -244,7 +244,7 @@ optimize_atomic(nir_builder *b, nir_intrinsic_instr *intrin, bool return_prev)
nir_def *reduce = NULL, *scan = NULL;
reduce_data(b, op, data, &reduce, combined_scan_reduce ? &scan : NULL);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[data_src], nir_src_for_ssa(reduce));
nir_src_rewrite(&intrin->src[data_src], reduce);
nir_update_instr_divergence(b->shader, &intrin->instr);
nir_def *cond = nir_elect(b, 1);

View file

@ -252,8 +252,7 @@ instr_try_combine(struct set *instr_set, nir_instr *instr1, nir_instr *instr2)
/* For ALU instructions, rewrite the source directly to avoid a
* round-trip through copy propagation.
*/
nir_instr_rewrite_src(user_instr, src,
nir_src_for_ssa(&new_alu->def));
nir_src_rewrite(src, &new_alu->def);
/* Rehash user if it was found in the hashset */
if (entry && entry->key == user_instr) {
@ -268,8 +267,7 @@ instr_try_combine(struct set *instr_set, nir_instr *instr1, nir_instr *instr2)
/* For ALU instructions, rewrite the source directly to avoid a
* round-trip through copy propagation.
*/
nir_instr_rewrite_src(src->parent_instr, src,
nir_src_for_ssa(&new_alu->def));
nir_src_rewrite(src, &new_alu->def);
nir_alu_src *alu_src = container_of(src, nir_alu_src, src);
nir_alu_instr *use = nir_instr_as_alu(src->parent_instr);

View file

@ -136,7 +136,7 @@ repair_ssa_def(nir_def *def, void *void_state)
if (src->is_if)
nir_src_rewrite(&src->parent_if->condition, block_def);
else
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(block_def));
nir_src_rewrite(src, block_def);
}
return true;

View file

@ -866,8 +866,7 @@ split_array_access_impl(nir_function_impl *impl,
assert(new_deref->type == deref->type);
/* Rewrite the deref source to point to the split one */
nir_instr_rewrite_src(&intrin->instr, &intrin->src[d],
nir_src_for_ssa(&new_deref->def));
nir_src_rewrite(&intrin->src[d], &new_deref->def);
nir_deref_instr_remove_if_unused(deref);
}
}
@ -1615,8 +1614,7 @@ shrink_vec_var_access_impl(nir_function_impl *impl,
nir_swizzle(&b, intrin->src[1].ssa, swizzle, c);
/* Rewrite to use the compacted source */
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
nir_src_for_ssa(swizzled));
nir_src_rewrite(&intrin->src[1], swizzled);
nir_intrinsic_set_write_mask(intrin, new_write_mask);
intrin->num_components = c;
}

View file

@ -273,7 +273,7 @@ convert_loop_exit_for_ssa(nir_def *def, void *void_state)
}
if (!is_use_inside_loop(use, state->loop)) {
nir_instr_rewrite_src(use->parent_instr, use, nir_src_for_ssa(dest));
nir_src_rewrite(use, dest);
}
}

View file

@ -314,8 +314,8 @@ ir3_nir_lower_array_sampler_cb(struct nir_builder *b, nir_instr *instr, void *_d
assume(ncomp >= 1);
nir_def *ai = nir_channel(b, src, ncomp - 1);
ai = nir_fadd_imm(b, ai, 0.5);
nir_instr_rewrite_src(&tex->instr, &tex->src[coord_idx].src,
nir_src_for_ssa(nir_vector_insert_imm(b, src, ai, ncomp - 1)));
nir_src_rewrite(&tex->src[coord_idx].src,
nir_vector_insert_imm(b, src, ai, ncomp - 1));
return true;
}

View file

@ -559,7 +559,7 @@ fixup_load_uniform_instr(struct nir_builder *b, nir_instr *instr, void *arg)
nir_intrinsic_set_base(intr, new_base_offset);
offset = nir_iadd_imm(b, offset, base_offset - new_base_offset);
nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(offset));
nir_src_rewrite(&intr->src[0], offset);
return NIR_LOWER_INSTR_PROGRESS;
}

View file

@ -246,8 +246,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
/* Replace the last source of the new intrinsic by the result of
* the offset divided by 4.
*/
nir_instr_rewrite_src(&new_intrinsic->instr, target_src,
nir_src_for_ssa(offset));
nir_src_rewrite(target_src, offset);
if (has_dest) {
/* Replace the uses of the original destination by that

View file

@ -59,7 +59,7 @@ lower_multiview_mask(nir_shader *nir, uint32_t *mask)
nir_load_view_index(&b))));
nir_def *src = nir_bcsel(&b, cmp, orig_src, nir_imm_float(&b, 0.));
nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(src));
nir_src_rewrite(&intrin->src[1], src);
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);

View file

@ -540,8 +540,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex, struct tu_device *dev,
if (sampler_src_idx >= 0) {
nir_deref_instr *deref = nir_src_as_deref(tex->src[sampler_src_idx].src);
nir_def *bindless = build_bindless(dev, b, deref, true, shader, layout);
nir_instr_rewrite_src(&tex->instr, &tex->src[sampler_src_idx].src,
nir_src_for_ssa(bindless));
nir_src_rewrite(&tex->src[sampler_src_idx].src, bindless);
tex->src[sampler_src_idx].src_type = nir_tex_src_sampler_handle;
}
@ -549,8 +548,7 @@ lower_tex(nir_builder *b, nir_tex_instr *tex, struct tu_device *dev,
if (tex_src_idx >= 0) {
nir_deref_instr *deref = nir_src_as_deref(tex->src[tex_src_idx].src);
nir_def *bindless = build_bindless(dev, b, deref, false, shader, layout);
nir_instr_rewrite_src(&tex->instr, &tex->src[tex_src_idx].src,
nir_src_for_ssa(bindless));
nir_src_rewrite(&tex->src[tex_src_idx].src, bindless);
tex->src[tex_src_idx].src_type = nir_tex_src_texture_handle;
/* for the input attachment case: */

View file

@ -229,7 +229,7 @@ lower_aaline_instr(nir_builder *b, nir_instr *instr, void *data)
nir_channel(b, out_input, 1),
nir_channel(b, out_input, 2),
tmp);
nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(out));
nir_src_rewrite(&intrin->src[1], out);
return true;
}
@ -302,7 +302,7 @@ nir_lower_aapoint_block(nir_block *block,
nir_channel(b, out_input, 1),
nir_channel(b, out_input, 2),
tmp);
nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(out));
nir_src_rewrite(&intrin->src[1], out);
}
}

View file

@ -3454,11 +3454,9 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
nir_intrinsic_set_write_mask(first, nir_intrinsic_write_mask(instr) & 3);
nir_intrinsic_set_write_mask(second, nir_intrinsic_write_mask(instr) >> 2);
nir_instr_rewrite_src(&first->instr, &first->src[0],
nir_src_for_ssa(nir_vec_scalars(b, channels, 2)));
nir_instr_rewrite_src(&second->instr, &second->src[0],
nir_src_for_ssa(nir_vec_scalars(b, &channels[2],
second->num_components)));
nir_src_rewrite(&first->src[0], nir_vec_scalars(b, channels, 2));
nir_src_rewrite(&second->src[0],
nir_vec_scalars(b, &channels[2], second->num_components));
}
int offset_src = -1;
@ -3483,8 +3481,7 @@ nir_to_tgsi_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
b->cursor = nir_before_instr(&second->instr);
nir_def *second_offset =
nir_iadd_imm(b, second->src[offset_src].ssa, offset_amount);
nir_instr_rewrite_src(&second->instr, &second->src[offset_src],
nir_src_for_ssa(second_offset));
nir_src_rewrite(&second->src[offset_src], second_offset);
}
/* DCE stores we generated with no writemask (nothing else does this

View file

@ -671,8 +671,7 @@ crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
if (load->src[0].ssa == temp_ubo_name) {
nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],
nir_src_for_ssa(imm));
nir_src_rewrite(&load->src[0], imm);
}
}
}
@ -786,7 +785,7 @@ rewrite_src_with_bti(nir_builder *b, struct crocus_binding_table *bt,
assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
}
nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
nir_src_rewrite(src, bti);
}
static void

View file

@ -79,7 +79,7 @@ lower_pos_write(nir_builder *b, struct nir_instr *instr, nir_variable **flip)
nir_fmul(b, nir_channel(b, pos, 1), flip_y),
nir_channel(b, pos, 2),
nir_channel(b, pos, 3));
nir_instr_rewrite_src(&intr->instr, intr->src + 1, nir_src_for_ssa(def));
nir_src_rewrite(intr->src + 1, def);
}
void
@ -231,7 +231,7 @@ lower_uint_color_write(nir_builder *b, struct nir_instr *instr, bool is_signed)
def = nir_bcsel(b, nir_ilt_imm(b, def, 0),
nir_iadd_imm(b, def, 1ull << NUM_BITS),
def);
nir_instr_rewrite_src(&intr->instr, intr->src + 1, nir_src_for_ssa(def));
nir_src_rewrite(intr->src + 1, def);
}
void
@ -364,7 +364,7 @@ invert_depth_impl(nir_builder *b, struct invert_depth_state *state)
nir_pop_if(b, NULL);
def = nir_if_phi(b, def, pos);
}
nir_instr_rewrite_src(&intr->instr, intr->src + 1, nir_src_for_ssa(def));
nir_src_rewrite(intr->src + 1, def);
state->viewport_index = NULL;
state->store_pos_instr = NULL;

View file

@ -735,7 +735,7 @@ insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
if (!(write_mask & (1 << i)))
continue;
nir_instr_rewrite_src(&vec->instr, &vec->src[i].src, nir_src_for_ssa(&mov->def));
nir_src_rewrite(&vec->src[i].src, &mov->def);
vec->src[i].swizzle[0] = j++;
}
@ -836,7 +836,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
if (!cv)
continue;
nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
nir_src_rewrite(&alu->src[i].src, def);
for (unsigned j = 0; j < 4; j++)
alu->src[i].swizzle[j] = swizzle[i][j];
@ -856,7 +856,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
continue;
nir_def *mov = nir_mov(&b, alu->src[i].src.ssa);
nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(mov));
nir_src_rewrite(&alu->src[i].src, mov);
}
return;
}
@ -888,7 +888,7 @@ lower_alu(struct etna_compile *c, nir_alu_instr *alu)
if (!cv)
continue;
nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(def));
nir_src_rewrite(&alu->src[i].src, def);
alu->src[i].swizzle[0] = j++;
}
}
@ -1010,7 +1010,7 @@ emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts)
deref->var->data.location == FRAG_RESULT_DEPTH &&
src->ssa->parent_instr->type != nir_instr_type_alu)) {
b.cursor = nir_before_instr(instr);
nir_instr_rewrite_src(instr, src, nir_src_for_ssa(nir_mov(&b, src->ssa)));
nir_src_rewrite(src, nir_mov(&b, src->ssa));
}
} break;
default:

View file

@ -74,7 +74,7 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
alu->src[0].swizzle[0] = 2;
alu->src[0].swizzle[2] = 0;
nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));
nir_src_rewrite(&intr->src[1], ssa);
} break;
case nir_intrinsic_load_vertex_id:
case nir_intrinsic_load_instance_id:
@ -135,7 +135,7 @@ etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
nir_def_init(&vec->instr, &vec->def, 4, 32);
nir_tex_instr_remove_src(tex, src1_idx);
nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->def));
nir_src_rewrite(coord, &vec->def);
tex->coord_components = 4;
nir_instr_insert_before(&tex->instr, &vec->instr);
@ -168,8 +168,8 @@ etna_lower_alu_impl(nir_function_impl *impl, bool has_new_transcendentals)
nir_imm_float(&b, 1.0 / M_PI) :
nir_imm_float(&b, 2.0 / M_PI);
nir_instr_rewrite_src(instr, &alu->src[0].src,
nir_src_for_ssa(nir_fmul(&b, alu->src[0].src.ssa, imm)));
nir_src_rewrite(&alu->src[0].src,
nir_fmul(&b, alu->src[0].src.ssa, imm));
}
/* change transcendental ops to vec2 and insert vec1 mul for the result

View file

@ -708,8 +708,7 @@ iris_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
if (load->src[0].ssa == temp_ubo_name) {
nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],
nir_src_for_ssa(imm));
nir_src_rewrite(&load->src[0], imm);
}
}
}
@ -854,7 +853,7 @@ rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
}
nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
nir_src_rewrite(src, bti);
}
static void

View file

@ -49,7 +49,7 @@ lima_nir_duplicate_load_const(nir_builder *b, nir_load_const_instr *load)
dupl = last_dupl;
}
nir_instr_rewrite_src(use_src->parent_instr, use_src, nir_src_for_ssa(&dupl->def));
nir_src_rewrite(use_src, &dupl->def);
last_parent_instr = use_src->parent_instr;
last_dupl = dupl;
}

View file

@ -53,7 +53,7 @@ lima_nir_duplicate_intrinsic(nir_builder *b, nir_intrinsic_instr *itr,
dupl = last_dupl;
}
nir_instr_rewrite_src(use_src->parent_instr, use_src, nir_src_for_ssa(&dupl->def));
nir_src_rewrite(use_src, &dupl->def);
last_parent_instr = use_src->parent_instr;
last_dupl = dupl;
}

View file

@ -64,12 +64,12 @@ replace_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin)
_mesa_hash_table_search(visited_instrs, src->parent_instr);
if (entry && (src->parent_instr->type != nir_instr_type_phi)) {
nir_def *def = entry->data;
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(def));
nir_src_rewrite(src, def);
continue;
}
b->cursor = nir_before_src(src);
nir_def *new = clone_intrinsic(b, intrin);
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new));
nir_src_rewrite(src, new);
_mesa_hash_table_insert(visited_instrs, src->parent_instr, new);
}
nir_foreach_if_use_safe(src, &intrin->def) {
@ -92,14 +92,14 @@ replace_load_const(nir_builder *b, nir_load_const_instr *load_const)
_mesa_hash_table_search(visited_instrs, src->parent_instr);
if (entry && (src->parent_instr->type != nir_instr_type_phi)) {
nir_def *def = entry->data;
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(def));
nir_src_rewrite(src, def);
continue;
}
b->cursor = nir_before_src(src);
nir_def *new = nir_build_imm(b, load_const->def.num_components,
load_const->def.bit_size,
load_const->value);
nir_instr_rewrite_src(src->parent_instr, src, nir_src_for_ssa(new));
nir_src_rewrite(src, new);
_mesa_hash_table_insert(visited_instrs, src->parent_instr, new);
}

View file

@ -104,9 +104,7 @@ r600_nir_lower_scratch_address_impl(nir_builder *b, nir_intrinsic_instr *instr)
nir_def *address = instr->src[address_index].ssa;
nir_def *new_address = nir_ishr_imm(b, address, 4 * align);
nir_instr_rewrite_src(&instr->instr,
&instr->src[address_index],
nir_src_for_ssa(new_address));
nir_src_rewrite(&instr->src[address_index], new_address);
}
bool
@ -417,7 +415,7 @@ r600_lower_deref_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
* opcode.
*/
instr->intrinsic = op;
nir_instr_rewrite_src(&instr->instr, &instr->src[0], nir_src_for_ssa(offset));
nir_src_rewrite(&instr->src[0], offset);
nir_intrinsic_set_base(instr, idx);
nir_intrinsic_set_range_base(instr, var->data.index);

View file

@ -560,10 +560,10 @@ LowerSplit64BitVar::split_store_output(nir_intrinsic_instr *store1)
auto src1 = nir_trim_vector(b, src.ssa, 2);
auto src2 = nir_channels(b, src.ssa, old_components == 3 ? 4 : 0xc);
nir_instr_rewrite_src(&store1->instr, &src, nir_src_for_ssa(src1));
nir_src_rewrite(&src, src1);
nir_intrinsic_set_write_mask(store1, 3);
nir_instr_rewrite_src(&store2->instr, &src, nir_src_for_ssa(src2));
nir_src_rewrite(&src, src2);
nir_intrinsic_set_write_mask(store2, old_components == 3 ? 1 : 3);
sem.num_slots = 1;
@ -1349,13 +1349,9 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
nir_intrinsic_set_write_mask(first, nir_intrinsic_write_mask(instr) & 3);
nir_intrinsic_set_write_mask(second, nir_intrinsic_write_mask(instr) >> 2);
nir_instr_rewrite_src(&first->instr,
&first->src[0],
nir_src_for_ssa(nir_vec_scalars(b, channels, 2)));
nir_instr_rewrite_src(
&second->instr,
&second->src[0],
nir_src_for_ssa(nir_vec_scalars(b, &channels[2], second->num_components)));
nir_src_rewrite(&first->src[0], nir_vec_scalars(b, channels, 2));
nir_src_rewrite(&second->src[0],
nir_vec_scalars(b, &channels[2], second->num_components));
}
int offset_src = -1;
@ -1381,9 +1377,7 @@ r600_lower_64bit_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
b->cursor = nir_before_instr(&second->instr);
nir_def *second_offset =
nir_iadd_imm(b, second->src[offset_src].ssa, offset_amount);
nir_instr_rewrite_src(&second->instr,
&second->src[offset_src],
nir_src_for_ssa(second_offset));
nir_src_rewrite(&second->src[offset_src], second_offset);
}
/* DCE stores we generated with no writemask (nothing else does this

View file

@ -54,7 +54,7 @@ lower_coord_shift_normalized(nir_builder *b, nir_tex_instr *tex)
tex->src[coord_index].src.ssa);
}
nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src, nir_src_for_ssa(corr));
nir_src_rewrite(&tex->src[coord_index].src, corr);
return true;
}
@ -75,7 +75,7 @@ lower_coord_shift_unnormalized(nir_builder *b, nir_tex_instr *tex)
} else {
corr = nir_fadd_imm(b, tex->src[coord_index].src.ssa, -0.5f);
}
nir_instr_rewrite_src(&tex->instr, &tex->src[coord_index].src, nir_src_for_ssa(corr));
nir_src_rewrite(&tex->src[coord_index].src, corr);
return true;
}
@ -281,24 +281,16 @@ r600_nir_lower_cube_to_2darray_impl(nir_builder *b, nir_instr *instr, void *_opt
if (tex->op == nir_texop_txd) {
int ddx_idx = nir_tex_instr_src_index(tex, nir_tex_src_ddx);
nir_instr_rewrite_src(
&tex->instr,
&tex->src[ddx_idx].src,
nir_src_for_ssa(
nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddx_idx].src, 3), 0.5)));
nir_src_rewrite(&tex->src[ddx_idx].src,
nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddx_idx].src, 3), 0.5));
int ddy_idx = nir_tex_instr_src_index(tex, nir_tex_src_ddy);
nir_instr_rewrite_src(
&tex->instr,
&tex->src[ddy_idx].src,
nir_src_for_ssa(
nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddy_idx].src, 3), 0.5)));
nir_src_rewrite(&tex->src[ddy_idx].src,
nir_fmul_imm(b, nir_ssa_for_src(b, tex->src[ddy_idx].src, 3), 0.5));
}
auto new_coord = nir_vec3(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1), z);
nir_instr_rewrite_src(&tex->instr,
&tex->src[coord_idx].src,
nir_src_for_ssa(new_coord));
nir_src_rewrite(&tex->src[coord_idx].src, new_coord);
tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
tex->is_array = true;
tex->array_is_lowered_cube = true;

View file

@ -362,7 +362,7 @@ static bool lower_resource_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin
nir_def_rewrite_uses(&intrin->def, desc);
nir_instr_remove(&intrin->instr);
} else {
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0], nir_src_for_ssa(desc));
nir_src_rewrite(&intrin->src[0], desc);
}
break;
}

View file

@ -299,10 +299,7 @@ lower_uniform_offset_to_bytes_cb(nir_builder *b, nir_instr *instr, void *_state)
b->cursor = nir_before_instr(&intr->instr);
nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 16);
nir_instr_rewrite_src(&intr->instr,
&intr->src[0],
nir_src_for_ssa(nir_ishl_imm(b, intr->src[0].ssa,
4)));
nir_src_rewrite(&intr->src[0], nir_ishl_imm(b, intr->src[0].ssa, 4));
return true;
}

View file

@ -555,8 +555,7 @@ vc4_nir_lower_blend_instr(struct vc4_compile *c, nir_builder *b,
blend_output = vc4_nir_blend_pipeline(c, b, frag_color, 0);
}
nir_instr_rewrite_src(&intr->instr, &intr->src[0],
nir_src_for_ssa(blend_output));
nir_src_rewrite(&intr->src[0], blend_output);
if (intr->num_components != blend_output->num_components) {
unsigned component_mask = BITFIELD_MASK(blend_output->num_components);
nir_intrinsic_set_write_mask(intr, component_mask);

View file

@ -71,7 +71,7 @@ try_lower_input_load(nir_intrinsic_instr *load, bool use_fragcoord_sysval)
nir_def *coord =
nir_vec4(&b, nir_channel(&b, pos, 0), nir_channel(&b, pos, 1), layer, nir_imm_int(&b, 0));
nir_instr_rewrite_src(&load->instr, &load->src[1], nir_src_for_ssa(coord));
nir_src_rewrite(&load->src[1], coord);
return true;
}

View file

@ -171,10 +171,10 @@ lower_load_ubo(nir_builder *b, nir_instr *instr, void *data_cb)
b->cursor = nir_before_instr(instr);
nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(nir_imm_int(b, binding.desc_set + 1)));
nir_src_rewrite(&intrin->src[0], nir_imm_int(b, binding.desc_set + 1));
nir_def *offset = nir_iadd_imm(b, intrin->src[1].ssa, bind_layout->uniform_block_offset);
nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(offset));
nir_src_rewrite(&intrin->src[1], offset);
return true;
}
@ -198,8 +198,7 @@ static nir_def *lower_vri_instr(struct nir_builder *b,
/* Ignore the offset component. */
b->cursor = nir_before_instr(instr);
nir_def *resource = nir_ssa_for_src(b, intrin->src[0], 2);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(resource));
nir_src_rewrite(&intrin->src[0], resource);
return NULL;
}
case nir_intrinsic_image_deref_sparse_load:

View file

@ -203,7 +203,7 @@ brw_nir_adjust_task_payload_offsets_instr(struct nir_builder *b,
*/
nir_def *offset = nir_ishr_imm(b, offset_src->ssa, 2);
nir_instr_rewrite_src(&intrin->instr, offset_src, nir_src_for_ssa(offset));
nir_src_rewrite(offset_src, offset);
unsigned base = nir_intrinsic_base(intrin);
assert(base % 4 == 0);
@ -1190,7 +1190,7 @@ brw_nir_adjust_offset(nir_builder *b, nir_intrinsic_instr *intrin, uint32_t pitc
nir_iadd(b,
offset_src->ssa,
nir_imul_imm(b, index_src->ssa, pitch));
nir_instr_rewrite_src(&intrin->instr, offset_src, nir_src_for_ssa(offset));
nir_src_rewrite(offset_src, offset);
}
static bool

View file

@ -153,8 +153,7 @@ remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
nir_intrinsic_set_write_mask(intr, mask);
if (src) {
nir_instr_rewrite_src(&intr->instr, &intr->src[0],
nir_src_for_ssa(src));
nir_src_rewrite(&intr->src[0], src);
}
} else if (dest) {
nir_def_rewrite_uses_after(&intr->def, dest,
@ -226,8 +225,7 @@ remap_patch_urb_offsets(nir_block *block, nir_builder *b,
nir_iadd(b, vertex_offset,
nir_ssa_for_src(b, *offset, 1));
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(total_offset));
nir_src_rewrite(offset, total_offset);
}
}
}
@ -494,7 +492,7 @@ lower_barycentric_at_offset(nir_builder *b, nir_instr *instr, void *data)
nir_imin(b, nir_imm_int(b, 7),
nir_f2i32(b, nir_fmul_imm(b, intrin->src[0].ssa, 16)));
nir_instr_rewrite_src(instr, &intrin->src[0], nir_src_for_ssa(offset));
nir_src_rewrite(&intrin->src[0], offset);
return true;
}

View file

@ -182,9 +182,7 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader,
dither_mask, sample_mask_write->src[0].ssa);
}
nir_instr_rewrite_src(&sample_mask_write->instr,
&sample_mask_write->src[0],
nir_src_for_ssa(dither_mask));
nir_src_rewrite(&sample_mask_write->src[0], dither_mask);
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);

View file

@ -83,8 +83,7 @@ lower_shading_rate_output_instr(nir_builder *b, nir_instr *instr,
nir_iand_imm(b, bit_field, 0x3)));
nir_def *packed_fp16_xy = nir_pack_32_2x16_split(b, fp16_x, fp16_y);
nir_instr_rewrite_src(instr, &intrin->src[0],
nir_src_for_ssa(packed_fp16_xy));
nir_src_rewrite(&intrin->src[0], packed_fp16_xy);
} else {
nir_def *packed_fp16_xy = &intrin->def;

View file

@ -569,8 +569,7 @@ lower_image_store_instr(nir_builder *b,
intrin->src[3].ssa,
image_fmt, lower_fmt);
intrin->num_components = isl_format_get_num_channels(lower_fmt);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[3],
nir_src_for_ssa(color));
nir_src_rewrite(&intrin->src[3], color);
} else {
const struct isl_format_layout *image_fmtl =
isl_format_get_layout(image_fmt);

View file

@ -43,8 +43,7 @@ resize_deref(nir_builder *b, nir_deref_instr *deref,
} else {
idx = nir_i2iN(b, deref->arr.index.ssa, bit_size);
}
nir_instr_rewrite_src(&deref->instr, &deref->arr.index,
nir_src_for_ssa(idx));
nir_src_rewrite(&deref->arr.index, idx);
}
deref->def.num_components = num_components;

View file

@ -1298,8 +1298,7 @@ lower_direct_buffer_instr(nir_builder *b, nir_instr *instr, void *_state)
non_uniform,
state);
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(surface_index));
nir_src_rewrite(&intrin->src[0], surface_index);
_mesa_set_add(state->lowered_instrs, intrin);
return true;
}
@ -1589,8 +1588,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
nir_tex_src_sampler_offset;
}
nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
nir_src_for_ssa(index));
nir_src_rewrite(&tex->src[deref_src_idx].src, index);
tex->src[deref_src_idx].src_type = offset_src_type;
}

View file

@ -833,8 +833,7 @@ lower_get_ssbo_size(nir_builder *b, nir_intrinsic_instr *intrin,
/* The binding table index is the first component of the address. The
* back-end wants a scalar binding table index source.
*/
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(nir_channel(b, desc, 0)));
nir_src_rewrite(&intrin->src[0], nir_channel(b, desc, 0));
break;
default:
@ -1043,8 +1042,7 @@ lower_tex_deref(nir_builder *b, nir_tex_instr *tex,
}
if (index) {
nir_instr_rewrite_src(&tex->instr, &tex->src[deref_src_idx].src,
nir_src_for_ssa(index));
nir_src_rewrite(&tex->src[deref_src_idx].src, index);
tex->src[deref_src_idx].src_type = offset_src_type;
} else {
nir_tex_instr_remove_src(tex, deref_src_idx);

View file

@ -147,12 +147,8 @@ lower_tex_src_plane_block(nir_builder *b, lower_tex_src_state *state, nir_block
nir_deref_instr *tex_deref_instr = nir_build_deref_var(b, samp);
nir_def *tex_deref = &tex_deref_instr->def;
nir_instr_rewrite_src(&tex->instr,
&tex->src[tex_index].src,
nir_src_for_ssa(tex_deref));
nir_instr_rewrite_src(&tex->instr,
&tex->src[samp_index].src,
nir_src_for_ssa(tex_deref));
nir_src_rewrite(&tex->src[tex_index].src, tex_deref);
nir_src_rewrite(&tex->src[samp_index].src, tex_deref);
} else {
/* For others we need to update texture_index */
assume(tex->texture_index == tex->sampler_index);

View file

@ -384,9 +384,7 @@ clc_lower_nonnormalized_samplers(nir_shader *nir,
comps[i] = nir_fmul(&b, comps[i], nir_channel(&b, scale, i));
}
nir_def *normalized_coords = nir_vec(&b, comps, coords->num_components);
nir_instr_rewrite_src(&tex->instr,
&tex->src[coords_idx].src,
nir_src_for_ssa(normalized_coords));
nir_src_rewrite(&tex->src[coords_idx].src, normalized_coords);
}
}
}

View file

@ -784,7 +784,7 @@ lower_alu_deref_srcs(nir_builder *b, nir_alu_instr *alu)
nir_def *ptr =
nir_iadd(b, root_deref->parent.ssa,
nir_build_deref_offset(b, deref, cl_type_size_align));
nir_instr_rewrite_src(&alu->instr, &alu->src[i].src, nir_src_for_ssa(ptr));
nir_src_rewrite(&alu->src[i].src, ptr);
progress = true;
}

View file

@ -475,7 +475,7 @@ lower_yz_flip(struct nir_builder *builder, nir_instr *instr,
y_pos,
z_pos,
nir_channel(builder, pos, 3));
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1], nir_src_for_ssa(def));
nir_src_rewrite(&intrin->src[1], def);
return true;
}

View file

@ -73,8 +73,7 @@ nir_lod_errata_instr(nir_builder *b, nir_instr *instr, void *data)
nir_def *biased = nir_fadd(b, lod, lod_bias);
nir_def *clamped = nir_fmin(b, nir_fmax(b, biased, min_lod), max_lod);
nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
nir_src_for_ssa(clamped));
nir_src_rewrite(&tex->src[i].src, clamped);
}
return true;

View file

@ -55,7 +55,7 @@ nir_lower_image_bitsize(nir_builder *b, nir_instr *instr, UNUSED void *data)
nir_def *coord16 = nir_u2u16(b, coord);
nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(coord16));
nir_src_rewrite(&intr->src[1], coord16);
return true;
}