nir: replace use of nir_src_copy()

Since 03b2c34793, nir_src_copy() no longer does anything useful; it
will be removed in the following patch.

Reviewed-by: Emma Anholt <emma@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24986>
Timothy Arceri 2023-09-06 13:56:09 +10:00 committed by Marge Bot
parent fd297ecf98
commit af1528cc15
25 changed files with 56 additions and 64 deletions
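
The rewrite is the same at every call site: sources are now referenced directly
through their SSA def (src.ssa), so copying a source reduces to a plain
nir_src_for_ssa() assignment. A minimal sketch of the pattern, assuming Mesa's
nir.h; clone_first_src() is a hypothetical name used only for illustration:

#include "nir.h"

/* Illustrative only: shows the mechanical rewrite applied at each call site
 * in this commit. */
static void
clone_first_src(nir_intrinsic_instr *new_instr, nir_intrinsic_instr *old_instr)
{
   /* Old pattern:
    *
    *    nir_src_copy(&new_instr->src[0], &old_instr->src[0], &new_instr->instr);
    *
    * Since 03b2c34793 that helper does nothing beyond a plain assignment,
    * so the source is built straight from the SSA def instead. */
   new_instr->src[0] = nir_src_for_ssa(old_instr->src[0].ssa);
}

The texture and ALU variants below differ only in that they also carry over
src_type or the swizzle next to the copied source.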


@@ -398,7 +398,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
 new_tex->texture_index = tex->texture_index;
 new_tex->sampler_index = tex->sampler_index;
 new_tex->dest_type = nir_type_int32;
-nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
+new_tex->src[0].src = nir_src_for_ssa(tex->src[i].src.ssa);
 new_tex->src[0].src_type = tex->src[i].src_type;
 nir_def_init(&new_tex->instr, &new_tex->def,
 nir_tex_instr_dest_size(new_tex), 32);
@@ -417,7 +417,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
 new_tex->texture_index = tex->texture_index;
 new_tex->sampler_index = tex->sampler_index;
 new_tex->dest_type = nir_type_int32;
-nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
+new_tex->src[0].src = nir_src_for_ssa(tex->src[i].src.ssa);
 new_tex->src[0].src_type = tex->src[i].src_type;
 nir_def_init(&new_tex->instr, &new_tex->def,
 nir_tex_instr_dest_size(new_tex), 32);


@@ -286,7 +286,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
 new_tex->texture_index = tex->texture_index;
 new_tex->sampler_index = tex->sampler_index;
 new_tex->dest_type = nir_type_int32;
-nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
+new_tex->src[0].src = nir_src_for_ssa(tex->src[i].src.ssa);
 new_tex->src[0].src_type = tex->src[i].src_type;
 nir_def_init(&new_tex->instr, &new_tex->def,
 nir_tex_instr_dest_size(new_tex), 32);


@@ -1673,9 +1673,7 @@ nir_visitor::visit(ir_call *ir)
 call->params[i] = nir_src_for_ssa(&out_param_deref->def);
 } else if (sig_param->data.mode == ir_var_function_in) {
 nir_def *val = evaluate_rvalue(param_rvalue);
-nir_src src = nir_src_for_ssa(val);
-nir_src_copy(&call->params[i], &src, &call->instr);
+call->params[i] = nir_src_for_ssa(val);
 } else if (sig_param->data.mode == ir_var_function_inout) {
 unreachable("unimplemented: inout parameters");
 }


@@ -508,7 +508,7 @@ void
 nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
 nir_alu_instr *instr)
 {
-nir_src_copy(&dest->src, &src->src, instr ? &instr->instr : NULL);
+dest->src = nir_src_for_ssa(src->src.ssa);
 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
 dest->swizzle[i] = src->swizzle[i];
 }


@@ -365,7 +365,7 @@ nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
 tex->src[i].src_type == nir_tex_src_sampler_offset ||
 tex->src[i].src_type == nir_tex_src_texture_handle ||
 tex->src[i].src_type == nir_tex_src_sampler_handle) {
-nir_src_copy(&txs->src[idx].src, &tex->src[i].src, &txs->instr);
+txs->src[idx].src = nir_src_for_ssa(tex->src[i].src.ssa);
 txs->src[idx].src_type = tex->src[i].src_type;
 idx++;
 }
@@ -418,7 +418,7 @@ nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
 tex->src[i].src_type == nir_tex_src_sampler_offset ||
 tex->src[i].src_type == nir_tex_src_texture_handle ||
 tex->src[i].src_type == nir_tex_src_sampler_handle) {
-nir_src_copy(&tql->src[idx].src, &tex->src[i].src, &tql->instr);
+tql->src[idx].src = nir_src_for_ssa(tex->src[i].src.ssa);
 tql->src[idx].src_type = tex->src[i].src_type;
 idx++;
 }


@@ -776,7 +776,7 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
 parent = rematerialize_deref_in_block(parent, state);
 new_deref->parent = nir_src_for_ssa(&parent->def);
 } else {
-nir_src_copy(&new_deref->parent, &deref->parent, &new_deref->instr);
+new_deref->parent = nir_src_for_ssa(deref->parent.ssa);
 }
 }
@@ -795,7 +795,7 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
 case nir_deref_type_array:
 case nir_deref_type_ptr_as_array:
 assert(!nir_src_as_deref(deref->arr.index));
-nir_src_copy(&new_deref->arr.index, &deref->arr.index, &new_deref->instr);
+new_deref->arr.index = nir_src_for_ssa(deref->arr.index.ssa);
 break;
 case nir_deref_type_struct:


@@ -112,7 +112,7 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un
 /* remapped to ssbo_atomic_add: { buffer_idx, offset, +1 } */
 temp = nir_imm_int(b, +1);
 new_instr->src[0] = nir_src_for_ssa(buffer);
-nir_src_copy(&new_instr->src[1], &instr->src[0], &new_instr->instr);
+new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
 new_instr->src[2] = nir_src_for_ssa(temp);
 break;
 case nir_intrinsic_atomic_counter_pre_dec:
@@ -121,21 +121,21 @@
 /* NOTE semantic difference so we adjust the return value below */
 temp = nir_imm_int(b, -1);
 new_instr->src[0] = nir_src_for_ssa(buffer);
-nir_src_copy(&new_instr->src[1], &instr->src[0], &new_instr->instr);
+new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
 new_instr->src[2] = nir_src_for_ssa(temp);
 break;
 case nir_intrinsic_atomic_counter_read:
 /* remapped to load_ssbo: { buffer_idx, offset } */
 new_instr->src[0] = nir_src_for_ssa(buffer);
-nir_src_copy(&new_instr->src[1], &instr->src[0], &new_instr->instr);
+new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
 break;
 default:
 /* remapped to ssbo_atomic_x: { buffer_idx, offset, data, (compare)? } */
 new_instr->src[0] = nir_src_for_ssa(buffer);
-nir_src_copy(&new_instr->src[1], &instr->src[0], &new_instr->instr);
-nir_src_copy(&new_instr->src[2], &instr->src[1], &new_instr->instr);
+new_instr->src[1] = nir_src_for_ssa(instr->src[0].ssa);
+new_instr->src[2] = nir_src_for_ssa(instr->src[1].ssa);
 if (op == nir_intrinsic_ssbo_atomic_swap)
-nir_src_copy(&new_instr->src[3], &instr->src[2], &new_instr->instr);
+new_instr->src[3] = nir_src_for_ssa(instr->src[2].ssa);
 break;
 }


@@ -98,7 +98,7 @@ emit_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,
 /* Copy over any other sources. This is needed for interp_deref_at */
 for (unsigned i = 1;
 i < nir_intrinsic_infos[orig_instr->intrinsic].num_srcs; i++)
-nir_src_copy(&load->src[i], &orig_instr->src[i], &load->instr);
+load->src[i] = nir_src_for_ssa(orig_instr->src[i].ssa);
 nir_def_init(&load->instr, &load->def,
 orig_instr->def.num_components,


@@ -563,7 +563,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
 if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
 intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
 intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
-nir_src_copy(&bary_setup->src[0], &intrin->src[1], &bary_setup->instr);
+bary_setup->src[0] = nir_src_for_ssa(intrin->src[1].ssa);
 nir_builder_instr_insert(b, &bary_setup->instr);


@@ -177,8 +177,7 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
 if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
 intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
 intr->intrinsic == nir_intrinsic_interp_deref_at_vertex) {
-nir_src_copy(&element_intr->src[1], &intr->src[1],
-&element_intr->instr);
+element_intr->src[1] = nir_src_for_ssa(intr->src[1].ssa);
 }
 nir_def_rewrite_uses(&intr->def,
@@ -186,8 +185,7 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
 } else {
 nir_intrinsic_set_write_mask(element_intr,
 nir_intrinsic_write_mask(intr));
-nir_src_copy(&element_intr->src[1], &intr->src[1],
-&element_intr->instr);
+element_intr->src[1] = nir_src_for_ssa(intr->src[1].ssa);
 }
 nir_builder_instr_insert(b, &element_intr->instr);


@@ -63,12 +63,11 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 set_io_semantics(chan_intr, intr, i);
 /* offset and vertex (if needed) */
 for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
-nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
+chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
 if (newc + newi > 3) {
 nir_src *src = nir_get_io_offset_src(chan_intr);
 nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
-nir_src new_src = nir_src_for_ssa(offset);
-nir_src_copy(src, &new_src, &chan_intr->instr);
+*src = nir_src_for_ssa(offset);
 }
 nir_builder_instr_insert(b, &chan_intr->instr);
@@ -110,7 +109,7 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 if (nir_intrinsic_has_base(intr))
 nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
 for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
-nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
+chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
 /* increment offset per component */
 nir_def *offset = nir_iadd_imm(b, base_offset, i * (intr->def.bit_size / 8));
@@ -178,12 +177,11 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
 /* offset and vertex (if needed) */
 for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
-nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
+chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
 if (newc + newi > 3) {
 nir_src *src = nir_get_io_offset_src(chan_intr);
 nir_def *offset = nir_iadd_imm(b, src->ssa, (newc + newi) / 4);
-nir_src new_src = nir_src_for_ssa(offset);
-nir_src_copy(src, &new_src, &chan_intr->instr);
+*src = nir_src_for_ssa(offset);
 }
 nir_builder_instr_insert(b, &chan_intr->instr);
@@ -220,7 +218,7 @@ lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 /* value */
 chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
 for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
-nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
+chan_intr->src[j] = nir_src_for_ssa(intr->src[j].ssa);
 /* increment offset per component */
 nir_def *offset = nir_iadd_imm(b, base_offset, i * (value->bit_size / 8));
@@ -393,7 +391,7 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
 if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
 intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
 intr->intrinsic == nir_intrinsic_interp_deref_at_vertex)
-nir_src_copy(&chan_intr->src[1], &intr->src[1], &chan_intr->instr);
+chan_intr->src[1] = nir_src_for_ssa(intr->src[1].ssa);
 nir_builder_instr_insert(b, &chan_intr->instr);


@@ -216,7 +216,7 @@ lower_phis_to_scalar_block(nir_block *block,
 nir_alu_instr *mov = nir_alu_instr_create(state->shader,
 nir_op_mov);
 nir_def_init(&mov->instr, &mov->def, 1, bit_size);
-nir_src_copy(&mov->src[0].src, &src->src, &mov->instr);
+mov->src[0].src = nir_src_for_ssa(src->src.ssa);
 mov->src[0].swizzle[0] = i;
 /* Insert at the end of the predecessor but before the jump */


@@ -65,7 +65,7 @@ nir_load_ssbo_prop(nir_builder *b, nir_intrinsic_op op,
 {
 nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
 load->num_components = 1;
-nir_src_copy(&load->src[0], idx, &load->instr);
+load->src[0] = nir_src_for_ssa(idx->ssa);
 nir_def_init(&load->instr, &load->def, 1, bitsize);
 nir_builder_instr_insert(b, &load->instr);
 return &load->def;
@@ -112,16 +112,16 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
 }
 if (is_store) {
-nir_src_copy(&global->src[0], &intr->src[0], &global->instr);
+global->src[0] = nir_src_for_ssa(intr->src[0].ssa);
 nir_intrinsic_set_write_mask(global, nir_intrinsic_write_mask(intr));
 } else {
 nir_def_init(&global->instr, &global->def,
 intr->def.num_components, intr->def.bit_size);
 if (is_atomic) {
-nir_src_copy(&global->src[1], &intr->src[2], &global->instr);
+global->src[1] = nir_src_for_ssa(intr->src[2].ssa);
 if (nir_intrinsic_infos[op].num_srcs > 2)
-nir_src_copy(&global->src[2], &intr->src[3], &global->instr);
+global->src[2] = nir_src_for_ssa(intr->src[3].ssa);
 }
 }


@@ -45,7 +45,7 @@ lower_subgroups_64bit_split_intrinsic(nir_builder *b, nir_intrinsic_instr *intri
 intr->const_index[1] = intrin->const_index[1];
 intr->src[0] = nir_src_for_ssa(comp);
 if (nir_intrinsic_infos[intrin->intrinsic].num_srcs == 2)
-nir_src_copy(&intr->src[1], &intrin->src[1], &intr->instr);
+intr->src[1] = nir_src_for_ssa(intrin->src[1].ssa);
 intr->num_components = 1;
 nir_builder_instr_insert(b, &intr->instr);
@@ -126,7 +126,7 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
 /* invocation */
 if (nir_intrinsic_infos[intrin->intrinsic].num_srcs > 1) {
 assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs == 2);
-nir_src_copy(&chan_intrin->src[1], &intrin->src[1], &chan_intrin->instr);
+chan_intrin->src[1] = nir_src_for_ssa(intrin->src[1].ssa);
 }
 chan_intrin->const_index[0] = intrin->const_index[0];
@@ -207,7 +207,7 @@ lower_shuffle_to_swizzle(nir_builder *b, nir_intrinsic_instr *intrin,
 nir_intrinsic_instr *swizzle = nir_intrinsic_instr_create(
 b->shader, nir_intrinsic_masked_swizzle_amd);
 swizzle->num_components = intrin->num_components;
-nir_src_copy(&swizzle->src[0], &intrin->src[0], &swizzle->instr);
+swizzle->src[0] = nir_src_for_ssa(intrin->src[0].ssa);
 nir_intrinsic_set_swizzle_mask(swizzle, (mask << 10) | 0x1f);
 nir_def_init(&swizzle->instr, &swizzle->def,
 intrin->def.num_components, intrin->def.bit_size);
@@ -297,7 +297,7 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
 nir_intrinsic_instr *shuffle =
 nir_intrinsic_instr_create(b->shader, nir_intrinsic_shuffle);
 shuffle->num_components = intrin->num_components;
-nir_src_copy(&shuffle->src[0], &intrin->src[0], &shuffle->instr);
+shuffle->src[0] = nir_src_for_ssa(intrin->src[0].ssa);
 shuffle->src[1] = nir_src_for_ssa(index);
 nir_def_init(&shuffle->instr, &shuffle->def,
 intrin->def.num_components, intrin->def.bit_size);
@@ -581,7 +581,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
 qbcst->num_components = intrin->num_components;
 qbcst->src[1] = nir_src_for_ssa(nir_imm_int(b, i));
-nir_src_copy(&qbcst->src[0], &intrin->src[0], &qbcst->instr);
+qbcst->src[0] = nir_src_for_ssa(intrin->src[0].ssa);
 nir_def_init(&qbcst->instr, &qbcst->def,
 intrin->def.num_components, intrin->def.bit_size);


@@ -301,7 +301,7 @@ sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
 nir_tex_instr *plane_tex =
 nir_tex_instr_create(b->shader, tex->num_srcs + 1);
 for (unsigned i = 0; i < tex->num_srcs; i++) {
-nir_src_copy(&plane_tex->src[i].src, &tex->src[i].src, &plane_tex->instr);
+plane_tex->src[i].src = nir_src_for_ssa(tex->src[i].src.ssa);
 plane_tex->src[i].src_type = tex->src[i].src_type;
 }
 plane_tex->src[tex->num_srcs] = nir_tex_src_for_ssa(nir_tex_src_plane,
@@ -869,7 +869,7 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
 /* reuse existing srcs */
 for (unsigned i = 0; i < tex->num_srcs; i++) {
-nir_src_copy(&txd->src[i].src, &tex->src[i].src, &txd->instr);
+txd->src[i].src = nir_src_for_ssa(tex->src[i].src.ssa);
 txd->src[i].src_type = tex->src[i].src_type;
 }
 int coord = nir_tex_instr_src_index(tex, nir_tex_src_coord);
@@ -908,7 +908,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
 /* reuse all but bias src */
 for (int i = 0; i < 2; i++) {
 if (tex->src[i].src_type != nir_tex_src_bias) {
-nir_src_copy(&txl->src[i].src, &tex->src[i].src, &txl->instr);
+txl->src[i].src = nir_src_for_ssa(tex->src[i].src.ssa);
 txl->src[i].src_type = tex->src[i].src_type;
 }
 }
@@ -1201,7 +1201,7 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
 tex_copy->backend_flags = tex->backend_flags;
 for (unsigned j = 0; j < tex->num_srcs; ++j) {
-nir_src_copy(&tex_copy->src[j].src, &tex->src[j].src, &tex_copy->instr);
+tex_copy->src[j].src = nir_src_for_ssa(tex->src[j].src.ssa);
 tex_copy->src[j].src_type = tex->src[j].src_type;
 }


@@ -444,7 +444,7 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
 nir_foreach_phi_safe(phi, block) {
 nir_alu_instr *sel = nir_alu_instr_create(shader, nir_op_bcsel);
-nir_src_copy(&sel->src[0].src, &if_stmt->condition, &sel->instr);
+sel->src[0].src = nir_src_for_ssa(if_stmt->condition.ssa);
 /* Splat the condition to all channels */
 memset(sel->src[0].swizzle, 0, sizeof sel->src[0].swizzle);
@@ -453,7 +453,7 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
 assert(src->pred == then_block || src->pred == else_block);
 unsigned idx = src->pred == then_block ? 1 : 2;
-nir_src_copy(&sel->src[idx].src, &src->src, &sel->instr);
+sel->src[idx].src = nir_src_for_ssa(src->src.ssa);
 }
 nir_def_init(&sel->instr, &sel->def,


@@ -77,7 +77,7 @@ lima_nir_split_load_input_instr(nir_builder *b,
 nir_intrinsic_set_dest_type(new_intrin, nir_intrinsic_dest_type(intrin));
 /* offset */
-nir_src_copy(&new_intrin->src[0], &intrin->src[0], &new_intrin->instr);
+new_intrin->src[0] = nir_src_for_ssa(intrin->src[0].ssa);
 nir_builder_instr_insert(b, &new_intrin->instr);
 nir_def_rewrite_uses(&alu->def,


@@ -158,7 +158,7 @@ r600_create_new_load(nir_builder *b,
 if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
 intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
-nir_src_copy(&new_intr->src[1], &intr->src[1], &new_intr->instr);
+new_intr->src[1] = nir_src_for_ssa(intr->src[1].ssa);
 nir_builder_instr_insert(b, &new_intr->instr);


@@ -1587,11 +1587,11 @@ lower_txf_lod_robustness_instr(nir_builder *b, nir_instr *in, void *data)
 levels->dest_type = nir_type_int | lod->bit_size;
 if (offset_idx >= 0) {
 levels->src[0].src_type = nir_tex_src_texture_offset;
-nir_src_copy(&levels->src[0].src, &txf->src[offset_idx].src, &levels->instr);
+levels->src[0].src = nir_src_for_ssa(txf->src[offset_idx].src.ssa);
 }
 if (handle_idx >= 0) {
 levels->src[!!(offset_idx >= 0)].src_type = nir_tex_src_texture_handle;
-nir_src_copy(&levels->src[!!(offset_idx >= 0)].src, &txf->src[handle_idx].src, &levels->instr);
+levels->src[!!(offset_idx >= 0)].src = nir_src_for_ssa(txf->src[handle_idx].src.ssa);
 }
 nir_def_init(&levels->instr, &levels->def,
 nir_tex_instr_dest_size(levels), 32);
@@ -2207,7 +2207,7 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
 new_instr->src[0] = nir_src_for_ssa(&deref_arr->def);
 /* deref ops have no offset src, so copy the srcs after it */
 for (unsigned i = 2; i < nir_intrinsic_infos[intr->intrinsic].num_srcs; i++)
-nir_src_copy(&new_instr->src[i - 1], &intr->src[i], &new_instr->instr);
+new_instr->src[i - 1] = nir_src_for_ssa(intr->src[i].ssa);
 nir_builder_instr_insert(b, &new_instr->instr);
 result[i] = &new_instr->def;


@@ -172,7 +172,7 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coor
 nir_tex_instr_src_size(array_tex, s));
 array_tex->src[s].src = nir_src_for_ssa(c);
 } else
-nir_src_copy(&array_tex->src[s].src, psrc, &array_tex->instr);
+array_tex->src[s].src = nir_src_for_ssa(psrc->ssa);
 s++;
 }
@@ -434,7 +434,7 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
 for (int i = 0; i < tex->num_srcs; i++) {
 if (i == bias_idx)
 continue;
-nir_src_copy(&txl->src[s].src, &tex->src[i].src, &txl->instr);
+txl->src[s].src = nir_src_for_ssa(tex->src[i].src.ssa);
 txl->src[s].src_type = tex->src[i].src_type;
 s++;
 }


@@ -132,7 +132,7 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state,
 }
 FALLTHROUGH;
 default:
-nir_src_copy(&tex->src[i].src, &old_tex->src[i].src, &tex->instr);
+tex->src[i].src = nir_src_for_ssa(old_tex->src[i].src.ssa);
 break;
 }
 }


@@ -184,7 +184,7 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_def *coor
 nir_src *psrc = (tex->src[i].src_type == nir_tex_src_coord) ?
 &coord_src : &tex->src[i].src;
-nir_src_copy(&array_tex->src[i].src, psrc, &array_tex->instr);
+array_tex->src[i].src = nir_src_for_ssa(psrc->ssa);
 array_tex->src[i].src_type = tex->src[i].src_type;
 }


@@ -87,8 +87,7 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
 int coord_index = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 nir_def *ssa_src = nir_trim_vector(b, tex->src[coord_index].src.ssa,
 coord_components);
-nir_src src = nir_src_for_ssa(ssa_src);
-nir_src_copy(&tql->src[0].src, &src, &tql->instr);
+tql->src[0].src = nir_src_for_ssa(ssa_src);
 tql->src[0].src_type = nir_tex_src_coord;
 unsigned idx = 1;
@@ -99,7 +98,7 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
 tex->src[i].src_type == nir_tex_src_sampler_offset ||
 tex->src[i].src_type == nir_tex_src_texture_handle ||
 tex->src[i].src_type == nir_tex_src_sampler_handle) {
-nir_src_copy(&tql->src[idx].src, &tex->src[i].src, &tql->instr);
+tql->src[idx].src = nir_src_for_ssa(tex->src[i].src.ssa);
 tql->src[idx].src_type = tex->src[i].src_type;
 idx++;
 }
@@ -290,7 +289,7 @@ create_txf_from_tex(nir_builder *b, nir_tex_instr *tex)
 if (tex->src[i].src_type == nir_tex_src_texture_deref ||
 tex->src[i].src_type == nir_tex_src_texture_offset ||
 tex->src[i].src_type == nir_tex_src_texture_handle) {
-nir_src_copy(&txf->src[idx].src, &tex->src[i].src, &txf->instr);
+txf->src[idx].src = nir_src_for_ssa(tex->src[i].src.ssa);
 txf->src[idx].src_type = tex->src[i].src_type;
 idx++;
 }


@@ -52,8 +52,7 @@ nir_lod_errata_instr(nir_builder *b, nir_instr *instr, void *data)
 nir_def_init(&l->instr, &l->def, 3, 32);
 /* TODO: Indirect samplers, separate sampler objects XXX */
-nir_src idx = nir_src_for_ssa(nir_imm_int(b, tex->texture_index));
-nir_src_copy(&l->src[0], &idx, &l->instr);
+l->src[0] = nir_src_for_ssa(nir_imm_int(b, tex->texture_index));
 nir_builder_instr_insert(b, &l->instr);
 nir_def *params = &l->def;


@@ -252,7 +252,7 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state,
 }
 FALLTHROUGH;
 default:
-nir_src_copy(&tex->src[i].src, &old_tex->src[i].src, &tex->instr);
+tex->src[i].src = nir_src_for_ssa(old_tex->src[i].src.ssa);
 break;
 }
 }