From aa2d6e020b138ebcf86ee091268dd5193babc832 Mon Sep 17 00:00:00 2001
From: Rhys Perry
Date: Wed, 8 Sep 2021 11:37:07 +0100
Subject: [PATCH] Revert "nir: Drop the unused instr arg for src/dest copy
 functions."

This reverts commit c3a01841184ee8303c0c5ebe58491301622c5ad6.

Acked-by: Alyssa Rosenzweig
Reviewed-by: Jason Ekstrand
Reviewed-by: Emma Anholt
Part-of: 
---
 src/amd/common/ac_nir_lower_resinfo.c         |  2 +-
 .../vulkan/radv_nir_lower_ycbcr_textures.c    |  2 +-
 src/compiler/glsl/glsl_to_nir.cpp             |  2 +-
 src/compiler/nir/nir.c                        | 24 ++++++++++---------
 src/compiler/nir/nir.h                        | 10 ++++----
 src/compiler/nir/nir_builtin_builder.c        |  4 ++--
 src/compiler/nir/nir_deref.c                  |  4 ++--
 src/compiler/nir/nir_from_ssa.c               |  2 +-
 src/compiler/nir/nir_lower_alu_width.c        |  8 +++----
 src/compiler/nir/nir_lower_atomics_to_ssbo.c  | 12 +++++-----
 src/compiler/nir/nir_lower_bit_size.c         |  2 +-
 src/compiler/nir/nir_lower_indirect_derefs.c  |  2 +-
 src/compiler/nir/nir_lower_io.c               |  2 +-
 .../nir/nir_lower_io_arrays_to_elements.c     |  6 +++--
 src/compiler/nir/nir_lower_io_to_scalar.c     | 10 ++++----
 src/compiler/nir/nir_lower_locals_to_regs.c   |  4 ++--
 src/compiler/nir/nir_lower_phis_to_scalar.c   |  2 +-
 src/compiler/nir/nir_lower_ssbo.c             |  8 +++----
 src/compiler/nir/nir_lower_subgroups.c        | 10 ++++----
 src/compiler/nir/nir_lower_tex.c              |  8 +++----
 src/compiler/nir/nir_lower_vec_to_movs.c      |  4 ++--
 src/compiler/nir/nir_opt_peephole_select.c    |  4 ++--
 src/compiler/nir/nir_opt_undef.c              |  3 ++-
 src/compiler/nir/nir_search.c                 |  3 ++-
 .../drivers/etnaviv/etnaviv_compiler_nir.c    |  2 +-
 .../lima/ir/lima_nir_split_load_input.c       |  2 +-
 .../r600/sfn/sfn_nir_vectorize_vs_inputs.c    |  2 +-
 .../r600/sfn/tests/sfn_valuefactory_test.cpp  |  2 +-
 src/gallium/drivers/zink/zink_compiler.c      |  2 +-
 .../zink/zink_lower_cubemap_to_array.c        |  4 ++--
 .../rogue/nir/rogue_nir_lower_io.c            |  2 +-
 .../compiler/brw_nir_opt_peephole_ffma.c      |  2 +-
 .../vulkan/anv_nir_lower_ycbcr_textures.c     |  2 +-
 .../compiler/dxil_nir_lower_int_cubemaps.c    |  2 +-
 .../compiler/dxil_nir_lower_int_samplers.c    |  6 ++---
 src/panfrost/midgard/midgard_errata_lod.c     |  2 +-
 36 files changed, 88 insertions(+), 80 deletions(-)

diff --git a/src/amd/common/ac_nir_lower_resinfo.c b/src/amd/common/ac_nir_lower_resinfo.c
index be5d6a89843..7c91ee07548 100644
--- a/src/amd/common/ac_nir_lower_resinfo.c
+++ b/src/amd/common/ac_nir_lower_resinfo.c
@@ -279,7 +279,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
       new_tex->texture_index = tex->texture_index;
       new_tex->sampler_index = tex->sampler_index;
       new_tex->dest_type = nir_type_int32;
-      nir_src_copy(&new_tex->src[0].src, &tex->src[i].src);
+      nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
       new_tex->src[0].src_type = tex->src[i].src_type;
       nir_ssa_dest_init(&new_tex->instr, &new_tex->dest,
                         nir_tex_instr_dest_size(new_tex), 32, NULL);
diff --git a/src/amd/vulkan/radv_nir_lower_ycbcr_textures.c b/src/amd/vulkan/radv_nir_lower_ycbcr_textures.c
index 931e1e29a85..4cc322a0253 100644
--- a/src/amd/vulkan/radv_nir_lower_ycbcr_textures.c
+++ b/src/amd/vulkan/radv_nir_lower_ycbcr_textures.c
@@ -134,7 +134,7 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state, uint32_t plane)
       }
       FALLTHROUGH;
    default:
-      nir_src_copy(&tex->src[i].src, &old_tex->src[i].src);
+      nir_src_copy(&tex->src[i].src, &old_tex->src[i].src, tex);
       break;
    }
 }
diff --git a/src/compiler/glsl/glsl_to_nir.cpp b/src/compiler/glsl/glsl_to_nir.cpp
index 725e37b8c61..f38df40d58b 100644
--- a/src/compiler/glsl/glsl_to_nir.cpp
+++ b/src/compiler/glsl/glsl_to_nir.cpp
@@ -1704,7 +1704,7 @@ nir_visitor::visit(ir_call *ir)
             nir_ssa_def *val = evaluate_rvalue(param_rvalue);
             nir_src src = nir_src_for_ssa(val);
-            nir_src_copy(&call->params[i], &src);
+            nir_src_copy(&call->params[i], &src, call);
          } else if (sig_param->data.mode == ir_var_function_inout) {
             unreachable("unimplemented: inout parameters");
          }
 
diff --git a/src/compiler/nir/nir.c b/src/compiler/nir/nir.c
index 1e749911e34..03ac7b0c7f1 100644
--- a/src/compiler/nir/nir.c
+++ b/src/compiler/nir/nir.c
@@ -454,7 +454,7 @@ static void dest_free_indirects(nir_dest *dest)
 /* NOTE: if the instruction you are copying a src to is already added
  * to the IR, use nir_instr_rewrite_src() instead.
  */
-void nir_src_copy(nir_src *dest, const nir_src *src)
+void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
 {
    src_free_indirects(dest);
 
@@ -466,14 +466,14 @@ void nir_src_copy(nir_src *dest, const nir_src *src)
       dest->reg.reg = src->reg.reg;
       if (src->reg.indirect) {
          dest->reg.indirect = calloc(1, sizeof(nir_src));
-         nir_src_copy(dest->reg.indirect, src->reg.indirect);
+         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
       } else {
          dest->reg.indirect = NULL;
       }
    }
 }
 
-void nir_dest_copy(nir_dest *dest, const nir_dest *src)
+void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
 {
    /* Copying an SSA definition makes no sense whatsoever. */
    assert(!src->is_ssa);
@@ -486,16 +486,17 @@ void nir_dest_copy(nir_dest *dest, const nir_dest *src)
    dest->reg.reg = src->reg.reg;
    if (src->reg.indirect) {
       dest->reg.indirect = calloc(1, sizeof(nir_src));
-      nir_src_copy(dest->reg.indirect, src->reg.indirect);
+      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
    } else {
      dest->reg.indirect = NULL;
    }
 }
 
 void
-nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src)
+nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
+                 nir_alu_instr *instr)
 {
-   nir_src_copy(&dest->src, &src->src);
+   nir_src_copy(&dest->src, &src->src, &instr->instr);
    dest->abs = src->abs;
    dest->negate = src->negate;
    for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
@@ -503,9 +504,10 @@ nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src)
 }
 
 void
-nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src)
+nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
+                  nir_alu_instr *instr)
 {
-   nir_dest_copy(&dest->dest, &src->dest);
+   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
    dest->write_mask = src->write_mask;
    dest->saturate = src->saturate;
 }
@@ -1680,7 +1682,7 @@ nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
    assert(!src_is_valid(src) || src->parent_instr == instr);
 
    src_remove_all_uses(src);
-   nir_src_copy(src, &new_src);
+   nir_src_copy(src, &new_src, instr);
    src_add_all_uses(src, instr, NULL);
 }
 
@@ -1704,7 +1706,7 @@ nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
    assert(!src_is_valid(src) || src->parent_if == if_stmt);
 
    src_remove_all_uses(src);
-   nir_src_copy(src, &new_src);
+   nir_src_copy(src, &new_src, if_stmt);
    src_add_all_uses(src, NULL, if_stmt);
 }
 
@@ -1723,7 +1725,7 @@ nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
    /* We can't re-write with an SSA def */
    assert(!new_dest.is_ssa);
 
-   nir_dest_copy(dest, &new_dest);
+   nir_dest_copy(dest, &new_dest, instr);
 
    dest->reg.parent_instr = instr;
    list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 8fb1b30581b..4219123ce67 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -1137,8 +1137,8 @@ nir_is_sequential_comp_swizzle(uint8_t *swiz, unsigned nr_comp)
    return true;
 }
 
-void nir_src_copy(nir_src *dest, const nir_src *src);
-void nir_dest_copy(nir_dest *dest, const nir_dest *src);
+void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
+void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
 
 typedef struct {
    /** Base source */
@@ -1472,8 +1472,10 @@ typedef struct nir_alu_instr {
    nir_alu_src src[];
 } nir_alu_instr;
 
-void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src);
-void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src);
+void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
+                      nir_alu_instr *instr);
+void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
+                       nir_alu_instr *instr);
 
 bool nir_alu_instr_is_copy(nir_alu_instr *instr);
 
diff --git a/src/compiler/nir/nir_builtin_builder.c b/src/compiler/nir/nir_builtin_builder.c
index b975157cfb1..26b52412223 100644
--- a/src/compiler/nir/nir_builtin_builder.c
+++ b/src/compiler/nir/nir_builtin_builder.c
@@ -366,7 +366,7 @@ nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
           tex->src[i].src_type == nir_tex_src_sampler_offset ||
           tex->src[i].src_type == nir_tex_src_texture_handle ||
           tex->src[i].src_type == nir_tex_src_sampler_handle) {
-         nir_src_copy(&txs->src[idx].src, &tex->src[i].src);
+         nir_src_copy(&txs->src[idx].src, &tex->src[i].src, txs);
          txs->src[idx].src_type = tex->src[i].src_type;
         idx++;
       }
@@ -421,7 +421,7 @@ nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
           tex->src[i].src_type == nir_tex_src_sampler_offset ||
           tex->src[i].src_type == nir_tex_src_texture_handle ||
           tex->src[i].src_type == nir_tex_src_sampler_handle) {
-         nir_src_copy(&tql->src[idx].src, &tex->src[i].src);
+         nir_src_copy(&tql->src[idx].src, &tex->src[i].src, tql);
          tql->src[idx].src_type = tex->src[i].src_type;
         idx++;
       }
diff --git a/src/compiler/nir/nir_deref.c b/src/compiler/nir/nir_deref.c
index 9e46d0ab5d5..7427835e327 100644
--- a/src/compiler/nir/nir_deref.c
+++ b/src/compiler/nir/nir_deref.c
@@ -757,7 +757,7 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
          parent = rematerialize_deref_in_block(parent, state);
          new_deref->parent = nir_src_for_ssa(&parent->dest.ssa);
       } else {
-         nir_src_copy(&new_deref->parent, &deref->parent);
+         nir_src_copy(&new_deref->parent, &deref->parent, new_deref);
       }
    }
 
@@ -774,7 +774,7 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
    case nir_deref_type_array:
    case nir_deref_type_ptr_as_array:
       assert(!nir_src_as_deref(deref->arr.index));
-      nir_src_copy(&new_deref->arr.index, &deref->arr.index);
+      nir_src_copy(&new_deref->arr.index, &deref->arr.index, new_deref);
       break;
 
    case nir_deref_type_struct:
diff --git a/src/compiler/nir/nir_from_ssa.c b/src/compiler/nir/nir_from_ssa.c
index 7456eab7686..3292c1ff4e6 100644
--- a/src/compiler/nir/nir_from_ssa.c
+++ b/src/compiler/nir/nir_from_ssa.c
@@ -644,7 +644,7 @@ emit_copy(nir_builder *b, nir_src src, nir_src dest_src)
       assert(src.reg.reg->num_components >= dest_src.reg.reg->num_components);
 
    nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
-   nir_src_copy(&mov->src[0].src, &src);
+   nir_src_copy(&mov->src[0].src, &src, mov);
    mov->dest.dest = nir_dest_for_reg(dest_src.reg.reg);
    mov->dest.write_mask = (1 << dest_src.reg.reg->num_components) - 1;
 
diff --git a/src/compiler/nir/nir_lower_alu_width.c b/src/compiler/nir/nir_lower_alu_width.c
index 039e2bd796b..c75fc75f574 100644
--- a/src/compiler/nir/nir_lower_alu_width.c
+++ b/src/compiler/nir/nir_lower_alu_width.c
@@ -106,11 +106,11 @@ lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
    for (int i = num_components - 1; i >= 0; i--) {
       nir_alu_instr *chan = nir_alu_instr_create(builder->shader, chan_op);
       nir_alu_ssa_dest_init(chan, 1, alu->dest.dest.ssa.bit_size);
-      nir_alu_src_copy(&chan->src[0], &alu->src[0]);
+      nir_alu_src_copy(&chan->src[0], &alu->src[0], chan);
       chan->src[0].swizzle[0] = chan->src[0].swizzle[i];
       if (nir_op_infos[chan_op].num_inputs > 1) {
          assert(nir_op_infos[chan_op].num_inputs == 2);
-         nir_alu_src_copy(&chan->src[1], &alu->src[1]);
+         nir_alu_src_copy(&chan->src[1], &alu->src[1], chan);
          chan->src[1].swizzle[0] = chan->src[1].swizzle[i];
       }
       chan->exact = alu->exact;
@@ -159,7 +159,7 @@ lower_fdot(nir_alu_instr *alu, nir_builder *builder)
          builder->shader, prev ? nir_op_ffma : nir_op_fmul);
       nir_alu_ssa_dest_init(instr, 1, alu->dest.dest.ssa.bit_size);
       for (unsigned j = 0; j < 2; j++) {
-         nir_alu_src_copy(&instr->src[j], &alu->src[j]);
+         nir_alu_src_copy(&instr->src[j], &alu->src[j], instr);
          instr->src[j].swizzle[0] = alu->src[j].swizzle[i];
       }
       if (i != num_components - 1)
@@ -381,7 +381,7 @@ lower_alu_instr_width(nir_builder *b, nir_instr *instr, void *_data)
    nir_alu_instr *lower = nir_alu_instr_create(b->shader, alu->op);
 
    for (i = 0; i < num_src; i++) {
-      nir_alu_src_copy(&lower->src[i], &alu->src[i]);
+      nir_alu_src_copy(&lower->src[i], &alu->src[i], lower);
 
       /* We only handle same-size-as-dest (input_sizes[] == 0) or scalar
       * args (input_sizes[] == 1).
diff --git a/src/compiler/nir/nir_lower_atomics_to_ssbo.c b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
index 7bf7f7b620e..4abf83fcf27 100644
--- a/src/compiler/nir/nir_lower_atomics_to_ssbo.c
+++ b/src/compiler/nir/nir_lower_atomics_to_ssbo.c
@@ -122,7 +122,7 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un
       /* remapped to ssbo_atomic_add: { buffer_idx, offset, +1 } */
       temp = nir_imm_int(b, +1);
       new_instr->src[0] = nir_src_for_ssa(buffer);
-      nir_src_copy(&new_instr->src[1], &instr->src[0]);
+      nir_src_copy(&new_instr->src[1], &instr->src[0], new_instr);
       new_instr->src[2] = nir_src_for_ssa(temp);
       break;
    case nir_intrinsic_atomic_counter_pre_dec:
@@ -131,22 +131,22 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un
       /* NOTE semantic difference so we adjust the return value below */
       temp = nir_imm_int(b, -1);
       new_instr->src[0] = nir_src_for_ssa(buffer);
-      nir_src_copy(&new_instr->src[1], &instr->src[0]);
+      nir_src_copy(&new_instr->src[1], &instr->src[0], new_instr);
       new_instr->src[2] = nir_src_for_ssa(temp);
       break;
    case nir_intrinsic_atomic_counter_read:
       /* remapped to load_ssbo: { buffer_idx, offset } */
       new_instr->src[0] = nir_src_for_ssa(buffer);
-      nir_src_copy(&new_instr->src[1], &instr->src[0]);
+      nir_src_copy(&new_instr->src[1], &instr->src[0], new_instr);
       break;
    default:
       /* remapped to ssbo_atomic_x: { buffer_idx, offset, data, (compare)? } */
       new_instr->src[0] = nir_src_for_ssa(buffer);
-      nir_src_copy(&new_instr->src[1], &instr->src[0]);
-      nir_src_copy(&new_instr->src[2], &instr->src[1]);
+      nir_src_copy(&new_instr->src[1], &instr->src[0], new_instr);
+      nir_src_copy(&new_instr->src[2], &instr->src[1], new_instr);
       if (op == nir_intrinsic_ssbo_atomic_comp_swap ||
           op == nir_intrinsic_ssbo_atomic_fcomp_swap)
-         nir_src_copy(&new_instr->src[3], &instr->src[2]);
+         nir_src_copy(&new_instr->src[3], &instr->src[2], new_instr);
       break;
    }
 
diff --git a/src/compiler/nir/nir_lower_bit_size.c b/src/compiler/nir/nir_lower_bit_size.c
index 4c76d8d56c3..eb4d9cdc048 100644
--- a/src/compiler/nir/nir_lower_bit_size.c
+++ b/src/compiler/nir/nir_lower_bit_size.c
@@ -38,7 +38,7 @@ static nir_ssa_def *convert_to_bit_size(nir_builder *bld, nir_ssa_def *src,
    if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&
       alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {
       nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);
-      nir_alu_src_copy(&instr->src[0], &alu->src[0]);
+      nir_alu_src_copy(&instr->src[0], &alu->src[0], instr);
       return nir_builder_alu_instr_finish_and_insert(bld, instr);
    }
 
diff --git a/src/compiler/nir/nir_lower_indirect_derefs.c b/src/compiler/nir/nir_lower_indirect_derefs.c
index 93be7372528..e30ccad9ebc 100644
--- a/src/compiler/nir/nir_lower_indirect_derefs.c
+++ b/src/compiler/nir/nir_lower_indirect_derefs.c
@@ -98,7 +98,7 @@ emit_load_store_deref(nir_builder *b, nir_intrinsic_instr *orig_instr,
       /* Copy over any other sources. This is needed for interp_deref_at */
       for (unsigned i = 1;
            i < nir_intrinsic_infos[orig_instr->intrinsic].num_srcs; i++)
-         nir_src_copy(&load->src[i], &orig_instr->src[i]);
+         nir_src_copy(&load->src[i], &orig_instr->src[i], load);
 
       nir_ssa_dest_init(&load->instr, &load->dest,
                         orig_instr->dest.ssa.num_components,
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 69a8ddd5513..7ff8d437e0b 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -611,7 +611,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
    if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
-      nir_src_copy(&bary_setup->src[0], &intrin->src[1]);
+      nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);
 
    nir_builder_instr_insert(b, &bary_setup->instr);
 
diff --git a/src/compiler/nir/nir_lower_io_arrays_to_elements.c b/src/compiler/nir/nir_lower_io_arrays_to_elements.c
index 6383b1d0022..901da66962b 100644
--- a/src/compiler/nir/nir_lower_io_arrays_to_elements.c
+++ b/src/compiler/nir/nir_lower_io_arrays_to_elements.c
@@ -181,7 +181,8 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
       if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
           intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
           intr->intrinsic == nir_intrinsic_interp_deref_at_vertex) {
-         nir_src_copy(&element_intr->src[1], &intr->src[1]);
+         nir_src_copy(&element_intr->src[1], &intr->src[1],
+                      &element_intr->instr);
       }
 
       nir_ssa_def_rewrite_uses(&intr->dest.ssa,
@@ -189,7 +190,8 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
    } else {
       nir_intrinsic_set_write_mask(element_intr,
                                    nir_intrinsic_write_mask(intr));
-      nir_src_copy(&element_intr->src[1], &intr->src[1]);
+      nir_src_copy(&element_intr->src[1], &intr->src[1],
+                   &element_intr->instr);
    }
 
    nir_builder_instr_insert(b, &element_intr->instr);
diff --git a/src/compiler/nir/nir_lower_io_to_scalar.c b/src/compiler/nir/nir_lower_io_to_scalar.c
index 12c6a7a46f3..ff2c9f07fea 100644
--- a/src/compiler/nir/nir_lower_io_to_scalar.c
+++ b/src/compiler/nir/nir_lower_io_to_scalar.c
@@ -62,7 +62,7 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
       set_io_semantics(chan_intr, intr, i);
       /* offset and vertex (if needed) */
       for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
-         nir_src_copy(&chan_intr->src[j], &intr->src[j]);
+         nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
 
       nir_builder_instr_insert(b, &chan_intr->instr);
 
@@ -105,7 +105,7 @@ lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
      if (nir_intrinsic_has_base(intr))
         nir_intrinsic_set_base(chan_intr, nir_intrinsic_base(intr));
      for (unsigned j = 0; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
-        nir_src_copy(&chan_intr->src[j], &intr->src[j]);
+        nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
 
      /* increment offset per component */
      nir_ssa_def *offset = nir_iadd_imm(b, base_offset, i * (intr->dest.ssa.bit_size / 8));
@@ -171,7 +171,7 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
       chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
       /* offset and vertex (if needed) */
       for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs; ++j)
-         nir_src_copy(&chan_intr->src[j], &intr->src[j]);
+         nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
 
       nir_builder_instr_insert(b, &chan_intr->instr);
    }
@@ -207,7 +207,7 @@ lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
      /* value */
      chan_intr->src[0] = nir_src_for_ssa(nir_channel(b, value, i));
      for (unsigned j = 1; j < nir_intrinsic_infos[intr->intrinsic].num_srcs - 1; j++)
-        nir_src_copy(&chan_intr->src[j], &intr->src[j]);
+        nir_src_copy(&chan_intr->src[j], &intr->src[j], &chan_intr->instr);
 
      /* increment offset per component */
      nir_ssa_def *offset = nir_iadd_imm(b, base_offset, i * (value->bit_size / 8));
@@ -362,7 +362,7 @@ lower_load_to_scalar_early(nir_builder *b, nir_intrinsic_instr *intr,
      if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_sample ||
          intr->intrinsic == nir_intrinsic_interp_deref_at_vertex)
-        nir_src_copy(&chan_intr->src[1], &intr->src[1]);
+        nir_src_copy(&chan_intr->src[1], &intr->src[1], &chan_intr->instr);
 
      nir_builder_instr_insert(b, &chan_intr->instr);
 
diff --git a/src/compiler/nir/nir_lower_locals_to_regs.c b/src/compiler/nir/nir_lower_locals_to_regs.c
index 40e10cadb0a..d4d4c8899bf 100644
--- a/src/compiler/nir/nir_lower_locals_to_regs.c
+++ b/src/compiler/nir/nir_lower_locals_to_regs.c
@@ -218,7 +218,7 @@ lower_locals_to_regs_block(nir_block *block,
             nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                      &mov->dest.dest.ssa);
          } else {
-            nir_dest_copy(&mov->dest.dest, &intrin->dest);
+            nir_dest_copy(&mov->dest.dest, &intrin->dest, &mov->instr);
          }
 
          nir_builder_instr_insert(b, &mov->instr);
@@ -246,7 +246,7 @@ lower_locals_to_regs_block(nir_block *block,
 
         nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
-        nir_src_copy(&mov->src[0].src, &intrin->src[1]);
+        nir_src_copy(&mov->src[0].src, &intrin->src[1], mov);
 
         /* The normal NIR SSA copy propagate pass can't happen after this pass,
          * so do an ad-hoc copy propagate since this ALU op can do swizzles
diff --git a/src/compiler/nir/nir_lower_phis_to_scalar.c b/src/compiler/nir/nir_lower_phis_to_scalar.c
index 9abaf24bae5..9ba119a1938 100644
--- a/src/compiler/nir/nir_lower_phis_to_scalar.c
+++ b/src/compiler/nir/nir_lower_phis_to_scalar.c
@@ -239,7 +239,7 @@ lower_phis_to_scalar_block(nir_block *block,
                                                   nir_op_mov);
          nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size, NULL);
          mov->dest.write_mask = 1;
-         nir_src_copy(&mov->src[0].src, &src->src);
+         nir_src_copy(&mov->src[0].src, &src->src, &mov->instr);
          mov->src[0].swizzle[0] = i;
 
         /* Insert at the end of the predecessor but before the jump */
diff --git a/src/compiler/nir/nir_lower_ssbo.c b/src/compiler/nir/nir_lower_ssbo.c
index 408b03b3500..19a040a68d8 100644
--- a/src/compiler/nir/nir_lower_ssbo.c
+++ b/src/compiler/nir/nir_lower_ssbo.c
@@ -90,7 +90,7 @@ nir_load_ssbo_prop(nir_builder *b, nir_intrinsic_op op,
 {
    nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
    load->num_components = 1;
-   nir_src_copy(&load->src[0], idx);
+   nir_src_copy(&load->src[0], idx, load);
    nir_ssa_dest_init(&load->instr, &load->dest, 1, bitsize, NULL);
    nir_builder_instr_insert(b, &load->instr);
    return &load->dest.ssa;
@@ -134,7 +134,7 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
    }
 
    if (is_store) {
-      nir_src_copy(&global->src[0], &intr->src[0]);
+      nir_src_copy(&global->src[0], &intr->src[0], global);
       nir_intrinsic_set_write_mask(global, nir_intrinsic_write_mask(intr));
    } else {
       nir_ssa_dest_init(&global->instr, &global->dest,
@@ -142,9 +142,9 @@ lower_ssbo_instr(nir_builder *b, nir_intrinsic_instr *intr)
                         intr->dest.ssa.bit_size, NULL);
 
       if (is_atomic) {
-         nir_src_copy(&global->src[1], &intr->src[2]);
+         nir_src_copy(&global->src[1], &intr->src[2], global);
          if (nir_intrinsic_infos[op].num_srcs > 2)
-            nir_src_copy(&global->src[2], &intr->src[3]);
+            nir_src_copy(&global->src[2], &intr->src[3], global);
       }
    }
 
diff --git a/src/compiler/nir/nir_lower_subgroups.c b/src/compiler/nir/nir_lower_subgroups.c
index f29545ae4d8..a2e2e119862 100644
--- a/src/compiler/nir/nir_lower_subgroups.c
+++ b/src/compiler/nir/nir_lower_subgroups.c
@@ -45,7 +45,7 @@ lower_subgroups_64bit_split_intrinsic(nir_builder *b, nir_intrinsic_instr *intri
    intr->const_index[1] = intrin->const_index[1];
    intr->src[0] = nir_src_for_ssa(comp);
    if (nir_intrinsic_infos[intrin->intrinsic].num_srcs == 2)
-      nir_src_copy(&intr->src[1], &intrin->src[1]);
+      nir_src_copy(&intr->src[1], &intrin->src[1], intr);
 
    intr->num_components = 1;
    nir_builder_instr_insert(b, &intr->instr);
@@ -126,7 +126,7 @@ lower_subgroup_op_to_scalar(nir_builder *b, nir_intrinsic_instr *intrin,
       /* invocation */
       if (nir_intrinsic_infos[intrin->intrinsic].num_srcs > 1) {
          assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs == 2);
-         nir_src_copy(&chan_intrin->src[1], &intrin->src[1]);
+         nir_src_copy(&chan_intrin->src[1], &intrin->src[1], chan_intrin);
       }
 
       chan_intrin->const_index[0] = intrin->const_index[0];
@@ -209,7 +209,7 @@ lower_shuffle_to_swizzle(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_intrinsic_instr *swizzle = nir_intrinsic_instr_create(
       b->shader, nir_intrinsic_masked_swizzle_amd);
    swizzle->num_components = intrin->num_components;
-   nir_src_copy(&swizzle->src[0], &intrin->src[0]);
+   nir_src_copy(&swizzle->src[0], &intrin->src[0], swizzle);
    nir_intrinsic_set_swizzle_mask(swizzle, (mask << 10) | 0x1f);
    nir_ssa_dest_init(&swizzle->instr, &swizzle->dest,
                      intrin->dest.ssa.num_components,
@@ -288,7 +288,7 @@ lower_to_shuffle(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_intrinsic_instr *shuffle = nir_intrinsic_instr_create(b->shader,
                                                              nir_intrinsic_shuffle);
    shuffle->num_components = intrin->num_components;
-   nir_src_copy(&shuffle->src[0], &intrin->src[0]);
+   nir_src_copy(&shuffle->src[0], &intrin->src[0], shuffle);
    shuffle->src[1] = nir_src_for_ssa(index);
    nir_ssa_dest_init(&shuffle->instr, &shuffle->dest,
                      intrin->dest.ssa.num_components,
@@ -568,7 +568,7 @@ lower_dynamic_quad_broadcast(nir_builder *b, nir_intrinsic_instr *intrin,
         qbcst->num_components = intrin->num_components;
         qbcst->src[1] = nir_src_for_ssa(nir_imm_int(b, i));
-        nir_src_copy(&qbcst->src[0], &intrin->src[0]);
+        nir_src_copy(&qbcst->src[0], &intrin->src[0], qbcst);
         nir_ssa_dest_init(&qbcst->instr, &qbcst->dest,
                           intrin->dest.ssa.num_components,
                           intrin->dest.ssa.bit_size, NULL);
diff --git a/src/compiler/nir/nir_lower_tex.c b/src/compiler/nir/nir_lower_tex.c
index 634384fdb41..14f83c9fa74 100644
--- a/src/compiler/nir/nir_lower_tex.c
+++ b/src/compiler/nir/nir_lower_tex.c
@@ -315,7 +315,7 @@ sample_plane(nir_builder *b, nir_tex_instr *tex, int plane,
    nir_tex_instr *plane_tex =
       nir_tex_instr_create(b->shader, tex->num_srcs + 1);
    for (unsigned i = 0; i < tex->num_srcs; i++) {
-      nir_src_copy(&plane_tex->src[i].src, &tex->src[i].src);
+      nir_src_copy(&plane_tex->src[i].src, &tex->src[i].src, plane_tex);
       plane_tex->src[i].src_type = tex->src[i].src_type;
    }
    plane_tex->src[tex->num_srcs].src = nir_src_for_ssa(nir_imm_int(b, plane));
@@ -812,7 +812,7 @@ lower_tex_to_txd(nir_builder *b, nir_tex_instr *tex)
    /* reuse existing srcs */
    for (unsigned i = 0; i < tex->num_srcs; i++) {
-      nir_src_copy(&txd->src[i].src, &tex->src[i].src);
+      nir_src_copy(&txd->src[i].src, &tex->src[i].src, txd);
       txd->src[i].src_type = tex->src[i].src_type;
    }
    int coord = nir_tex_instr_src_index(tex, nir_tex_src_coord);
@@ -852,7 +852,7 @@ lower_txb_to_txl(nir_builder *b, nir_tex_instr *tex)
    /* reuse all but bias src */
    for (int i = 0; i < 2; i++) {
      if (tex->src[i].src_type != nir_tex_src_bias) {
-        nir_src_copy(&txl->src[i].src, &tex->src[i].src);
+        nir_src_copy(&txl->src[i].src, &tex->src[i].src, txl);
         txl->src[i].src_type = tex->src[i].src_type;
      }
    }
@@ -1164,7 +1164,7 @@ lower_tg4_offsets(nir_builder *b, nir_tex_instr *tex)
       tex_copy->dest_type = tex->dest_type;
 
       for (unsigned j = 0; j < tex->num_srcs; ++j) {
-         nir_src_copy(&tex_copy->src[j].src, &tex->src[j].src);
+         nir_src_copy(&tex_copy->src[j].src, &tex->src[j].src, tex_copy);
          tex_copy->src[j].src_type = tex->src[j].src_type;
       }
 
diff --git a/src/compiler/nir/nir_lower_vec_to_movs.c b/src/compiler/nir/nir_lower_vec_to_movs.c
index dd382768871..afebc136452 100644
--- a/src/compiler/nir/nir_lower_vec_to_movs.c
+++ b/src/compiler/nir/nir_lower_vec_to_movs.c
@@ -68,8 +68,8 @@ insert_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
       return 1 << start_idx;
 
    nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
-   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx]);
-   nir_alu_dest_copy(&mov->dest, &vec->dest);
+   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);
+   nir_alu_dest_copy(&mov->dest, &vec->dest, mov);
 
    mov->dest.write_mask = (1u << start_idx);
    mov->src[0].swizzle[start_idx] = vec->src[start_idx].swizzle[0];
diff --git a/src/compiler/nir/nir_opt_peephole_select.c b/src/compiler/nir/nir_opt_peephole_select.c
index fc0a32b2bc6..cc5fb5463ef 100644
--- a/src/compiler/nir/nir_opt_peephole_select.c
+++ b/src/compiler/nir/nir_opt_peephole_select.c
@@ -457,7 +457,7 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
       nir_phi_instr *phi = nir_instr_as_phi(instr);
       nir_alu_instr *sel = nir_alu_instr_create(shader, nir_op_bcsel);
-      nir_src_copy(&sel->src[0].src, &if_stmt->condition);
+      nir_src_copy(&sel->src[0].src, &if_stmt->condition, sel);
 
       /* Splat the condition to all channels */
       memset(sel->src[0].swizzle, 0, sizeof sel->src[0].swizzle);
@@ -467,7 +467,7 @@ nir_opt_peephole_select_block(nir_block *block, nir_shader *shader,
          assert(src->src.is_ssa);
 
         unsigned idx = src->pred == then_block ? 1 : 2;
-        nir_src_copy(&sel->src[idx].src, &src->src);
+        nir_src_copy(&sel->src[idx].src, &src->src, sel);
      }
 
      nir_ssa_dest_init(&sel->instr, &sel->dest.dest,
diff --git a/src/compiler/nir/nir_opt_undef.c b/src/compiler/nir/nir_opt_undef.c
index 49b6733ca3f..0d63de7b8cb 100644
--- a/src/compiler/nir/nir_opt_undef.c
+++ b/src/compiler/nir/nir_opt_undef.c
@@ -56,7 +56,8 @@ opt_undef_csel(nir_alu_instr *instr)
        */
       nir_instr_rewrite_src(&instr->instr, &instr->src[0].src,
                             instr->src[i == 1 ? 2 : 1].src);
-      nir_alu_src_copy(&instr->src[0], &instr->src[i == 1 ? 2 : 1]);
+      nir_alu_src_copy(&instr->src[0], &instr->src[i == 1 ? 2 : 1],
+                       instr);
 
       nir_src empty_src;
       memset(&empty_src, 0, sizeof(empty_src));
diff --git a/src/compiler/nir/nir_search.c b/src/compiler/nir/nir_search.c
index 6ff44f91891..596db69f2b8 100644
--- a/src/compiler/nir/nir_search.c
+++ b/src/compiler/nir/nir_search.c
@@ -526,7 +526,8 @@ construct_value(nir_builder *build,
       assert(state->variables_seen & (1 << var->variable));
 
       nir_alu_src val = { NIR_SRC_INIT };
-      nir_alu_src_copy(&val, &state->variables[var->variable]);
+      nir_alu_src_copy(&val, &state->variables[var->variable],
+                       (void *)build->shader);
       assert(!var->is_constant);
 
       for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
diff --git a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
index acd341da983..74cec72c814 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
+++ b/src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c
@@ -691,7 +691,7 @@ insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
    unsigned write_mask = (1u << start_idx);
 
    nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
-   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx]);
+   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx], mov);
 
    mov->src[0].swizzle[0] = vec->src[start_idx].swizzle[0];
    mov->src[0].negate = vec->src[start_idx].negate;
diff --git a/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c b/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c
index 4cd37bdd243..7506c0f90be 100644
--- a/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c
+++ b/src/gallium/drivers/lima/ir/lima_nir_split_load_input.c
@@ -85,7 +85,7 @@ lima_nir_split_load_input_instr(nir_builder *b,
       nir_intrinsic_set_dest_type(new_intrin, nir_intrinsic_dest_type(intrin));
 
    /* offset */
-   nir_src_copy(&new_intrin->src[0], &intrin->src[0]);
+   nir_src_copy(&new_intrin->src[0], &intrin->src[0], new_intrin);
 
    nir_builder_instr_insert(b, &new_intrin->instr);
    nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c b/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
index 583eb12d89e..35fd5cfffa1 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
+++ b/src/gallium/drivers/r600/sfn/sfn_nir_vectorize_vs_inputs.c
@@ -159,7 +159,7 @@ r600_create_new_load(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *va
 
    if (intr->intrinsic == nir_intrinsic_interp_deref_at_offset ||
       intr->intrinsic == nir_intrinsic_interp_deref_at_sample)
-      nir_src_copy(&new_intr->src[1], &intr->src[1]);
+      nir_src_copy(&new_intr->src[1], &intr->src[1], &new_intr->instr);
 
    nir_builder_instr_insert(b, &new_intr->instr);
 
diff --git a/src/gallium/drivers/r600/sfn/tests/sfn_valuefactory_test.cpp b/src/gallium/drivers/r600/sfn/tests/sfn_valuefactory_test.cpp
index 2032fe7ee6b..f0527f77642 100644
--- a/src/gallium/drivers/r600/sfn/tests/sfn_valuefactory_test.cpp
+++ b/src/gallium/drivers/r600/sfn/tests/sfn_valuefactory_test.cpp
@@ -150,7 +150,7 @@ TEST_F(ValuefactoryTest, test_create_register_array_indirect_access)
    mov->dest.dest.reg.base_offset = 0;
    mov->dest.dest.reg.indirect = (nir_src *)calloc(1, sizeof(nir_src));
    nir_src addr = nir_src_for_ssa(c2);
-   nir_src_copy(mov->dest.dest.reg.indirect, &addr);
+   nir_src_copy(mov->dest.dest.reg.indirect, &addr, mov);
    nir_builder_instr_insert(&b, &mov->instr);
 
    auto addr_reg = factory->src(addr, 0);
diff --git a/src/gallium/drivers/zink/zink_compiler.c b/src/gallium/drivers/zink/zink_compiler.c
index d35ed49ea72..5417bfd9502 100644
--- a/src/gallium/drivers/zink/zink_compiler.c
+++ b/src/gallium/drivers/zink/zink_compiler.c
@@ -1273,7 +1273,7 @@ rewrite_atomic_ssbo_instr(nir_builder *b, nir_instr *instr, struct bo_vars *bo)
    new_instr->src[0] = nir_src_for_ssa(&deref_arr->dest.ssa);
    /* deref ops have no offset src, so copy the srcs after it */
    for (unsigned i = 2; i < nir_intrinsic_infos[intr->intrinsic].num_srcs; i++)
-      nir_src_copy(&new_instr->src[i - 1], &intr->src[i]);
+      nir_src_copy(&new_instr->src[i - 1], &intr->src[i], new_instr);
    nir_builder_instr_insert(b, &new_instr->instr);
 
    result[i] = &new_instr->dest.ssa;
diff --git a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
index 2141385ccf3..217fc4b0774 100644
--- a/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
+++ b/src/gallium/drivers/zink/zink_lower_cubemap_to_array.c
@@ -171,7 +171,7 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *
          nir_ssa_def *c = nir_channels(b, psrc->ssa,
                                        BITFIELD_MASK(nir_tex_instr_src_size(array_tex, s)));
          array_tex->src[s].src = nir_src_for_ssa(c);
       } else
-         nir_src_copy(&array_tex->src[s].src, psrc);
+         nir_src_copy(&array_tex->src[s].src, psrc, array_tex);
       s++;
    }
 
@@ -432,7 +432,7 @@ lower_tex_to_txl(nir_builder *b, nir_tex_instr *tex)
    for (int i = 0; i < tex->num_srcs; i++) {
       if (i == bias_idx)
          continue;
-      nir_src_copy(&txl->src[s].src, &tex->src[i].src);
+      nir_src_copy(&txl->src[s].src, &tex->src[i].src, txl);
       txl->src[s].src_type = tex->src[i].src_type;
       s++;
    }
diff --git a/src/imagination/rogue/nir/rogue_nir_lower_io.c b/src/imagination/rogue/nir/rogue_nir_lower_io.c
index 63cbae1867a..dba7e5de3bf 100644
--- a/src/imagination/rogue/nir/rogue_nir_lower_io.c
+++ b/src/imagination/rogue/nir/rogue_nir_lower_io.c
@@ -87,7 +87,7 @@ static void lower_load_ubo_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
       nir_intrinsic_set_range(chan_intr, scaled_range);
 
       /* Base (desc_set, binding). */
-      nir_src_copy(&chan_intr->src[0], &intr->src[0]);
+      nir_src_copy(&chan_intr->src[0], &intr->src[0], &chan_intr->instr);
 
      /* Offset (unused). */
      chan_intr->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
diff --git a/src/intel/compiler/brw_nir_opt_peephole_ffma.c b/src/intel/compiler/brw_nir_opt_peephole_ffma.c
index 2d28daeddc7..ce8d6376055 100644
--- a/src/intel/compiler/brw_nir_opt_peephole_ffma.c
+++ b/src/intel/compiler/brw_nir_opt_peephole_ffma.c
@@ -243,7 +243,7 @@ brw_nir_opt_peephole_ffma_instr(nir_builder *b,
         for (unsigned j = 0; j < add->dest.dest.ssa.num_components; j++)
            ffma->src[i].swizzle[j] = mul->src[i].swizzle[swizzle[j]];
      }
-     nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src]);
+     nir_alu_src_copy(&ffma->src[2], &add->src[1 - add_mul_src], ffma);
 
      assert(add->dest.dest.is_ssa);
 
diff --git a/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c b/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c
index fdc77d87988..58f81c20803 100644
--- a/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c
+++ b/src/intel/vulkan/anv_nir_lower_ycbcr_textures.c
@@ -135,7 +135,7 @@ create_plane_tex_instr_implicit(struct ycbcr_state *state,
       }
       FALLTHROUGH;
    default:
-      nir_src_copy(&tex->src[i].src, &old_tex->src[i].src);
+      nir_src_copy(&tex->src[i].src, &old_tex->src[i].src, tex);
       break;
    }
 }
diff --git a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c
index 41b28ab48b3..936875b2f3f 100644
--- a/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c
+++ b/src/microsoft/compiler/dxil_nir_lower_int_cubemaps.c
@@ -210,7 +210,7 @@ create_array_tex_from_cube_tex(nir_builder *b, nir_tex_instr *tex, nir_ssa_def *
       nir_src *psrc = (tex->src[i].src_type == nir_tex_src_coord) ?
                          &coord_src : &tex->src[i].src;
 
-      nir_src_copy(&array_tex->src[i].src, psrc);
+      nir_src_copy(&array_tex->src[i].src, psrc, array_tex);
       array_tex->src[i].src_type = tex->src[i].src_type;
    }
 
diff --git a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c
index 4811fa37052..4546a79a1cf 100644
--- a/src/microsoft/compiler/dxil_nir_lower_int_samplers.c
+++ b/src/microsoft/compiler/dxil_nir_lower_int_samplers.c
@@ -81,7 +81,7 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
    nir_ssa_def *ssa_src = nir_channels(b, tex->src[coord_index].src.ssa,
                                        (1 << coord_components) - 1);
    nir_src src = nir_src_for_ssa(ssa_src);
-   nir_src_copy(&tql->src[0].src, &src);
+   nir_src_copy(&tql->src[0].src, &src, tql);
    tql->src[0].src_type = nir_tex_src_coord;
 
    unsigned idx = 1;
@@ -92,7 +92,7 @@ dx_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
          tex->src[i].src_type == nir_tex_src_sampler_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle ||
          tex->src[i].src_type == nir_tex_src_sampler_handle) {
-         nir_src_copy(&tql->src[idx].src, &tex->src[i].src);
+         nir_src_copy(&tql->src[idx].src, &tex->src[i].src, tql);
          tql->src[idx].src_type = tex->src[i].src_type;
         idx++;
      }
@@ -278,7 +278,7 @@ create_txf_from_tex(nir_builder *b, nir_tex_instr *tex)
      if (tex->src[i].src_type == nir_tex_src_texture_deref ||
          tex->src[i].src_type == nir_tex_src_texture_offset ||
          tex->src[i].src_type == nir_tex_src_texture_handle) {
-         nir_src_copy(&txf->src[idx].src, &tex->src[i].src);
+         nir_src_copy(&txf->src[idx].src, &tex->src[i].src, txf);
         txf->src[idx].src_type = tex->src[i].src_type;
         idx++;
      }
diff --git a/src/panfrost/midgard/midgard_errata_lod.c b/src/panfrost/midgard/midgard_errata_lod.c
index be5800ed2d3..7ceea1c23c6 100644
--- a/src/panfrost/midgard/midgard_errata_lod.c
+++ b/src/panfrost/midgard/midgard_errata_lod.c
@@ -53,7 +53,7 @@ nir_lod_errata_instr(nir_builder *b, nir_instr *instr, void *data)
 
    /* TODO: Indirect samplers, separate sampler objects XXX */
    nir_src idx = nir_src_for_ssa(nir_imm_int(b, tex->texture_index));
-   nir_src_copy(&l->src[0], &idx);
+   nir_src_copy(&l->src[0], &idx, l);
   nir_builder_instr_insert(b, &l->instr);
 
   nir_ssa_def *params = &l->dest.ssa;