nir: Drop nir_dest

Instead, we replace every use of it with nir_def.  Most of this commit
was generated by sed:

   sed -i -e 's/dest.ssa/def/g' src/**/*.h src/**/*.c src/**/*.cpp

A few manual fixups were required in lima and the nir_legacy code.
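For readers skimming the diff, a minimal sketch of the resulting pattern
(taken from the first hunk below): the nir_dest wrapper around the SSA
value disappears and code points at the embedded nir_def directly.

   /* before: the SSA def lived inside a nir_dest wrapper */
   nir_def_rewrite_uses(&intrin->dest.ssa, replacement);

   /* after: the nir_def is embedded in the instruction itself */
   nir_def_rewrite_uses(&intrin->def, replacement);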

Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24674>
Faith Ekstrand 2023-08-14 11:56:00 -05:00 committed by Marge Bot
parent 6c1d32581a
commit 4695bebc79
346 changed files with 2929 additions and 2949 deletions


@@ -134,7 +134,7 @@ lower_intrinsic_to_arg(nir_builder *b, nir_instr *instr, void *state)
}
assert(replacement);
-nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(&intrin->instr);
return true;
}


@@ -272,12 +272,12 @@ lower_gs_per_vertex_input_load(nir_builder *b,
nir_def *off = gs_per_vertex_input_offset(b, st, intrin);
if (st->gfx_level >= GFX9)
-return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
unsigned wave_size = 64u; /* GFX6-8 only support wave64 */
nir_def *ring = nir_load_ring_esgs_amd(b);
return emit_split_buffer_load(b, ring, off, nir_imm_zero(b, 1, 32), 4u * wave_size,
-intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+intrin->def.num_components, intrin->def.bit_size);
}
static bool


@@ -98,8 +98,8 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
new_intrin->num_components = intrin->num_components;
if (op != nir_intrinsic_store_global_amd)
-nir_def_init(&new_intrin->instr, &new_intrin->dest.ssa,
-intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size);
+nir_def_init(&new_intrin->instr, &new_intrin->def,
+intrin->def.num_components, intrin->def.bit_size);
unsigned num_src = nir_intrinsic_infos[intrin->intrinsic].num_srcs;
for (unsigned i = 0; i < num_src; i++)
@@ -121,7 +121,7 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
nir_builder_instr_insert(b, &new_intrin->instr);
if (op != nir_intrinsic_store_global_amd)
-nir_def_rewrite_uses(&intrin->dest.ssa, &new_intrin->dest.ssa);
+nir_def_rewrite_uses(&intrin->def, &new_intrin->def);
nir_instr_remove(&intrin->instr);
return true;


@@ -299,7 +299,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
nir_def *desc = NULL, *result = NULL;
ASSERTED const char *intr_name;
-nir_def *dst = &intr->dest.ssa;
+nir_def *dst = &intr->def;
b->cursor = nir_before_instr(instr);
switch (intr->intrinsic) {
@@ -357,7 +357,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
case nir_intrinsic_image_load:
case nir_intrinsic_image_deref_load:
case nir_intrinsic_bindless_image_load:
-result = emulated_image_load(b, intr->dest.ssa.num_components, intr->dest.ssa.bit_size,
+result = emulated_image_load(b, intr->def.num_components, intr->def.bit_size,
desc, intr->src[1].ssa, access, dim, is_array, true);
nir_def_rewrite_uses_after(dst, result, instr);
nir_instr_remove(instr);
@@ -378,7 +378,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
nir_tex_instr *new_tex;
nir_def *coord = NULL, *desc = NULL, *sampler_desc = NULL, *result = NULL;
-nir_def *dst = &tex->dest.ssa;
+nir_def *dst = &tex->def;
b->cursor = nir_before_instr(instr);
switch (tex->op) {
@@ -400,10 +400,10 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
new_tex->dest_type = nir_type_int32;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
-nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+nir_def_init(&new_tex->instr, &new_tex->def,
nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
-desc = &new_tex->dest.ssa;
+desc = &new_tex->def;
break;
case nir_tex_src_sampler_deref:
@@ -419,10 +419,10 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
new_tex->dest_type = nir_type_int32;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
-nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+nir_def_init(&new_tex->instr, &new_tex->def,
nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
-sampler_desc = &new_tex->dest.ssa;
+sampler_desc = &new_tex->def;
break;
case nir_tex_src_coord:
@@ -443,7 +443,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
switch (tex->op) {
case nir_texop_txf:
-result = emulated_image_load(b, tex->dest.ssa.num_components, tex->dest.ssa.bit_size,
+result = emulated_image_load(b, tex->def.num_components, tex->def.bit_size,
desc, coord,
ACCESS_RESTRICT | ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER,
tex->sampler_dim, tex->is_array, true);
@@ -453,7 +453,7 @@ static bool lower_image_opcodes(nir_builder *b, nir_instr *instr, void *data)
case nir_texop_tex:
case nir_texop_txl:
-result = emulated_tex_level_zero(b, tex->dest.ssa.num_components, tex->dest.ssa.bit_size,
+result = emulated_tex_level_zero(b, tex->def.num_components, tex->def.bit_size,
desc, sampler_desc, coord, tex->sampler_dim, tex->is_array);
nir_def_rewrite_uses_after(dst, result, instr);
nir_instr_remove(instr);


@@ -1164,15 +1164,15 @@ find_reusable_ssa_def(nir_instr *instr)
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (!nir_intrinsic_can_reorder(intrin) ||
!nir_intrinsic_infos[intrin->intrinsic].has_dest ||
-intrin->dest.ssa.divergent)
+intrin->def.divergent)
return NULL;
-return &intrin->dest.ssa;
+return &intrin->def;
}
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
-if (phi->dest.ssa.divergent)
+if (phi->def.divergent)
return NULL;
-return &phi->dest.ssa;
+return &phi->def;
}
default:
return NULL;
@@ -3913,8 +3913,8 @@ ms_load_arrayed_output_intrin(nir_builder *b,
unsigned location = nir_intrinsic_io_semantics(intrin).location;
unsigned component_offset = nir_intrinsic_component(intrin);
-unsigned bit_size = intrin->dest.ssa.bit_size;
-unsigned num_components = intrin->dest.ssa.num_components;
+unsigned bit_size = intrin->def.bit_size;
+unsigned num_components = intrin->def.num_components;
unsigned load_bit_size = MAX2(bit_size, 32);
nir_def *load =


@@ -187,7 +187,7 @@ lower_ps_load_barycentric(nir_builder *b, nir_intrinsic_instr *intrin, lower_ps_
b->cursor = nir_before_instr(&intrin->instr);
nir_def *replacement = nir_load_var(b, var);
-nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(&intrin->instr);
return true;
@@ -255,7 +255,7 @@ lower_ps_load_sample_mask_in(nir_builder *b, nir_intrinsic_instr *intrin, lower_
nir_def *sample_mask = nir_load_sample_mask_in(b);
nir_def *replacement = nir_iand(b, sample_mask, submask);
-nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(&intrin->instr);
return true;


@@ -212,7 +212,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
bool is_array;
nir_def *desc = NULL;
-dst = &intr->dest.ssa;
+dst = &intr->def;
b->cursor = nir_before_instr(instr);
switch (intr->intrinsic) {
@@ -268,7 +268,7 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
nir_def *desc = NULL;
nir_src *lod = NULL;
-dst = &tex->dest.ssa;
+dst = &tex->def;
b->cursor = nir_before_instr(instr);
switch (tex->op) {
@@ -288,10 +288,10 @@ static bool lower_resinfo(nir_builder *b, nir_instr *instr, void *data)
new_tex->dest_type = nir_type_int32;
nir_src_copy(&new_tex->src[0].src, &tex->src[i].src, &new_tex->instr);
new_tex->src[0].src_type = tex->src[i].src_type;
-nir_def_init(&new_tex->instr, &new_tex->dest.ssa,
+nir_def_init(&new_tex->instr, &new_tex->def,
nir_tex_instr_dest_size(new_tex), 32);
nir_builder_instr_insert(b, &new_tex->instr);
-desc = &new_tex->dest.ssa;
+desc = &new_tex->def;
break;
case nir_tex_src_lod:


@@ -55,7 +55,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
return false;
}
-unsigned bit_size = intr->dest.ssa.bit_size;
+unsigned bit_size = intr->def.bit_size;
if (bit_size >= 32)
return false;
@@ -70,15 +70,15 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
nir_src *src_offset = nir_get_io_offset_src(intr);
nir_def *offset = src_offset->ssa;
-nir_def *result = &intr->dest.ssa;
+nir_def *result = &intr->def;
/* Change the load to 32 bits per channel, update the channel count,
* and increase the declared load alignment.
*/
-intr->dest.ssa.bit_size = 32;
+intr->def.bit_size = 32;
if (align_mul == 4 && align_offset == 0) {
-intr->num_components = intr->dest.ssa.num_components =
+intr->num_components = intr->def.num_components =
DIV_ROUND_UP(num_components, comp_per_dword);
/* Aligned loads. Just bitcast the vector and trim it if there are
@@ -87,7 +87,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
b->cursor = nir_after_instr(instr);
result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
-nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+nir_def_rewrite_uses_after(&intr->def, result,
result->parent_instr);
return true;
}
@@ -95,7 +95,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
/* Multi-component unaligned loads may straddle the dword boundary.
* E.g. for 2 components, we need to load an extra dword, and so on.
*/
-intr->num_components = intr->dest.ssa.num_components =
+intr->num_components = intr->def.num_components =
DIV_ROUND_UP(4 - align_mul + align_offset + num_components * component_size, 4);
nir_intrinsic_set_align(intr,
@@ -121,7 +121,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
result = nir_extract_bits(b, &result, 1, comp_offset * bit_size,
num_components, bit_size);
-nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+nir_def_rewrite_uses_after(&intr->def, result,
result->parent_instr);
return true;
}
@@ -203,7 +203,7 @@ lower_subdword_loads(nir_builder *b, nir_instr *instr, void *data)
result = nir_vec(b, elems, intr->num_components);
result = nir_extract_bits(b, &result, 1, 0, num_components, bit_size);
-nir_def_rewrite_uses_after(&intr->dest.ssa, result,
+nir_def_rewrite_uses_after(&intr->def, result,
result->parent_instr);
return true;
}


@@ -214,8 +214,8 @@ lower_taskmesh_payload_load(nir_builder *b,
lower_tsms_io_state *s)
{
unsigned base = nir_intrinsic_base(intrin);
-unsigned num_components = intrin->dest.ssa.num_components;
-unsigned bit_size = intrin->dest.ssa.bit_size;
+unsigned num_components = intrin->def.num_components;
+unsigned bit_size = intrin->def.bit_size;
nir_def *ptr =
b->shader->info.stage == MESA_SHADER_TASK ?


@@ -402,7 +402,7 @@ lower_hs_per_vertex_input_load(nir_builder *b,
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
nir_def *off = hs_per_vertex_input_lds_offset(b, st, intrin);
-return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
}
static nir_def *
@@ -480,7 +480,7 @@ lower_hs_output_load(nir_builder *b,
lower_tess_io_state *st)
{
nir_def *off = hs_output_lds_offset(b, st, intrin);
-return nir_load_shared(b, intrin->dest.ssa.num_components, intrin->dest.ssa.bit_size, off);
+return nir_load_shared(b, intrin->def.num_components, intrin->def.bit_size, off);
}
static void
@@ -704,8 +704,8 @@ lower_tes_input_load(nir_builder *b,
nir_def *zero = nir_imm_int(b, 0);
-return nir_load_buffer_amd(b, intrin->dest.ssa.num_components,
-intrin->dest.ssa.bit_size, offchip_ring,
+return nir_load_buffer_amd(b, intrin->def.num_components,
+intrin->def.bit_size, offchip_ring,
off, offchip_offset, zero,
.access = ACCESS_COHERENT);
}

File diff suppressed because it is too large


@@ -94,7 +94,7 @@ only_used_by_cross_lane_instrs(nir_def* ssa, bool follow_phis = true)
return false;
nir_phi_instr* phi = nir_instr_as_phi(src->parent_instr);
-if (!only_used_by_cross_lane_instrs(&phi->dest.ssa, false))
+if (!only_used_by_cross_lane_instrs(&phi->def, false))
return false;
continue;
@@ -461,8 +461,8 @@ init_context(isel_context* ctx, nir_shader* shader)
if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
break;
if (intrinsic->intrinsic == nir_intrinsic_strict_wqm_coord_amd) {
-regclasses[intrinsic->dest.ssa.index] =
-RegClass::get(RegType::vgpr, intrinsic->dest.ssa.num_components * 4 +
+regclasses[intrinsic->def.index] =
+RegClass::get(RegType::vgpr, intrinsic->def.num_components * 4 +
nir_intrinsic_base(intrinsic))
.as_linear();
break;
@@ -542,7 +542,7 @@ init_context(isel_context* ctx, nir_shader* shader)
* it is beneficial to use a VGPR destination. This is because this allows
* to put the s_waitcnt further down, which decreases latency.
*/
-if (only_used_by_cross_lane_instrs(&intrinsic->dest.ssa)) {
+if (only_used_by_cross_lane_instrs(&intrinsic->def)) {
type = RegType::vgpr;
break;
}
@@ -560,7 +560,7 @@ init_context(isel_context* ctx, nir_shader* shader)
case nir_intrinsic_load_ubo:
case nir_intrinsic_load_ssbo:
case nir_intrinsic_load_global_amd:
-type = intrinsic->dest.ssa.divergent ? RegType::vgpr : RegType::sgpr;
+type = intrinsic->def.divergent ? RegType::vgpr : RegType::sgpr;
break;
case nir_intrinsic_load_view_index:
type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
@@ -573,22 +573,21 @@ init_context(isel_context* ctx, nir_shader* shader)
}
break;
}
-RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components,
-intrinsic->dest.ssa.bit_size);
-regclasses[intrinsic->dest.ssa.index] = rc;
+RegClass rc =
+get_reg_class(ctx, type, intrinsic->def.num_components, intrinsic->def.bit_size);
+regclasses[intrinsic->def.index] = rc;
break;
}
case nir_instr_type_tex: {
nir_tex_instr* tex = nir_instr_as_tex(instr);
-RegType type = tex->dest.ssa.divergent ? RegType::vgpr : RegType::sgpr;
+RegType type = tex->def.divergent ? RegType::vgpr : RegType::sgpr;
if (tex->op == nir_texop_texture_samples) {
-assert(!tex->dest.ssa.divergent);
+assert(!tex->def.divergent);
}
-RegClass rc =
-get_reg_class(ctx, type, tex->dest.ssa.num_components, tex->dest.ssa.bit_size);
-regclasses[tex->dest.ssa.index] = rc;
+RegClass rc = get_reg_class(ctx, type, tex->def.num_components, tex->def.bit_size);
+regclasses[tex->def.index] = rc;
break;
}
case nir_instr_type_ssa_undef: {
@@ -601,11 +600,11 @@ init_context(isel_context* ctx, nir_shader* shader)
case nir_instr_type_phi: {
nir_phi_instr* phi = nir_instr_as_phi(instr);
RegType type = RegType::sgpr;
-unsigned num_components = phi->dest.ssa.num_components;
-assert((phi->dest.ssa.bit_size != 1 || num_components == 1) &&
+unsigned num_components = phi->def.num_components;
+assert((phi->def.bit_size != 1 || num_components == 1) &&
"Multiple components not supported on boolean phis.");
-if (phi->dest.ssa.divergent) {
+if (phi->def.divergent) {
type = RegType::vgpr;
} else {
nir_foreach_phi_src (src, phi) {
@ -614,10 +613,10 @@ init_context(isel_context* ctx, nir_shader* shader)
}
}
-RegClass rc = get_reg_class(ctx, type, num_components, phi->dest.ssa.bit_size);
-if (rc != regclasses[phi->dest.ssa.index])
+RegClass rc = get_reg_class(ctx, type, num_components, phi->def.bit_size);
+if (rc != regclasses[phi->def.index])
done = false;
-regclasses[phi->dest.ssa.index] = rc;
+regclasses[phi->def.index] = rc;
break;
}
default: break;


@@ -1471,7 +1471,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
assert((!args->tfe || !args->d16) && "unsupported");
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
-unsigned mask = nir_def_components_read(&instr->dest.ssa);
+unsigned mask = nir_def_components_read(&instr->def);
/* Buffers don't support A16. */
if (args->a16)
@@ -1479,7 +1479,7 @@ static LLVMValueRef build_tex_intrinsic(struct ac_nir_context *ctx, const nir_te
return ac_build_buffer_load_format(&ctx->ac, args->resource, args->coords[0], ctx->ac.i32_0,
util_last_bit(mask), 0, true,
-instr->dest.ssa.bit_size == 16,
+instr->def.bit_size == 16,
args->tfe);
}
@@ -1566,11 +1566,11 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int
/* Load constant values from user SGPRS when possible, otherwise
* fallback to the default path that loads directly from memory.
*/
-if (LLVMIsConstant(src0) && instr->dest.ssa.bit_size >= 32) {
-unsigned count = instr->dest.ssa.num_components;
+if (LLVMIsConstant(src0) && instr->def.bit_size >= 32) {
+unsigned count = instr->def.num_components;
unsigned offset = index;
-if (instr->dest.ssa.bit_size == 64)
+if (instr->def.bit_size == 64)
count *= 2;
offset += LLVMConstIntGetZExtValue(src0);
@@ -1585,8 +1585,8 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int
for (unsigned i = 0; i < count; i++)
push_constants[i] = ac_get_arg(&ctx->ac, ctx->args->inline_push_consts[arg_index++]);
LLVMValueRef res = ac_build_gather_values(&ctx->ac, push_constants, count);
-return instr->dest.ssa.bit_size == 64
-? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "")
+return instr->def.bit_size == 64
+? LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), "")
: res;
}
}
@@ -1594,8 +1594,8 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int
struct ac_llvm_pointer pc = ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->push_constants);
ptr = LLVMBuildGEP2(ctx->ac.builder, pc.t, pc.v, &addr, 1, "");
-if (instr->dest.ssa.bit_size == 8) {
-unsigned load_dwords = instr->dest.ssa.num_components > 1 ? 2 : 1;
+if (instr->def.bit_size == 8) {
+unsigned load_dwords = instr->def.num_components > 1 ? 2 : 1;
LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i8, 4 * load_dwords);
ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
@@ -1617,13 +1617,13 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int
res = LLVMBuildTrunc(
ctx->ac.builder, res,
-LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.num_components * 8), "");
-if (instr->dest.ssa.num_components > 1)
+LLVMIntTypeInContext(ctx->ac.context, instr->def.num_components * 8), "");
+if (instr->def.num_components > 1)
res = LLVMBuildBitCast(ctx->ac.builder, res,
-LLVMVectorType(ctx->ac.i8, instr->dest.ssa.num_components), "");
+LLVMVectorType(ctx->ac.i8, instr->def.num_components), "");
return res;
-} else if (instr->dest.ssa.bit_size == 16) {
-unsigned load_dwords = instr->dest.ssa.num_components / 2 + 1;
+} else if (instr->def.bit_size == 16) {
+unsigned load_dwords = instr->def.num_components / 2 + 1;
LLVMTypeRef vec_type = LLVMVectorType(ctx->ac.i16, 2 * load_dwords);
ptr = ac_cast_ptr(&ctx->ac, ptr, vec_type);
LLVMValueRef res = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
@@ -1634,17 +1634,17 @@ static LLVMValueRef visit_load_push_constant(struct ac_nir_context *ctx, nir_int
ctx->ac.i32_0, ctx->ac.i32_1,
LLVMConstInt(ctx->ac.i32, 2, false), LLVMConstInt(ctx->ac.i32, 3, false),
LLVMConstInt(ctx->ac.i32, 4, false)};
-LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->dest.ssa.num_components);
-LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->dest.ssa.num_components);
+LLVMValueRef swizzle_aligned = LLVMConstVector(&mask[0], instr->def.num_components);
+LLVMValueRef swizzle_unaligned = LLVMConstVector(&mask[1], instr->def.num_components);
LLVMValueRef shuffle_aligned =
LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_aligned, "");
LLVMValueRef shuffle_unaligned =
LLVMBuildShuffleVector(ctx->ac.builder, res, res, swizzle_unaligned, "");
res = LLVMBuildSelect(ctx->ac.builder, cond, shuffle_unaligned, shuffle_aligned, "");
-return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->dest.ssa), "");
+return LLVMBuildBitCast(ctx->ac.builder, res, get_def_type(ctx, &instr->def), "");
}
-LLVMTypeRef ptr_type = get_def_type(ctx, &instr->dest.ssa);
+LLVMTypeRef ptr_type = get_def_type(ctx, &instr->def);
ptr = ac_cast_ptr(&ctx->ac, ptr, ptr_type);
return LLVMBuildLoad2(ctx->ac.builder, ptr_type, ptr, "");
@@ -1941,7 +1941,7 @@ static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_
struct waterfall_context wctx;
LLVMValueRef rsrc_base = enter_waterfall_ssbo(ctx, &wctx, instr, instr->src[0]);
-int elem_size_bytes = instr->dest.ssa.bit_size / 8;
+int elem_size_bytes = instr->def.bit_size / 8;
int num_components = instr->num_components;
enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
@@ -1950,7 +1950,7 @@ static LLVMValueRef visit_load_buffer(struct ac_nir_context *ctx, nir_intrinsic_
ctx->abi->load_ssbo(ctx->abi, rsrc_base, false, false) : rsrc_base;
LLVMValueRef vindex = ctx->ac.i32_0;
-LLVMTypeRef def_type = get_def_type(ctx, &instr->dest.ssa);
+LLVMTypeRef def_type = get_def_type(ctx, &instr->def);
LLVMTypeRef def_elem_type = num_components > 1 ? LLVMGetElementType(def_type) : def_type;
LLVMValueRef results[4];
@@ -2035,7 +2035,7 @@ static LLVMValueRef get_global_address(struct ac_nir_context *ctx,
static LLVMValueRef visit_load_global(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr)
{
-LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
+LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
LLVMValueRef val;
LLVMValueRef addr = get_global_address(ctx, instr, result_type);
@@ -2119,18 +2119,18 @@ static LLVMValueRef visit_load_ubo_buffer(struct ac_nir_context *ctx, nir_intrin
LLVMValueRef offset = get_src(ctx, instr->src[1]);
int num_components = instr->num_components;
-assert(instr->dest.ssa.bit_size >= 32 && instr->dest.ssa.bit_size % 32 == 0);
+assert(instr->def.bit_size >= 32 && instr->def.bit_size % 32 == 0);
if (ctx->abi->load_ubo)
rsrc = ctx->abi->load_ubo(ctx->abi, rsrc);
/* Convert to a 32-bit load. */
-if (instr->dest.ssa.bit_size == 64)
+if (instr->def.bit_size == 64)
num_components *= 2;
ret = ac_build_buffer_load(&ctx->ac, rsrc, num_components, NULL, offset, NULL,
ctx->ac.f32, 0, true, true);
-ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
+ret = LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
return exit_waterfall(ctx, &wctx, ret);
}
@@ -2326,8 +2326,8 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
args.tfe = instr->intrinsic == nir_intrinsic_bindless_image_sparse_load;
if (dim == GLSL_SAMPLER_DIM_BUF) {
-unsigned num_channels = util_last_bit(nir_def_components_read(&instr->dest.ssa));
-if (instr->dest.ssa.bit_size == 64)
+unsigned num_channels = util_last_bit(nir_def_components_read(&instr->def));
+if (instr->def.bit_size == 64)
num_channels = num_channels < 4 ? 2 : 4;
LLVMValueRef rsrc, vindex;
@@ -2338,11 +2338,11 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
bool can_speculate = access & ACCESS_CAN_REORDER;
res = ac_build_buffer_load_format(&ctx->ac, rsrc, vindex, ctx->ac.i32_0, num_channels,
args.access, can_speculate,
-instr->dest.ssa.bit_size == 16,
+instr->def.bit_size == 16,
args.tfe);
res = ac_build_expand(&ctx->ac, res, num_channels, args.tfe ? 5 : 4);
-res = ac_trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
+res = ac_trim_vector(&ctx->ac, res, instr->def.num_components);
res = ac_to_integer(&ctx->ac, res);
} else if (instr->intrinsic == nir_intrinsic_bindless_image_fragment_mask_load_amd) {
assert(ctx->ac.gfx_level < GFX11);
@@ -2368,12 +2368,12 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
args.dmask = 15;
args.attributes = access & ACCESS_CAN_REORDER ? AC_ATTR_INVARIANT_LOAD : 0;
-args.d16 = instr->dest.ssa.bit_size == 16;
+args.d16 = instr->def.bit_size == 16;
res = ac_build_image_opcode(&ctx->ac, &args);
}
-if (instr->dest.ssa.bit_size == 64) {
+if (instr->def.bit_size == 64) {
LLVMValueRef code = NULL;
if (args.tfe) {
code = ac_llvm_extract_elem(&ctx->ac, res, 4);
@@ -2525,7 +2525,7 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_int
params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[1]),
ctx->ac.i32_0, ""); /* vindex */
params[param_count++] = ctx->ac.i32_0; /* voffset */
-if (cmpswap && instr->dest.ssa.bit_size == 64) {
+if (cmpswap && instr->def.bit_size == 64) {
result = emit_ssbo_comp_swap_64(ctx, params[2], params[3], params[1], params[0], true);
} else {
LLVMTypeRef data_type = LLVMTypeOf(params[0]);
@@ -2637,7 +2637,7 @@ static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intr
LLVMValueRef values[16], derived_ptr, index, ret;
unsigned const_off = nir_intrinsic_base(instr);
-LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+LLVMTypeRef elem_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], const_off);
for (int chan = 0; chan < instr->num_components; chan++) {
@@ -2648,7 +2648,7 @@ static LLVMValueRef visit_load_shared(struct ac_nir_context *ctx, const nir_intr
ret = ac_build_gather_values(&ctx->ac, values, instr->num_components);
-return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
+return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
}
static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
@@ -2676,7 +2676,7 @@ static void visit_store_shared(struct ac_nir_context *ctx, const nir_intrinsic_i
static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr)
{
-LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+LLVMTypeRef pointee_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
LLVMValueRef ptr = get_memory_ptr(ctx, instr->src[0], 0);
LLVMValueRef values[2];
@@ -2689,7 +2689,7 @@ static LLVMValueRef visit_load_shared2_amd(struct ac_nir_context *ctx,
}
LLVMValueRef ret = ac_build_gather_values(&ctx->ac, values, 2);
-return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
+return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->def), "");
}
static void visit_store_shared2_amd(struct ac_nir_context *ctx, const nir_intrinsic_instr *instr)
@@ -2905,17 +2905,17 @@ static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr *
bool is_output)
{
LLVMValueRef values[8];
-LLVMTypeRef dest_type = get_def_type(ctx, &instr->dest.ssa);
+LLVMTypeRef dest_type = get_def_type(ctx, &instr->def);
LLVMTypeRef component_type;
unsigned base = nir_intrinsic_base(instr);
unsigned component = nir_intrinsic_component(instr);
-unsigned count = instr->dest.ssa.num_components;
+unsigned count = instr->def.num_components;
nir_src *vertex_index_src = nir_get_io_arrayed_index_src(instr);
LLVMValueRef vertex_index = vertex_index_src ? get_src(ctx, *vertex_index_src) : NULL;
nir_src offset = *nir_get_io_offset_src(instr);
LLVMValueRef indir_index = NULL;
-switch (instr->dest.ssa.bit_size) {
+switch (instr->def.bit_size) {
case 16:
case 32:
break;
@@ -2945,7 +2945,7 @@ static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr *
vertex_index, indir_index,
base, component,
count, !is_output);
-if (instr->dest.ssa.bit_size == 16) {
+if (instr->def.bit_size == 16) {
result = ac_to_integer(&ctx->ac, result);
result = LLVMBuildTrunc(ctx->ac.builder, result, dest_type, "");
}
@@ -2981,12 +2981,12 @@ static LLVMValueRef visit_load(struct ac_nir_context *ctx, nir_intrinsic_instr *
values[chan] = ac_build_fs_interp_mov(&ctx->ac, vertex_id, llvm_chan, attr_number,
ac_get_arg(&ctx->ac, ctx->args->prim_mask));
values[chan] = LLVMBuildBitCast(ctx->ac.builder, values[chan], ctx->ac.i32, "");
-if (instr->dest.ssa.bit_size == 16 &&
+if (instr->def.bit_size == 16 &&
nir_intrinsic_io_semantics(instr).high_16bits)
values[chan] = LLVMBuildLShr(ctx->ac.builder, values[chan], LLVMConstInt(ctx->ac.i32, 16, 0), "");
values[chan] =
LLVMBuildTruncOrBitCast(ctx->ac.builder, values[chan],
-instr->dest.ssa.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
+instr->def.bit_size == 16 ? ctx->ac.i16 : ctx->ac.i32, "");
}
LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, count);
@@ -3035,8 +3035,8 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
switch (instr->intrinsic) {
case nir_intrinsic_ballot:
result = ac_build_ballot(&ctx->ac, get_src(ctx, instr->src[0]));
-if (instr->dest.ssa.bit_size > ctx->ac.wave_size) {
-LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+if (instr->def.bit_size > ctx->ac.wave_size) {
+LLVMTypeRef dest_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
result = LLVMBuildZExt(ctx->ac.builder, result, dest_type, "");
}
break;
@@ -3057,7 +3057,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
values[i] = ctx->args->workgroup_ids[i].used
? ac_get_arg(&ctx->ac, ctx->args->workgroup_ids[i])
: ctx->ac.i32_0;
-if (instr->dest.ssa.bit_size == 64)
+if (instr->def.bit_size == 64)
values[i] = LLVMBuildZExt(ctx->ac.builder, values[i], ctx->ac.i64, "");
}
@@ -3156,7 +3156,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
result = ac_build_load_invariant(&ctx->ac,
ac_get_ptr_arg(&ctx->ac, ctx->args, ctx->args->num_work_groups), ctx->ac.i32_0);
}
-if (instr->dest.ssa.bit_size == 64)
+if (instr->def.bit_size == 64)
result = LLVMBuildZExt(ctx->ac.builder, result, LLVMVectorType(ctx->ac.i64, 3), "");
break;
case nir_intrinsic_load_local_invocation_index:
@@ -3305,7 +3305,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
unsigned index = nir_intrinsic_base(instr);
unsigned component = nir_intrinsic_component(instr);
result = load_interpolated_input(ctx, interp_param, index, component,
-instr->dest.ssa.num_components, instr->dest.ssa.bit_size,
+instr->def.num_components, instr->def.bit_size,
nir_intrinsic_io_semantics(instr).high_16bits);
break;
}
@@ -3418,10 +3418,10 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
case nir_intrinsic_load_scratch: {
LLVMValueRef offset = get_src(ctx, instr->src[0]);
LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->scratch, offset);
-LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
-LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1
+LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
+LLVMTypeRef vec_type = instr->def.num_components == 1
? comp_type
-: LLVMVectorType(comp_type, instr->dest.ssa.num_components);
+: LLVMVectorType(comp_type, instr->def.num_components);
result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
break;
}
@@ -3457,10 +3457,10 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
offset = LLVMBuildSelect(ctx->ac.builder, cond, offset, size, "");
LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->constant_data, offset);
-LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
-LLVMTypeRef vec_type = instr->dest.ssa.num_components == 1
+LLVMTypeRef comp_type = LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
+LLVMTypeRef vec_type = instr->def.num_components == 1
? comp_type
-: LLVMVectorType(comp_type, instr->dest.ssa.num_components);
+: LLVMVectorType(comp_type, instr->def.num_components);
result = LLVMBuildLoad2(ctx->ac.builder, vec_type, ptr, "");
break;
}
@@ -3479,7 +3479,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
LLVMValueRef addr_voffset = get_src(ctx, instr->src[src_base + 1]);
LLVMValueRef addr_soffset = get_src(ctx, instr->src[src_base + 2]);
LLVMValueRef vidx = idxen ? get_src(ctx, instr->src[src_base + 3]) : NULL;
-unsigned num_components = instr->dest.ssa.num_components;
+unsigned num_components = instr->def.num_components;
unsigned const_offset = nir_intrinsic_base(instr);
bool reorder = nir_intrinsic_can_reorder(instr);
enum gl_access_qualifier access = ac_get_mem_access_flags(instr);
@@ -3489,10 +3489,10 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
LLVMConstInt(ctx->ac.i32, const_offset, 0), "");
if (instr->intrinsic == nir_intrinsic_load_buffer_amd && uses_format) {
-assert(instr->dest.ssa.bit_size == 16 || instr->dest.ssa.bit_size == 32);
+assert(instr->def.bit_size == 16 || instr->def.bit_size == 32);
result = ac_build_buffer_load_format(&ctx->ac, descriptor, vidx, voffset, num_components,
access, reorder,
-instr->dest.ssa.bit_size == 16, false);
+instr->def.bit_size == 16, false);
result = ac_to_integer(&ctx->ac, result);
} else if (instr->intrinsic == nir_intrinsic_store_buffer_amd && uses_format) {
assert(instr->src[0].ssa->bit_size == 16 || instr->src[0].ssa->bit_size == 32);
@@ -3503,9 +3503,9 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
* Workaround by using i32 and casting to the correct type later.
*/
const unsigned fetch_num_components =
-num_components * MAX2(32, instr->dest.ssa.bit_size) / 32;
+num_components * MAX2(32, instr->def.bit_size) / 32;
LLVMTypeRef channel_type =
-LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->dest.ssa.bit_size));
+LLVMIntTypeInContext(ctx->ac.context, MIN2(32, instr->def.bit_size));
if (instr->intrinsic == nir_intrinsic_load_buffer_amd) {
result = ac_build_buffer_load(&ctx->ac, descriptor, fetch_num_components, vidx, voffset,
@@ -3527,9 +3527,9 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
result = ac_trim_vector(&ctx->ac, result, fetch_num_components);
/* Cast to larger than 32-bit sized components if needed. */
-if (instr->dest.ssa.bit_size > 32) {
+if (instr->def.bit_size > 32) {
LLVMTypeRef cast_channel_type =
-LLVMIntTypeInContext(ctx->ac.context, instr->dest.ssa.bit_size);
+LLVMIntTypeInContext(ctx->ac.context, instr->def.bit_size);
LLVMTypeRef cast_type =
num_components == 1 ? cast_channel_type :
LLVMVectorType(cast_channel_type, num_components);
@@ -3600,7 +3600,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
arg.used = true;
result = ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, arg));
if (ac_get_elem_bits(&ctx->ac, LLVMTypeOf(result)) != 32)
-result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+result = LLVMBuildBitCast(ctx->ac.builder, result, get_def_type(ctx, &instr->def), "");
break;
}
case nir_intrinsic_load_smem_amd: {
@@ -3610,7 +3610,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
bool is_addr_32bit = nir_src_bit_size(instr->src[0]) == 32;
int addr_space = is_addr_32bit ? AC_ADDR_SPACE_CONST_32BIT : AC_ADDR_SPACE_CONST;
-LLVMTypeRef result_type = get_def_type(ctx, &instr->dest.ssa);
+LLVMTypeRef result_type = get_def_type(ctx, &instr->def);
LLVMTypeRef byte_ptr_type = LLVMPointerType(ctx->ac.i8, addr_space);
LLVMValueRef addr = LLVMBuildIntToPtr(ctx->ac.builder, base, byte_ptr_type, "");
@@ -3785,7 +3785,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
return false;
}
if (result) {
-ctx->ssa_defs[instr->dest.ssa.index] = result;
+ctx->ssa_defs[instr->def.index] = result;
}
return true;
}
@@ -4067,7 +4067,7 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
args.sampler = LLVMBuildInsertElement(ctx->ac.builder, args.sampler, dword0, ctx->ac.i32_0, "");
}
-args.d16 = instr->dest.ssa.bit_size == 16;
+args.d16 = instr->def.bit_size == 16;
args.tfe = instr->is_sparse;
result = build_tex_intrinsic(ctx, instr, &args);
@@ -4090,7 +4090,7 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
LLVMBuildExtractElement(ctx->ac.builder, result, ctx->ac.i32_0, ""),
LLVMConstInt(ctx->ac.i32, 0x76543210, false), "");
} else if (nir_tex_instr_result_size(instr) != 4)
-result = ac_trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);
+result = ac_trim_vector(&ctx->ac, result, instr->def.num_components);
if (instr->is_sparse)
result = ac_build_concat(&ctx->ac, result, code);
@@ -4102,16 +4102,16 @@ static void visit_tex(struct ac_nir_context *ctx, nir_tex_instr *instr)
result = exit_waterfall(ctx, wctx + i, result);
}
-ctx->ssa_defs[instr->dest.ssa.index] = result;
+ctx->ssa_defs[instr->def.index] = result;
}
}
static void visit_phi(struct ac_nir_context *ctx, nir_phi_instr *instr)
{
-LLVMTypeRef type = get_def_type(ctx, &instr->dest.ssa);
+LLVMTypeRef type = get_def_type(ctx, &instr->def);
LLVMValueRef result = LLVMBuildPhi(ctx->ac.builder, type, "");
-ctx->ssa_defs[instr->dest.ssa.index] = result;
+ctx->ssa_defs[instr->def.index] = result;
_mesa_hash_table_insert(ctx->phis, instr, result);
}


@@ -63,7 +63,7 @@ build_nir_itob_compute_shader(struct radv_device *dev, bool is_3d)
nir_def *coord = nir_replicate(&b, tmp, 4);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval,
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
return b.shader;
@@ -213,7 +213,7 @@ build_nir_btoi_compute_shader(struct radv_device *dev, bool is_3d)
nir_def *img_coord = nir_vec4(&b, nir_channel(&b, coord, 0), nir_channel(&b, coord, 1),
is_3d ? nir_channel(&b, coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32));
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval,
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = dim);
return b.shader;
@@ -367,7 +367,7 @@ build_nir_btoi_r32g32b32_compute_shader(struct radv_device *dev)
nir_def *coord = nir_replicate(&b, local_pos, 4);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32),
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32),
nir_channel(&b, outval, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
}
@@ -494,8 +494,8 @@ build_nir_itoi_compute_shader(struct radv_device *dev, bool is_3d, int samples)
is_3d ? nir_channel(&b, dst_coord, 2) : nir_undef(&b, 1, 32), nir_undef(&b, 1, 32));
for (uint32_t i = 0; i < samples; i++) {
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_imm_int(&b, i),
-tex_vals[i], nir_imm_int(&b, 0), .image_dim = dim);
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_imm_int(&b, i), tex_vals[i],
+nir_imm_int(&b, 0), .image_dim = dim);
}
return b.shader;
@@ -665,7 +665,7 @@ build_nir_itoi_r32g32b32_compute_shader(struct radv_device *dev)
nir_def *dst_coord = nir_replicate(&b, dst_local_pos, 4);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, nir_undef(&b, 1, 32),
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, dst_coord, nir_undef(&b, 1, 32),
nir_channel(&b, outval, 0), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
}
@@ -779,8 +779,8 @@ build_nir_cleari_compute_shader(struct radv_device *dev, bool is_3d, int samples
global_id = nir_vec(&b, comps, 4);
for (uint32_t i = 0; i < samples; i++) {
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_imm_int(&b, i),
-clear_val, nir_imm_int(&b, 0), .image_dim = dim);
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, global_id, nir_imm_int(&b, i), clear_val,
+nir_imm_int(&b, 0), .image_dim = dim);
}
return b.shader;
@@ -929,7 +929,7 @@ build_nir_cleari_r32g32b32_compute_shader(struct radv_device *dev)
nir_def *coord = nir_replicate(&b, local_pos, 4);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32),
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32),
nir_channel(&b, clear_val, chan), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_BUF);
}


@@ -929,7 +929,7 @@ build_clear_dcc_comp_to_single_shader(struct radv_device *dev, bool is_msaa)
/* Store the clear color values. */
nir_def *sample_id = is_msaa ? nir_imm_int(&b, 0) : nir_undef(&b, 1, 32);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, sample_id, data, nir_imm_int(&b, 0),
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, sample_id, data, nir_imm_int(&b, 0),
.image_dim = dim, .image_array = true);
return b.shader;


@@ -51,8 +51,8 @@ build_dcc_retile_compute_shader(struct radv_device *dev, struct radeon_surf *sur
output_dcc->data.descriptor_set = 0;
output_dcc->data.binding = 1;
-nir_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->dest.ssa;
-nir_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->dest.ssa;
+nir_def *input_dcc_ref = &nir_build_deref_var(&b, input_dcc)->def;
+nir_def *output_dcc_ref = &nir_build_deref_var(&b, output_dcc)->def;
nir_def *coord = get_global_ids(&b, 2);
nir_def *zero = nir_imm_int(&b, 0);


@@ -58,7 +58,7 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev)
nir_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id);
-nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, global_id,
+nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->def, global_id,
nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
/* We need a SCOPE_DEVICE memory_scope because ACO will avoid
@@ -68,7 +68,7 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev)
nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_undef(&b, 1, 32), data,
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, global_id, nir_undef(&b, 1, 32), data,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
return b.shader;
}


@@ -426,13 +426,13 @@ build_shader(struct radv_device *dev)
nir_push_if(&b, is_3d);
{
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->dest.ssa, img_coord, nir_undef(&b, 1, 32),
-outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D);
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_3d)->def, img_coord, nir_undef(&b, 1, 32), outval,
+nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_3D);
}
nir_push_else(&b, NULL);
{
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->dest.ssa, img_coord, nir_undef(&b, 1, 32),
-outval, nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img_2d)->def, img_coord, nir_undef(&b, 1, 32), outval,
+nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
}
nir_pop_if(&b, NULL);
return b.shader;


@@ -56,7 +56,7 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
nir_def *img_coord = nir_vec4(&b, nir_channel(&b, global_id, 0), nir_channel(&b, global_id, 1), nir_undef(&b, 1, 32),
nir_undef(&b, 1, 32));
-nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, img_coord,
+nir_def *data = nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->def, img_coord,
nir_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
/* We need a SCOPE_DEVICE memory_scope because ACO will avoid
@@ -66,7 +66,7 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
nir_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), data,
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), data,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
return b.shader;
}


@@ -88,7 +88,7 @@ build_fmask_copy_compute_shader(struct radv_device *dev, int samples)
nir_def *outval = nir_build_tex_deref_instr(&b, nir_texop_fragment_fetch_amd, nir_build_deref_var(&b, input_img),
NULL, ARRAY_SIZE(frag_fetch_srcs), frag_fetch_srcs);
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, dst_coord, sample_id, outval,
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, dst_coord, sample_id, outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_MS);
radv_break_on_count(&b, counter, max_sample_index);


@@ -48,7 +48,7 @@ build_fmask_expand_compute_shader(struct radv_device *device, int samples)
output_img->data.access = ACCESS_NON_READABLE;
nir_deref_instr *input_img_deref = nir_build_deref_var(&b, input_img);
-nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->dest.ssa;
+nir_def *output_img_deref = &nir_build_deref_var(&b, output_img)->def;
nir_def *tex_coord = get_global_ids(&b, 3);


@@ -81,7 +81,7 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
nir_def *img_coord = nir_vec4(&b, nir_channel(&b, dst_coord, 0), nir_channel(&b, dst_coord, 1), nir_undef(&b, 1, 32),
nir_undef(&b, 1, 32));
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_undef(&b, 1, 32), outval,
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, img_coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
return b.shader;
}
@@ -174,7 +174,7 @@ build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples,
nir_def *coord = nir_vec4(&b, nir_channel(&b, img_coord, 0), nir_channel(&b, img_coord, 1),
nir_channel(&b, img_coord, 2), nir_undef(&b, 1, 32));
-nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, coord, nir_undef(&b, 1, 32), outval,
+nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->def, coord, nir_undef(&b, 1, 32), outval,
nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D, .image_array = true);
return b.shader;
}


@@ -97,9 +97,9 @@ visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intri
if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
assert(stride == 16);
-nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
+nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
} else {
-nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride)));
+nir_def_rewrite_uses(&intrin->def, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride)));
}
nir_instr_remove(&intrin->instr);
}
@@ -117,7 +117,7 @@ visit_vulkan_resource_reindex(nir_builder *b, apply_layout_state *state, nir_int
binding_ptr = nir_iadd_nuw(b, binding_ptr, index);
-nir_def_rewrite_uses(&intrin->dest.ssa, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
+nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
} else {
assert(desc_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
@@ -129,7 +129,7 @@ visit_vulkan_resource_reindex(nir_builder *b, apply_layout_state *state, nir_int
binding_ptr = nir_iadd_nuw(b, binding_ptr, index);
-nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1));
+nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1));
}
nir_instr_remove(&intrin->instr);
}
@@ -143,9 +143,9 @@ visit_load_vulkan_descriptor(nir_builder *b, apply_layout_state *state, nir_intr
nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa)));
nir_def *desc = nir_build_load_global(b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE);
-nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+nir_def_rewrite_uses(&intrin->def, desc);
} else {
-nir_def_rewrite_uses(&intrin->dest.ssa, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2));
+nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2));
}
nir_instr_remove(&intrin->instr);
}
@@ -211,7 +211,7 @@ visit_get_ssbo_size(nir_builder *b, apply_layout_state *state, nir_intrinsic_ins
size = nir_channel(b, desc, 2);
}
-nir_def_rewrite_uses(&intrin->dest.ssa, size);
+nir_def_rewrite_uses(&intrin->def, size);
nir_instr_remove(&intrin->instr);
}
@@ -358,7 +358,7 @@ update_image_intrinsic(nir_builder *b, apply_layout_state *state, nir_intrinsic_
nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM, NULL, !is_load);
if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) {
-nir_def_rewrite_uses(&intrin->dest.ssa, desc);
+nir_def_rewrite_uses(&intrin->def, desc);
nir_instr_remove(&intrin->instr);
} else {
nir_rewrite_image_intrinsic(intrin, desc, true);
@@ -477,7 +477,7 @@ apply_layout_to_tex(nir_builder *b, apply_layout_state *state, nir_tex_instr *te
}
if (tex->op == nir_texop_descriptor_amd) {
-nir_def_rewrite_uses(&tex->dest.ssa, image);
+nir_def_rewrite_uses(&tex->def, image);
nir_instr_remove(&tex->instr);
return;
}


@@ -499,7 +499,7 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
return false;
if (replacement)
-nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);


@@ -250,7 +250,7 @@ lower_load_barycentric_coord(nir_builder *b, lower_fs_barycentric_state *state,
}
}
-nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+nir_def_rewrite_uses(&intrin->def, new_dest);
nir_instr_remove(&intrin->instr);
return true;


@@ -63,7 +63,7 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
def = sample_coverage;
}
-nir_def_rewrite_uses(&intrin->dest.ssa, def);
+nir_def_rewrite_uses(&intrin->def, def);
nir_instr_remove(instr);
progress = true;
@@ -73,10 +73,10 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
if (!key->adjust_frag_coord_z)
continue;
-if (!(nir_def_components_read(&intrin->dest.ssa) & (1 << 2)))
+if (!(nir_def_components_read(&intrin->def) & (1 << 2)))
continue;
-nir_def *frag_z = nir_channel(&b, &intrin->dest.ssa, 2);
+nir_def *frag_z = nir_channel(&b, &intrin->def, 2);
/* adjusted_frag_z = fddx_fine(frag_z) * 0.0625 + frag_z */
nir_def *adjusted_frag_z = nir_fddx_fine(&b, frag_z);
@@ -90,8 +90,8 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
nir_def *cond = nir_ieq_imm(&b, x_rate, 1);
frag_z = nir_bcsel(&b, cond, adjusted_frag_z, frag_z);
-nir_def *new_dest = nir_vector_insert_imm(&b, &intrin->dest.ssa, frag_z, 2);
-nir_def_rewrite_uses_after(&intrin->dest.ssa, new_dest, new_dest->parent_instr);
+nir_def *new_dest = nir_vector_insert_imm(&b, &intrin->def, frag_z, 2);
+nir_def_rewrite_uses_after(&intrin->def, new_dest, new_dest->parent_instr);
progress = true;
break;
@@ -134,7 +134,7 @@ radv_nir_lower_fs_intrinsics(nir_shader *nir, const struct radv_shader_stage *fs
}
}
-nir_def_rewrite_uses(&intrin->dest.ssa, new_dest);
+nir_def_rewrite_uses(&intrin->def, new_dest);
nir_instr_remove(instr);
progress = true;


@@ -60,7 +60,7 @@ radv_nir_lower_intrinsics_early(nir_shader *nir, const struct radv_pipeline_key
continue;
}
-nir_def_rewrite_uses(&intrin->dest.ssa, def);
+nir_def_rewrite_uses(&intrin->def, def);
nir_instr_remove(instr);
progress = true;


@@ -701,7 +701,7 @@ radv_nir_lower_ray_queries(struct nir_shader *shader, struct radv_device *device
}
if (new_dest)
-nir_def_rewrite_uses(&intrinsic->dest.ssa, new_dest);
+nir_def_rewrite_uses(&intrinsic->def, new_dest);
nir_instr_remove(instr);
nir_instr_free(instr);


@@ -72,7 +72,7 @@ radv_nir_lower_view_index(nir_shader *nir, bool per_primitive)
layer->data.per_primitive = per_primitive;
b.cursor = nir_before_instr(instr);
nir_def *def = nir_load_var(&b, layer);
-nir_def_rewrite_uses(&load->dest.ssa, def);
+nir_def_rewrite_uses(&load->def, def);
/* Update inputs_read to reflect that the pass added a new input. */
nir->info.inputs_read |= VARYING_BIT_LAYER;


@@ -51,7 +51,7 @@ radv_nir_lower_viewport_to_zero(nir_shader *nir)
b.cursor = nir_before_instr(instr);
-nir_def_rewrite_uses(&intr->dest.ssa, nir_imm_zero(&b, 1, 32));
+nir_def_rewrite_uses(&intr->def, nir_imm_zero(&b, 1, 32));
progress = true;
break;
}


@@ -47,8 +47,8 @@ lower_load_vs_input_from_prolog(nir_builder *b, nir_intrinsic_instr *intrin, low
const unsigned base_offset = nir_src_as_uint(*offset_src);
const unsigned driver_location = base + base_offset - VERT_ATTRIB_GENERIC0;
const unsigned component = nir_intrinsic_component(intrin);
-const unsigned bit_size = intrin->dest.ssa.bit_size;
-const unsigned num_components = intrin->dest.ssa.num_components;
+const unsigned bit_size = intrin->def.bit_size;
+const unsigned num_components = intrin->def.num_components;
/* 64-bit inputs: they occupy twice as many 32-bit components.
* 16-bit inputs: they occupy a 32-bit component (not packed).
@@ -209,8 +209,8 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
const unsigned base = nir_intrinsic_base(intrin);
const unsigned base_offset = nir_src_as_uint(*offset_src);
const unsigned location = base + base_offset - VERT_ATTRIB_GENERIC0;
-const unsigned bit_size = intrin->dest.ssa.bit_size;
-const unsigned dest_num_components = intrin->dest.ssa.num_components;
+const unsigned bit_size = intrin->def.bit_size;
+const unsigned dest_num_components = intrin->def.num_components;
/* Convert the component offset to bit_size units.
* (Intrinsic component offset is in 32-bit units.)
@@ -225,7 +225,7 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
/* Bitmask of components in bit_size units
* of the current input load that are actually used.
*/
-const unsigned dest_use_mask = nir_def_components_read(&intrin->dest.ssa) << component;
+const unsigned dest_use_mask = nir_def_components_read(&intrin->def) << component;
/* If the input is entirely unused, just replace it with undef.
* This is just in case we debug this pass without running DCE first.
@@ -406,7 +406,7 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
replacement = lower_load_vs_input(b, intrin, s);
}
-nir_def_rewrite_uses(&intrin->dest.ssa, replacement);
+nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);


@@ -62,7 +62,7 @@ lower_rt_derefs(nir_shader *shader)
b.cursor = nir_before_instr(&deref->instr);
nir_deref_instr *replacement =
nir_build_deref_cast(&b, arg_offset, nir_var_function_temp, deref->var->type, 0);
-nir_def_rewrite_uses(&deref->dest.ssa, &replacement->dest.ssa);
+nir_def_rewrite_uses(&deref->def, &replacement->def);
nir_instr_remove(&deref->instr);
}
}
@@ -560,7 +560,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
}
if (ret)
-nir_def_rewrite_uses(&intr->dest.ssa, ret);
+nir_def_rewrite_uses(&intr->def, ret);
nir_instr_remove(instr);
break;
}
@@ -600,8 +600,8 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data)
b->cursor = nir_after_instr(instr);
if (intrin->intrinsic == nir_intrinsic_load_deref) {
-uint32_t num_components = intrin->dest.ssa.num_components;
-uint32_t bit_size = intrin->dest.ssa.bit_size;
+uint32_t num_components = intrin->def.num_components;
+uint32_t bit_size = intrin->def.bit_size;
nir_def *components[NIR_MAX_VEC_COMPONENTS];
@@ -626,7 +626,7 @@ lower_hit_attrib_deref(nir_builder *b, nir_instr *instr, void *data)
}
}
-nir_def_rewrite_uses(&intrin->dest.ssa, nir_vec(b, components, num_components));
+nir_def_rewrite_uses(&intrin->def, nir_vec(b, components, num_components));
} else {
nir_def *value = intrin->src[1].ssa;
uint32_t num_components = value->num_components;
@@ -913,12 +913,12 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
break;
case nir_intrinsic_load_ray_t_max:
-nir_def_rewrite_uses(&intrin->dest.ssa, hit_t);
+nir_def_rewrite_uses(&intrin->def, hit_t);
nir_instr_remove(&intrin->instr);
break;
case nir_intrinsic_load_ray_hit_kind:
-nir_def_rewrite_uses(&intrin->dest.ssa, hit_kind);
+nir_def_rewrite_uses(&intrin->def, hit_kind);
nir_instr_remove(&intrin->instr);
break;
@@ -939,8 +939,8 @@ lower_any_hit_for_intersection(nir_shader *any_hit)
break;
case nir_intrinsic_load_rt_arg_scratch_offset_amd:
b->cursor = nir_after_instr(instr);
-nir_def *arg_offset = nir_isub(b, &intrin->dest.ssa, scratch_offset);
-nir_def_rewrite_uses_after(&intrin->dest.ssa, arg_offset, arg_offset->parent_instr);
+nir_def *arg_offset = nir_isub(b, &intrin->def, scratch_offset);
+nir_def_rewrite_uses_after(&intrin->def, arg_offset, arg_offset->parent_instr);
break;
default:
@@ -1030,7 +1030,7 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit)
nir_push_if(b, nir_inot(b, nir_load_intersection_opaque_amd(b)));
{
nir_def *params[] = {
&nir_build_deref_var(b, commit_tmp)->dest.ssa,
&nir_build_deref_var(b, commit_tmp)->def,
hit_t,
hit_kind,
nir_imm_int(b, intersection->scratch_size),
@ -1049,7 +1049,7 @@ nir_lower_intersection_shader(nir_shader *intersection, nir_shader *any_hit)
nir_pop_if(b, NULL);
nir_def *accepted = nir_load_var(b, commit_tmp);
nir_def_rewrite_uses(&intrin->dest.ssa, accepted);
nir_def_rewrite_uses(&intrin->def, accepted);
}
}
nir_metadata_preserve(impl, nir_metadata_none);

View file

@ -40,8 +40,8 @@ gather_intrinsic_load_input_info(const nir_shader *nir, const nir_intrinsic_inst
case MESA_SHADER_VERTEX: {
unsigned idx = nir_intrinsic_io_semantics(instr).location;
unsigned component = nir_intrinsic_component(instr);
unsigned mask = nir_def_components_read(&instr->dest.ssa);
mask = (instr->dest.ssa.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component;
unsigned mask = nir_def_components_read(&instr->def);
mask = (instr->def.bit_size == 64 ? util_widen_mask(mask, 2) : mask) << component;
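/* Example: a 64-bit load whose .xy are read has mask 0b0011; with
 * bit_size == 64, util_widen_mask() doubles each bit to 0b1111 so both
 * 32-bit halves of each component count as used.
 */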
info->vs.input_usage_mask[idx] |= mask & 0xf;
if (mask >> 4)
@ -115,9 +115,9 @@ gather_push_constant_info(const nir_shader *nir, const nir_intrinsic_instr *inst
{
info->loads_push_constants = true;
if (nir_src_is_const(instr->src[0]) && instr->dest.ssa.bit_size >= 32) {
if (nir_src_is_const(instr->src[0]) && instr->def.bit_size >= 32) {
uint32_t start = (nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[0])) / 4u;
uint32_t size = instr->num_components * (instr->dest.ssa.bit_size / 32u);
uint32_t size = instr->num_components * (instr->def.bit_size / 32u);
if (start + size <= (MAX_PUSH_CONSTANTS_SIZE / 4u)) {
info->inline_push_constant_mask |= u_bit_consecutive64(start, size);
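/* Example: base == 16, a constant src offset of 0, and a 4-component
 * 32-bit load give start == 4 and size == 4, marking dwords 4..7 in
 * inline_push_constant_mask.
 */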
@ -179,7 +179,7 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, s
break;
case nir_intrinsic_load_local_invocation_id:
case nir_intrinsic_load_workgroup_id: {
unsigned mask = nir_def_components_read(&instr->dest.ssa);
unsigned mask = nir_def_components_read(&instr->def);
while (mask) {
unsigned i = u_bit_scan(&mask);
@ -191,10 +191,10 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr, s
break;
}
case nir_intrinsic_load_frag_coord:
info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->dest.ssa);
info->ps.reads_frag_coord_mask |= nir_def_components_read(&instr->def);
break;
case nir_intrinsic_load_sample_pos:
info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->dest.ssa);
info->ps.reads_sample_pos_mask |= nir_def_components_read(&instr->def);
break;
case nir_intrinsic_load_push_constant:
gather_push_constant_info(nir, instr, info);

View file

@ -409,7 +409,7 @@ agx_emit_load_vary(agx_builder *b, agx_index dest, nir_intrinsic_instr *instr)
nir_src *offset = nir_get_io_offset_src(instr);
assert(nir_src_is_const(*offset) && "no indirects");
assert(nir_def_components_read(&instr->dest.ssa) ==
assert(nir_def_components_read(&instr->def) ==
nir_component_mask(components) &&
"iter does not handle write-after-write hazards");
@ -519,7 +519,7 @@ agx_emit_local_load_pixel(agx_builder *b, agx_index dest,
b->shader->did_writeout = true;
b->shader->out->reads_tib = true;
unsigned nr_comps = instr->dest.ssa.num_components;
unsigned nr_comps = instr->def.num_components;
agx_ld_tile_to(b, dest, agx_src_index(&instr->src[0]),
agx_format_for_pipe(nir_intrinsic_format(instr)),
BITFIELD_MASK(nr_comps), nir_intrinsic_base(instr));
@ -539,8 +539,8 @@ agx_emit_load(agx_builder *b, agx_index dest, nir_intrinsic_instr *instr)
offset = agx_abs(offset);
agx_device_load_to(b, dest, addr, offset, fmt,
BITFIELD_MASK(instr->dest.ssa.num_components), shift, 0);
agx_emit_cached_split(b, dest, instr->dest.ssa.num_components);
BITFIELD_MASK(instr->def.num_components), shift, 0);
agx_emit_cached_split(b, dest, instr->def.num_components);
}
static void
@ -566,7 +566,7 @@ agx_emit_load_preamble(agx_builder *b, agx_index dst,
nir_intrinsic_instr *instr)
{
agx_index srcs[4] = {agx_null()};
unsigned dim = instr->dest.ssa.num_components;
unsigned dim = instr->def.num_components;
assert(dim <= ARRAY_SIZE(srcs) && "shouldn't see larger vectors");
unsigned base = nir_intrinsic_base(instr);
@ -642,8 +642,8 @@ static agx_instr *
agx_load_compute_dimension(agx_builder *b, agx_index dst,
nir_intrinsic_instr *instr, enum agx_sr base)
{
unsigned dim = instr->dest.ssa.num_components;
unsigned size = instr->dest.ssa.bit_size;
unsigned dim = instr->def.num_components;
unsigned size = instr->def.bit_size;
assert(size == 16 || size == 32);
agx_index srcs[] = {
@ -738,8 +738,8 @@ agx_emit_local_load(agx_builder *b, agx_index dst, nir_intrinsic_instr *instr)
agx_index index = agx_zero(); /* TODO: optimize address arithmetic */
assert(base.size == AGX_SIZE_16);
enum agx_format format = format_for_bitsize(instr->dest.ssa.bit_size);
unsigned nr = instr->dest.ssa.num_components;
enum agx_format format = format_for_bitsize(instr->def.bit_size);
unsigned nr = instr->def.num_components;
unsigned mask = BITFIELD_MASK(nr);
agx_local_load_to(b, dst, base, index, format, mask);
@ -874,7 +874,7 @@ agx_emit_image_load(agx_builder *b, agx_index dst, nir_intrinsic_instr *intr)
agx_instr *I = agx_image_load_to(
b, tmp, coords, lod, bindless, texture, agx_txf_sampler(b->shader),
agx_null(), agx_tex_dim(dim, is_array), lod_mode, 0, 0, false);
I->mask = agx_expand_tex_to(b, &intr->dest.ssa, tmp, true);
I->mask = agx_expand_tex_to(b, &intr->def, tmp, true);
return NULL;
}
@ -936,7 +936,7 @@ static agx_instr *
agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
{
agx_index dst = nir_intrinsic_infos[instr->intrinsic].has_dest
? agx_def_index(&instr->dest.ssa)
? agx_def_index(&instr->def)
: agx_null();
gl_shader_stage stage = b->shader->stage;
@ -1663,7 +1663,7 @@ agx_emit_tex(agx_builder *b, nir_tex_instr *instr)
}
}
agx_index dst = agx_def_index(&instr->dest.ssa);
agx_index dst = agx_def_index(&instr->def);
/* Pack shadow reference value (compare) and packed offset together */
agx_index compare_offset = agx_null();
@ -1690,7 +1690,7 @@ agx_emit_tex(agx_builder *b, nir_tex_instr *instr)
* textureGatherOffsets. Don't try to mask the destination for gathers.
*/
bool masked = (instr->op != nir_texop_tg4);
I->mask = agx_expand_tex_to(b, &instr->dest.ssa, tmp, masked);
I->mask = agx_expand_tex_to(b, &instr->def, tmp, masked);
}
/*
@ -1754,8 +1754,8 @@ agx_emit_jump(agx_builder *b, nir_jump_instr *instr)
static void
agx_emit_phi(agx_builder *b, nir_phi_instr *instr)
{
agx_instr *I = agx_phi_to(b, agx_def_index(&instr->dest.ssa),
exec_list_length(&instr->srcs));
agx_instr *I =
agx_phi_to(b, agx_def_index(&instr->def), exec_list_length(&instr->srcs));
/* Deferred */
I->phi = instr;
@ -1776,7 +1776,7 @@ agx_emit_phi_deferred(agx_context *ctx, agx_block *block, agx_instr *I)
nir_phi_instr *phi = I->phi;
/* Guaranteed by lower_phis_to_scalar */
assert(phi->dest.ssa.num_components == 1);
assert(phi->def.num_components == 1);
nir_foreach_phi_src(src, phi) {
agx_block *pred = agx_from_nir_block(ctx, src->pred);
@ -2125,7 +2125,7 @@ agx_lower_front_face(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
if (intr->intrinsic != nir_intrinsic_load_front_face)
return false;
nir_def *def = &intr->dest.ssa;
nir_def *def = &intr->def;
assert(def->bit_size == 1);
b->cursor = nir_before_instr(&intr->instr);

View file

@ -474,7 +474,7 @@ agx_vec_for_def(agx_context *ctx, nir_def *def)
static inline agx_index
agx_vec_for_intr(agx_context *ctx, nir_intrinsic_instr *instr)
{
return agx_vec_for_def(ctx, &instr->dest.ssa);
return agx_vec_for_def(ctx, &instr->def);
}
static inline unsigned

View file

@ -270,7 +270,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
unsigned bitsize = intr->intrinsic == nir_intrinsic_store_global
? nir_src_bit_size(intr->src[0])
: intr->dest.ssa.bit_size;
: intr->def.bit_size;
enum pipe_format format = format_for_bitsize(bitsize);
unsigned format_shift = util_logbase2(util_format_get_blocksize(format));
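/* Illustrative: assuming format_for_bitsize(32) picks a format with a
 * 4-byte block, format_shift == util_logbase2(4) == 2.
 */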
@ -311,8 +311,8 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
nir_def *repl = NULL;
bool has_dest = (intr->intrinsic != nir_intrinsic_store_global);
unsigned num_components = has_dest ? intr->dest.ssa.num_components : 0;
unsigned bit_size = has_dest ? intr->dest.ssa.bit_size : 0;
unsigned num_components = has_dest ? intr->def.num_components : 0;
unsigned bit_size = has_dest ? intr->def.bit_size : 0;
if (intr->intrinsic == nir_intrinsic_load_global) {
repl =
@ -344,7 +344,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
}
if (repl)
nir_def_rewrite_uses(&intr->dest.ssa, repl);
nir_def_rewrite_uses(&intr->def, repl);
nir_instr_remove(instr);
return true;

View file

@ -127,7 +127,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
.interp_mode = interp_mode_for_load(load), .io_semantics = sem);
if (load->intrinsic == nir_intrinsic_load_input) {
assert(load->dest.ssa.bit_size == 32);
assert(load->def.bit_size == 32);
return interpolate_flat(b, coefficients);
} else {
nir_intrinsic_instr *bary = nir_src_as_intrinsic(load->src[0]);
@ -136,7 +136,7 @@ interpolate_channel(nir_builder *b, nir_intrinsic_instr *load, unsigned channel)
b, coefficients, bary->src[0].ssa,
nir_intrinsic_interp_mode(bary) != INTERP_MODE_NOPERSPECTIVE);
return nir_f2fN(b, interp, load->dest.ssa.bit_size);
return nir_f2fN(b, interp, load->def.bit_size);
}
}
@ -147,11 +147,11 @@ lower(nir_builder *b, nir_instr *instr, void *data)
/* Each component is loaded separately */
nir_def *values[NIR_MAX_VEC_COMPONENTS] = {NULL};
for (unsigned i = 0; i < intr->dest.ssa.num_components; ++i) {
for (unsigned i = 0; i < intr->def.num_components; ++i) {
values[i] = interpolate_channel(b, intr, i);
}
return nir_vec(b, values, intr->dest.ssa.num_components);
return nir_vec(b, values, intr->def.num_components);
}
bool

View file

@ -21,12 +21,12 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
return false;
unsigned mask = nir_def_components_read(&intr->dest.ssa);
unsigned mask = nir_def_components_read(&intr->def);
if (mask == 0 || mask == nir_component_mask(intr->num_components))
return false;
b->cursor = nir_before_instr(instr);
unsigned bit_size = intr->dest.ssa.bit_size;
unsigned bit_size = intr->def.bit_size;
nir_def *comps[4] = {NULL};
for (unsigned c = 0; c < intr->num_components; ++c) {
@ -43,8 +43,8 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
nir_intrinsic_instr *clone_intr = nir_instr_as_intrinsic(clone);
/* Shrink the load to count contiguous components */
nir_def_init(clone, &clone_intr->dest.ssa, count, bit_size);
nir_def *clone_vec = &clone_intr->dest.ssa;
nir_def_init(clone, &clone_intr->def, count, bit_size);
nir_def *clone_vec = &clone_intr->def;
clone_intr->num_components = count;
/* The load starts from component c relative to the original load */
@ -68,8 +68,7 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
}
}
nir_def_rewrite_uses(&intr->dest.ssa,
nir_vec(b, comps, intr->num_components));
nir_def_rewrite_uses(&intr->def, nir_vec(b, comps, intr->num_components));
return true;
}

View file

@ -140,7 +140,7 @@ agx_txs(nir_builder *b, nir_tex_instr *tex)
height = depth;
/* How we finish depends on the size of the result */
unsigned nr_comps = tex->dest.ssa.num_components;
unsigned nr_comps = tex->def.num_components;
assert(nr_comps <= 3);
/* Adjust for LOD, do not adjust array size */
@ -179,7 +179,7 @@ lower_txs(nir_builder *b, nir_instr *instr, UNUSED void *data)
return false;
nir_def *res = agx_txs(b, tex);
nir_def_rewrite_uses_after(&tex->dest.ssa, res, instr);
nir_def_rewrite_uses_after(&tex->def, res, instr);
nir_instr_remove(instr);
return true;
}
@ -206,7 +206,7 @@ load_rgb32(nir_builder *b, nir_tex_instr *tex, nir_def *coordinate)
nir_iand_imm(b, nir_ushr_imm(b, desc_hi, 2), BITFIELD64_MASK(36));
nir_def *base = nir_ishl_imm(b, base_shr4, 4);
nir_def *raw = nir_load_constant_agx(b, 3, tex->dest.ssa.bit_size, base,
nir_def *raw = nir_load_constant_agx(b, 3, tex->def.bit_size, base,
nir_imul_imm(b, coordinate, 3),
.format = AGX_INTERNAL_FORMAT_I32);
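/* Each RGB32 texel spans three 32-bit words, hence the coordinate * 3
 * element offset above.
 */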
@ -277,11 +277,11 @@ lower_buffer_texture(nir_builder *b, nir_tex_instr *tex)
nir_pop_if(b, nif);
/* Put it together with a phi */
nir_def *phi = nir_if_phi(b, rgb32, &tex->dest.ssa);
nir_def_rewrite_uses(&tex->dest.ssa, phi);
nir_def *phi = nir_if_phi(b, rgb32, &tex->def);
nir_def_rewrite_uses(&tex->def, phi);
nir_phi_instr *phi_instr = nir_instr_as_phi(phi->parent_instr);
nir_phi_src *else_src = nir_phi_get_src_from_block(phi_instr, else_block);
nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->dest.ssa);
nir_instr_rewrite_src_ssa(phi->parent_instr, &else_src->src, &tex->def);
return true;
}
@ -419,8 +419,8 @@ bias_for_tex(nir_builder *b, nir_tex_instr *tex)
query->op = nir_texop_lod_bias_agx;
query->dest_type = nir_type_float16;
nir_def_init(instr, &query->dest.ssa, 1, 16);
return &query->dest.ssa;
nir_def_init(instr, &query->def, 1, 16);
return &query->def;
}
static bool
@ -548,9 +548,9 @@ txs_for_image(nir_builder *b, nir_intrinsic_instr *intr,
nir_tex_src_for_ssa(nir_tex_src_texture_offset, intr->src[0].ssa);
}
nir_def_init(&tex->instr, &tex->dest.ssa, num_components, bit_size);
nir_def_init(&tex->instr, &tex->def, num_components, bit_size);
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
return &tex->def;
}
static nir_def *
@ -742,15 +742,14 @@ lower_images(nir_builder *b, nir_instr *instr, UNUSED void *data)
case nir_intrinsic_image_size:
case nir_intrinsic_bindless_image_size:
nir_def_rewrite_uses(&intr->dest.ssa,
txs_for_image(b, intr, intr->dest.ssa.num_components,
intr->dest.ssa.bit_size));
nir_def_rewrite_uses(
&intr->def,
txs_for_image(b, intr, intr->def.num_components, intr->def.bit_size));
return true;
case nir_intrinsic_image_texel_address:
case nir_intrinsic_bindless_image_texel_address:
nir_def_rewrite_uses(&intr->dest.ssa,
image_texel_address(b, intr, false));
nir_def_rewrite_uses(&intr->def, image_texel_address(b, intr, false));
return true;
default:

View file

@ -26,9 +26,9 @@ pass(struct nir_builder *b, nir_instr *instr, UNUSED void *data)
nir_iadd(b, nir_load_ubo_base_agx(b, ubo_index), nir_u2u64(b, offset));
nir_def *value =
nir_load_global_constant(b, address, nir_intrinsic_align(intr),
intr->num_components, intr->dest.ssa.bit_size);
intr->num_components, intr->def.bit_size);
nir_def_rewrite_uses(&intr->dest.ssa, value);
nir_def_rewrite_uses(&intr->def, value);
return true;
}

View file

@ -57,10 +57,10 @@ build_background_op(nir_builder *b, enum agx_meta_op op, unsigned rt,
tex->coord_components = 2;
tex->texture_index = rt;
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return nir_trim_vector(b, &tex->dest.ssa, nr);
return nir_trim_vector(b, &tex->def, nr);
} else {
assert(op == AGX_META_OP_CLEAR);

View file

@ -20,8 +20,8 @@ lower_wrapped(nir_builder *b, nir_instr *instr, void *data)
switch (intr->intrinsic) {
case nir_intrinsic_load_sample_id: {
unsigned size = intr->dest.ssa.bit_size;
nir_def_rewrite_uses(&intr->dest.ssa, nir_u2uN(b, sample_id, size));
unsigned size = intr->def.bit_size;
nir_def_rewrite_uses(&intr->def, nir_u2uN(b, sample_id, size));
nir_instr_remove(instr);
return true;
}
@ -151,7 +151,7 @@ lower_sample_mask_read(nir_builder *b, nir_instr *instr, UNUSED void *_)
if (intr->intrinsic != nir_intrinsic_load_sample_mask_in)
return false;
nir_def *old = &intr->dest.ssa;
nir_def *old = &intr->def;
nir_def *lowered = nir_iand(
b, old, nir_u2uN(b, nir_load_api_sample_mask_agx(b), old->bit_size));

View file

@ -52,11 +52,11 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
xy[i] = nir_fmul_imm(b, nir_u2f16(b, nibble), 1.0 / 16.0);
/* Upconvert if necessary */
xy[i] = nir_f2fN(b, xy[i], intr->dest.ssa.bit_size);
xy[i] = nir_f2fN(b, xy[i], intr->def.bit_size);
}
/* Collect and rewrite */
nir_def_rewrite_uses(&intr->dest.ssa, nir_vec2(b, xy[0], xy[1]));
nir_def_rewrite_uses(&intr->def, nir_vec2(b, xy[0], xy[1]));
nir_instr_remove(instr);
return true;
}
@ -67,7 +67,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
* by the sample ID to make that happen.
*/
b->cursor = nir_after_instr(instr);
nir_def *old = &intr->dest.ssa;
nir_def *old = &intr->def;
nir_def *lowered = mask_by_sample_id(b, old);
nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
return true;
@ -78,10 +78,10 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
* interpolateAtSample() with the sample ID
*/
b->cursor = nir_after_instr(instr);
nir_def *old = &intr->dest.ssa;
nir_def *old = &intr->def;
nir_def *lowered = nir_load_barycentric_at_sample(
b, intr->dest.ssa.bit_size, nir_load_sample_id(b),
b, intr->def.bit_size, nir_load_sample_id(b),
.interp_mode = nir_intrinsic_interp_mode(intr));
nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);

View file

@ -266,7 +266,7 @@ tib_impl(nir_builder *b, nir_instr *instr, void *data)
return NIR_LOWER_INSTR_PROGRESS_REPLACE;
} else {
uint8_t bit_size = intr->dest.ssa.bit_size;
uint8_t bit_size = intr->def.bit_size;
/* Loads from non-existent render targets are undefined in NIR but not
* possible to encode in the hardware, so delete them.

View file

@ -149,7 +149,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
util_format_is_pure_uint(interchange_format) &&
!util_format_is_pure_uint(attrib.format)
? (interchange_align * 8)
: intr->dest.ssa.bit_size;
: intr->def.bit_size;
/* Non-UNORM R10G10B10A2 loaded as a scalar and unpacked */
if (interchange_format == PIPE_FORMAT_R32_UINT && !desc->is_array)
@ -190,7 +190,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
b, interchange_comps, interchange_register_size, base, stride_offset_el,
.format = interchange_format, .base = shift);
unsigned dest_size = intr->dest.ssa.bit_size;
unsigned dest_size = intr->def.bit_size;
/* Unpack but do not convert non-native non-array formats */
if (is_rgb10_a2(desc) && interchange_format == PIPE_FORMAT_R32_UINT) {
@ -246,7 +246,7 @@ pass(struct nir_builder *b, nir_instr *instr, void *data)
channels[i] = apply_swizzle_channel(b, memory, desc->swizzle[i], is_int);
nir_def *logical = nir_vec(b, channels, intr->num_components);
nir_def_rewrite_uses(&intr->dest.ssa, logical);
nir_def_rewrite_uses(&intr->def, logical);
return true;
}

View file

@ -627,7 +627,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
tmu_op, has_index,
&tmu_writes);
} else if (is_load) {
type_size = instr->dest.ssa.bit_size / 8;
type_size = instr->def.bit_size / 8;
}
/* For atomics we use 32bit except for CMPXCHG, that we need
@ -703,7 +703,7 @@ ntq_emit_tmu_general(struct v3d_compile *c, nir_intrinsic_instr *instr,
*/
const uint32_t component_mask =
(1 << dest_components) - 1;
ntq_add_pending_tmu_flush(c, &instr->dest.ssa,
ntq_add_pending_tmu_flush(c, &instr->def,
component_mask);
}
}
@ -934,7 +934,7 @@ ntq_emit_txs(struct v3d_compile *c, nir_tex_instr *instr)
unreachable("Bad sampler type");
}
ntq_store_def(c, &instr->dest.ssa, i, size);
ntq_store_def(c, &instr->def, i, size);
}
}
@ -949,11 +949,11 @@ ntq_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
*/
switch (instr->op) {
case nir_texop_query_levels:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_TEXTURE_LEVELS, unit));
return;
case nir_texop_texture_samples:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_TEXTURE_SAMPLES, unit));
return;
case nir_texop_txs:
@ -2471,7 +2471,7 @@ ntq_setup_registers(struct v3d_compile *c, nir_function_impl *impl)
struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
array_len * num_components);
nir_def *nir_reg = &decl->dest.ssa;
nir_def *nir_reg = &decl->def;
_mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
for (int i = 0; i < array_len * num_components; i++)
@ -2501,10 +2501,10 @@ ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
assert(nir_src_as_uint(instr->src[1]) == 0);
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_IMAGE_WIDTH, image_index));
if (instr->num_components > 1) {
ntq_store_def(c, &instr->dest.ssa, 1,
ntq_store_def(c, &instr->def, 1,
vir_uniform(c,
instr->num_components == 2 && is_array ?
QUNIFORM_IMAGE_ARRAY_SIZE :
@ -2512,7 +2512,7 @@ ntq_emit_image_size(struct v3d_compile *c, nir_intrinsic_instr *instr)
image_index));
}
if (instr->num_components > 2) {
ntq_store_def(c, &instr->dest.ssa, 2,
ntq_store_def(c, &instr->def, 2,
vir_uniform(c,
is_array ?
QUNIFORM_IMAGE_ARRAY_SIZE :
@ -2650,7 +2650,7 @@ vir_emit_tlb_color_read(struct v3d_compile *c, nir_intrinsic_instr *instr)
}
assert(color_reads_for_sample[component].file != QFILE_NULL);
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_MOV(c, color_reads_for_sample[component]));
}
@ -2694,7 +2694,7 @@ static void
ntq_emit_load_uniform(struct v3d_compile *c, nir_intrinsic_instr *instr)
{
/* We scalarize general TMU access for anything that is not 32-bit. */
assert(instr->dest.ssa.bit_size == 32 ||
assert(instr->def.bit_size == 32 ||
instr->num_components == 1);
/* Try to emit ldunif if possible, otherwise fallback to general TMU */
@ -2703,7 +2703,7 @@ ntq_emit_load_uniform(struct v3d_compile *c, nir_intrinsic_instr *instr)
nir_src_as_uint(instr->src[0]));
if (try_emit_uniform(c, offset, instr->num_components,
&instr->dest.ssa, QUNIFORM_UNIFORM)) {
&instr->def, QUNIFORM_UNIFORM)) {
return;
}
}
@ -2726,13 +2726,13 @@ ntq_emit_inline_ubo_load(struct v3d_compile *c, nir_intrinsic_instr *instr)
return false;
/* We scalarize general TMU access for anything that is not 32-bit */
assert(instr->dest.ssa.bit_size == 32 ||
assert(instr->def.bit_size == 32 ||
instr->num_components == 1);
if (nir_src_is_const(instr->src[1])) {
int offset = nir_src_as_uint(instr->src[1]);
if (try_emit_uniform(c, offset, instr->num_components,
&instr->dest.ssa,
&instr->def,
QUNIFORM_INLINE_UBO_0 + index)) {
return true;
}
@ -2786,14 +2786,14 @@ ntq_emit_load_input(struct v3d_compile *c, nir_intrinsic_instr *instr)
index += nir_intrinsic_component(instr);
for (int i = 0; i < instr->num_components; i++) {
struct qreg vpm_offset = vir_uniform_ui(c, index++);
ntq_store_def(c, &instr->dest.ssa, i,
ntq_store_def(c, &instr->def, i,
vir_LDVPMV_IN(c, vpm_offset));
}
} else {
for (int i = 0; i < instr->num_components; i++) {
int comp = nir_intrinsic_component(instr) + i;
struct qreg input = c->inputs[offset * 4 + comp];
ntq_store_def(c, &instr->dest.ssa, i, vir_MOV(c, input));
ntq_store_def(c, &instr->def, i, vir_MOV(c, input));
if (c->s->info.stage == MESA_SHADER_FRAGMENT &&
input.file == c->payload_z.file &&
@ -3108,7 +3108,7 @@ ntq_emit_load_unifa(struct v3d_compile *c, nir_intrinsic_instr *instr)
* use ldunifa if we can verify alignment, which we can only do for
* loads with a constant offset.
*/
uint32_t bit_size = instr->dest.ssa.bit_size;
uint32_t bit_size = instr->def.bit_size;
uint32_t value_skips = 0;
if (bit_size < 32) {
if (dynamic_src) {
@ -3205,7 +3205,7 @@ ntq_emit_load_unifa(struct v3d_compile *c, nir_intrinsic_instr *instr)
if (bit_size == 32) {
assert(value_skips == 0);
ntq_store_def(c, &instr->dest.ssa, i, vir_MOV(c, data));
ntq_store_def(c, &instr->def, i, vir_MOV(c, data));
i++;
} else {
assert((bit_size == 16 && value_skips <= 1) ||
@ -3234,7 +3234,7 @@ ntq_emit_load_unifa(struct v3d_compile *c, nir_intrinsic_instr *instr)
uint32_t mask = (1 << bit_size) - 1;
tmp = vir_AND(c, vir_MOV(c, data),
vir_uniform_ui(c, mask));
ntq_store_def(c, &instr->dest.ssa, i,
ntq_store_def(c, &instr->def, i,
vir_MOV(c, tmp));
i++;
valid_count--;
@ -3356,20 +3356,20 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
break;
case nir_intrinsic_get_ssbo_size:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_GET_SSBO_SIZE,
nir_src_comp_as_uint(instr->src[0], 0)));
break;
case nir_intrinsic_get_ubo_size:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_GET_UBO_SIZE,
nir_src_comp_as_uint(instr->src[0], 0)));
break;
case nir_intrinsic_load_user_clip_plane:
for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) {
ntq_store_def(c, &instr->dest.ssa, i,
ntq_store_def(c, &instr->def, i,
vir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
nir_intrinsic_ucp_id(instr) *
4 + i));
@ -3377,69 +3377,69 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
break;
case nir_intrinsic_load_viewport_x_scale:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0));
break;
case nir_intrinsic_load_viewport_y_scale:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_Y_SCALE, 0));
break;
case nir_intrinsic_load_viewport_z_scale:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0));
break;
case nir_intrinsic_load_viewport_z_offset:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0));
break;
case nir_intrinsic_load_line_coord:
ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->line_x));
ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->line_x));
break;
case nir_intrinsic_load_line_width:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_LINE_WIDTH, 0));
break;
case nir_intrinsic_load_aa_line_width:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_AA_LINE_WIDTH, 0));
break;
case nir_intrinsic_load_sample_mask_in:
ntq_store_def(c, &instr->dest.ssa, 0, vir_MSF(c));
ntq_store_def(c, &instr->def, 0, vir_MSF(c));
break;
case nir_intrinsic_load_helper_invocation:
vir_set_pf(c, vir_MSF_dest(c, vir_nop_reg()), V3D_QPU_PF_PUSHZ);
struct qreg qdest = ntq_emit_cond_to_bool(c, V3D_QPU_COND_IFA);
ntq_store_def(c, &instr->dest.ssa, 0, qdest);
ntq_store_def(c, &instr->def, 0, qdest);
break;
case nir_intrinsic_load_front_face:
/* The register contains 0 (front) or 1 (back), and we need to
* turn it into a NIR bool where true means front.
*/
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_ADD(c,
vir_uniform_ui(c, -1),
vir_REVF(c)));
break;
case nir_intrinsic_load_base_instance:
ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->biid));
ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->biid));
break;
case nir_intrinsic_load_instance_id:
ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->iid));
ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->iid));
break;
case nir_intrinsic_load_vertex_id:
ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, c->vid));
ntq_store_def(c, &instr->def, 0, vir_MOV(c, c->vid));
break;
case nir_intrinsic_load_tlb_color_v3d:
@ -3542,7 +3542,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
case nir_intrinsic_load_num_workgroups:
for (int i = 0; i < 3; i++) {
ntq_store_def(c, &instr->dest.ssa, i,
ntq_store_def(c, &instr->def, i,
vir_uniform(c, QUNIFORM_NUM_WORK_GROUPS,
i));
}
@ -3552,32 +3552,32 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
case nir_intrinsic_load_workgroup_id: {
struct qreg x = vir_AND(c, c->cs_payload[0],
vir_uniform_ui(c, 0xffff));
ntq_store_def(c, &instr->dest.ssa, 0, x);
ntq_store_def(c, &instr->def, 0, x);
struct qreg y = vir_SHR(c, c->cs_payload[0],
vir_uniform_ui(c, 16));
ntq_store_def(c, &instr->dest.ssa, 1, y);
ntq_store_def(c, &instr->def, 1, y);
struct qreg z = vir_AND(c, c->cs_payload[1],
vir_uniform_ui(c, 0xffff));
ntq_store_def(c, &instr->dest.ssa, 2, z);
ntq_store_def(c, &instr->def, 2, z);
break;
}
case nir_intrinsic_load_base_workgroup_id: {
struct qreg x = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 0);
ntq_store_def(c, &instr->dest.ssa, 0, x);
ntq_store_def(c, &instr->def, 0, x);
struct qreg y = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 1);
ntq_store_def(c, &instr->dest.ssa, 1, y);
ntq_store_def(c, &instr->def, 1, y);
struct qreg z = vir_uniform(c, QUNIFORM_WORK_GROUP_BASE, 2);
ntq_store_def(c, &instr->dest.ssa, 2, z);
ntq_store_def(c, &instr->def, 2, z);
break;
}
case nir_intrinsic_load_local_invocation_index:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
emit_load_local_invocation_index(c));
break;
@ -3588,7 +3588,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
STATIC_ASSERT(IS_POT(V3D_CHANNELS) && V3D_CHANNELS > 0);
const uint32_t divide_shift = ffs(V3D_CHANNELS) - 1;
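/* Example: V3D runs V3D_CHANNELS lanes per subgroup; if
 * V3D_CHANNELS == 16, divide_shift == 4 and the subgroup index is the
 * invocation index >> 4.
 */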
struct qreg lii = emit_load_local_invocation_index(c);
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_SHR(c, lii,
vir_uniform_ui(c, divide_shift)));
break;
@ -3627,7 +3627,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
struct qreg col = ntq_get_src(c, instr->src[0], 0);
for (int i = 0; i < instr->num_components; i++) {
struct qreg row = vir_uniform_ui(c, row_idx++);
ntq_store_def(c, &instr->dest.ssa, i,
ntq_store_def(c, &instr->def, i,
vir_LDVPMG_IN(c, row, col));
}
break;
@ -3644,47 +3644,47 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
* using ldvpm(v,d)_in (See Table 71).
*/
assert(c->s->info.stage == MESA_SHADER_GEOMETRY);
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_LDVPMV_IN(c, vir_uniform_ui(c, 0)));
break;
}
case nir_intrinsic_load_invocation_id:
ntq_store_def(c, &instr->dest.ssa, 0, vir_IID(c));
ntq_store_def(c, &instr->def, 0, vir_IID(c));
break;
case nir_intrinsic_load_fb_layers_v3d:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_FB_LAYERS, 0));
break;
case nir_intrinsic_load_sample_id:
ntq_store_def(c, &instr->dest.ssa, 0, vir_SAMPID(c));
ntq_store_def(c, &instr->def, 0, vir_SAMPID(c));
break;
case nir_intrinsic_load_sample_pos:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_FSUB(c, vir_FXCD(c), vir_ITOF(c, vir_XCD(c))));
ntq_store_def(c, &instr->dest.ssa, 1,
ntq_store_def(c, &instr->def, 1,
vir_FSUB(c, vir_FYCD(c), vir_ITOF(c, vir_YCD(c))));
break;
case nir_intrinsic_load_barycentric_at_offset:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_MOV(c, ntq_get_src(c, instr->src[0], 0)));
ntq_store_def(c, &instr->dest.ssa, 1,
ntq_store_def(c, &instr->def, 1,
vir_MOV(c, ntq_get_src(c, instr->src[0], 1)));
break;
case nir_intrinsic_load_barycentric_pixel:
ntq_store_def(c, &instr->dest.ssa, 0, vir_uniform_f(c, 0.0f));
ntq_store_def(c, &instr->dest.ssa, 1, vir_uniform_f(c, 0.0f));
ntq_store_def(c, &instr->def, 0, vir_uniform_f(c, 0.0f));
ntq_store_def(c, &instr->def, 1, vir_uniform_f(c, 0.0f));
break;
case nir_intrinsic_load_barycentric_at_sample: {
if (!c->fs_key->msaa) {
ntq_store_def(c, &instr->dest.ssa, 0, vir_uniform_f(c, 0.0f));
ntq_store_def(c, &instr->dest.ssa, 1, vir_uniform_f(c, 0.0f));
ntq_store_def(c, &instr->def, 0, vir_uniform_f(c, 0.0f));
ntq_store_def(c, &instr->def, 1, vir_uniform_f(c, 0.0f));
return;
}
@ -3692,8 +3692,8 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
struct qreg sample_idx = ntq_get_src(c, instr->src[0], 0);
ntq_get_sample_offset(c, sample_idx, &offset_x, &offset_y);
ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, offset_x));
ntq_store_def(c, &instr->dest.ssa, 1, vir_MOV(c, offset_y));
ntq_store_def(c, &instr->def, 0, vir_MOV(c, offset_x));
ntq_store_def(c, &instr->def, 1, vir_MOV(c, offset_y));
break;
}
@ -3703,9 +3703,9 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
struct qreg offset_y =
vir_FSUB(c, vir_FYCD(c), vir_ITOF(c, vir_YCD(c)));
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_FSUB(c, offset_x, vir_uniform_f(c, 0.5f)));
ntq_store_def(c, &instr->dest.ssa, 1,
ntq_store_def(c, &instr->def, 1,
vir_FSUB(c, offset_y, vir_uniform_f(c, 0.5f)));
break;
}
@ -3713,8 +3713,8 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
case nir_intrinsic_load_barycentric_centroid: {
struct qreg offset_x, offset_y;
ntq_get_barycentric_centroid(c, &offset_x, &offset_y);
ntq_store_def(c, &instr->dest.ssa, 0, vir_MOV(c, offset_x));
ntq_store_def(c, &instr->dest.ssa, 1, vir_MOV(c, offset_y));
ntq_store_def(c, &instr->def, 0, vir_MOV(c, offset_x));
ntq_store_def(c, &instr->def, 1, vir_MOV(c, offset_y));
break;
}
@ -3733,7 +3733,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
*/
if (!c->fs_key->msaa ||
c->interp[input_idx].vp.file == QFILE_NULL) {
ntq_store_def(c, &instr->dest.ssa, i,
ntq_store_def(c, &instr->def, i,
vir_MOV(c, c->inputs[input_idx]));
continue;
}
@ -3752,18 +3752,18 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
ntq_emit_load_interpolated_input(c, p, C,
offset_x, offset_y,
interp_mode);
ntq_store_def(c, &instr->dest.ssa, i, result);
ntq_store_def(c, &instr->def, i, result);
}
break;
}
case nir_intrinsic_load_subgroup_size:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform_ui(c, V3D_CHANNELS));
break;
case nir_intrinsic_load_subgroup_invocation:
ntq_store_def(c, &instr->dest.ssa, 0, vir_EIDX(c));
ntq_store_def(c, &instr->def, 0, vir_EIDX(c));
break;
case nir_intrinsic_elect: {
@ -3775,7 +3775,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
first, vir_uniform_ui(c, 1)),
V3D_QPU_PF_PUSHZ);
struct qreg result = ntq_emit_cond_to_bool(c, V3D_QPU_COND_IFA);
ntq_store_def(c, &instr->dest.ssa, 0, result);
ntq_store_def(c, &instr->def, 0, result);
break;
}
@ -3784,7 +3784,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
break;
case nir_intrinsic_load_view_index:
ntq_store_def(c, &instr->dest.ssa, 0,
ntq_store_def(c, &instr->def, 0,
vir_uniform(c, QUNIFORM_VIEW_INDEX, 0));
break;

View file

@ -135,7 +135,7 @@ v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
* instruction writes and how many the instruction could produce.
*/
p1_unpacked.return_words_of_texture_data =
nir_def_components_read(&instr->dest.ssa);
nir_def_components_read(&instr->def);
uint32_t p0_packed;
V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL,
@ -188,6 +188,6 @@ v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
for (int i = 0; i < 4; i++) {
if (p1_unpacked.return_words_of_texture_data & (1 << i))
ntq_store_def(c, &instr->dest.ssa, i, vir_LDTMU(c));
ntq_store_def(c, &instr->def, i, vir_LDTMU(c));
}
}

View file

@ -250,10 +250,10 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
/* Limit the number of channels returned to both how many the NIR
* instruction writes and how many the instruction could produce.
*/
nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->dest.ssa);
nir_intrinsic_instr *store = nir_store_reg_for_def(&instr->def);
if (store == NULL) {
p0_unpacked.return_words_of_texture_data =
nir_def_components_read(&instr->dest.ssa);
nir_def_components_read(&instr->def);
} else {
nir_def *reg = store->src[1].ssa;
nir_intrinsic_instr *decl = nir_reg_get_decl(reg);
@ -407,7 +407,7 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
}
retiring->ldtmu_count = p0_unpacked.return_words_of_texture_data;
ntq_add_pending_tmu_flush(c, &instr->dest.ssa,
ntq_add_pending_tmu_flush(c, &instr->def,
p0_unpacked.return_words_of_texture_data);
}
@ -639,6 +639,6 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
struct qinst *retiring =
vir_image_emit_register_writes(c, instr, atomic_add_replaced, NULL);
retiring->ldtmu_count = p0_unpacked.return_words_of_texture_data;
ntq_add_pending_tmu_flush(c, &instr->dest.ssa,
ntq_add_pending_tmu_flush(c, &instr->def,
p0_unpacked.return_words_of_texture_data);
}

View file

@ -182,7 +182,7 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
b->cursor = nir_after_instr(&instr->instr);
nir_def *result = &instr->dest.ssa;
nir_def *result = &instr->def;
if (util_format_is_pure_uint(format)) {
result = nir_format_unpack_uint(b, result, bits16, 4);
} else if (util_format_is_pure_sint(format)) {
@ -197,7 +197,7 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
nir_unpack_half_2x16_split_y(b, ba));
}
nir_def_rewrite_uses_after(&instr->dest.ssa, result,
nir_def_rewrite_uses_after(&instr->def, result,
result->parent_instr);
return true;

View file

@ -116,7 +116,7 @@ static bool
lower_load_bitsize(nir_builder *b,
nir_intrinsic_instr *intr)
{
uint32_t bit_size = intr->dest.ssa.bit_size;
uint32_t bit_size = intr->def.bit_size;
if (bit_size == 32)
return false;
@ -153,15 +153,15 @@ lower_load_bitsize(nir_builder *b,
}
}
nir_def_init(&new_intr->instr, &new_intr->dest.ssa, 1,
nir_def_init(&new_intr->instr, &new_intr->def, 1,
bit_size);
dest_components[component] = &new_intr->dest.ssa;
dest_components[component] = &new_intr->def;
nir_builder_instr_insert(b, &new_intr->instr);
}
nir_def *new_dst = nir_vec(b, dest_components, num_comp);
nir_def_rewrite_uses(&intr->dest.ssa, new_dst);
nir_def_rewrite_uses(&intr->def, new_dst);
nir_instr_remove(&intr->instr);
return true;

View file

@ -65,8 +65,8 @@ v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
nir_intrinsic_instr *chan_instr =
nir_intrinsic_instr_create(b->shader, instr->intrinsic);
chan_instr->num_components = 1;
nir_def_init(&chan_instr->instr, &chan_instr->dest.ssa, 1,
instr->dest.ssa.bit_size);
nir_def_init(&chan_instr->instr, &chan_instr->def, 1,
instr->def.bit_size);
chan_instr->src[0] = nir_src_for_ssa(chan_offset);
@ -74,11 +74,11 @@ v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
nir_builder_instr_insert(b, &chan_instr->instr);
chans[i] = &chan_instr->dest.ssa;
chans[i] = &chan_instr->def;
}
nir_def *result = nir_vec(b, chans, instr->num_components);
nir_def_rewrite_uses(&instr->dest.ssa, result);
nir_def_rewrite_uses(&instr->def, result);
nir_instr_remove(&instr->instr);
}

View file

@ -1482,7 +1482,7 @@ lower_load_num_subgroups(struct v3d_compile *c,
c->s->info.workgroup_size[1] *
c->s->info.workgroup_size[2], V3D_CHANNELS);
nir_def *result = nir_imm_int(b, num_subgroups);
nir_def_rewrite_uses(&intr->dest.ssa, result);
nir_def_rewrite_uses(&intr->def, result);
nir_instr_remove(&intr->instr);
}

View file

@ -2323,7 +2323,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format,
nir_iadd(&b, nir_iadd(&b, offset, x_offset),
nir_imul(&b, y_offset, stride));
nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->dest.ssa;
nir_def *tex_deref = &nir_build_deref_var(&b, sampler)->def;
nir_tex_instr *tex = nir_tex_instr_create(b.shader, 2);
tex->sampler_dim = GLSL_SAMPLER_DIM_BUF;
tex->op = nir_texop_txf;
@ -2332,7 +2332,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format,
tex->dest_type = nir_type_uint32;
tex->is_array = false;
tex->coord_components = 1;
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(&b, &tex->instr);
uint32_t swiz[4];
@ -2344,7 +2344,7 @@ get_texel_buffer_copy_fs(struct v3dv_device *device, VkFormat format,
component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_B, cswizzle->b);
swiz[3] =
component_swizzle_to_nir_swizzle(VK_COMPONENT_SWIZZLE_A, cswizzle->a);
nir_def *s = nir_swizzle(&b, &tex->dest.ssa, swiz, 4);
nir_def *s = nir_swizzle(&b, &tex->def, swiz, 4);
nir_store_var(&b, fs_out_color, s, 0xf);
return b.shader;
@ -3597,7 +3597,7 @@ build_nir_tex_op_read(struct nir_builder *b,
sampler->data.descriptor_set = 0;
sampler->data.binding = 0;
nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def;
nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3);
tex->sampler_dim = dim;
tex->op = nir_texop_tex;
@ -3608,9 +3608,9 @@ build_nir_tex_op_read(struct nir_builder *b,
tex->is_array = glsl_sampler_type_is_array(sampler_type);
tex->coord_components = tex_pos->num_components;
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
return &tex->def;
}
static nir_def *
@ -3631,9 +3631,9 @@ build_nir_tex_op_ms_fetch_sample(struct nir_builder *b,
tex->is_array = false;
tex->coord_components = tex_pos->num_components;
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
return &tex->dest.ssa;
return &tex->def;
}
/* Fetches all samples at the given position and averages them */
@ -3654,7 +3654,7 @@ build_nir_tex_op_ms_resolve(struct nir_builder *b,
const bool is_int = glsl_base_type_is_integer(tex_type);
nir_def *tmp = NULL;
nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def;
for (uint32_t i = 0; i < src_samples; i++) {
nir_def *s =
build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref,
@ -3687,7 +3687,7 @@ build_nir_tex_op_ms_read(struct nir_builder *b,
sampler->data.descriptor_set = 0;
sampler->data.binding = 0;
nir_def *tex_deref = &nir_build_deref_var(b, sampler)->dest.ssa;
nir_def *tex_deref = &nir_build_deref_var(b, sampler)->def;
return build_nir_tex_op_ms_fetch_sample(b, sampler, tex_deref,
tex_type, tex_pos,

View file

@ -568,7 +568,7 @@ lower_vulkan_resource_index(nir_builder *b,
* vulkan_load_descriptor returns a vec2 providing an index and
* offset. Our backend compiler only cares about the index part.
*/
nir_def_rewrite_uses(&instr->dest.ssa,
nir_def_rewrite_uses(&instr->def,
nir_imm_ivec2(b, index, 0));
nir_instr_remove(&instr->instr);
}
@ -826,7 +826,7 @@ lower_intrinsic(nir_builder *b,
/* Loading the descriptor happens as part of load/store instructions,
* so for us this is a no-op.
*/
nir_def_rewrite_uses(&instr->dest.ssa, instr->src[0].ssa);
nir_def_rewrite_uses(&instr->def, instr->src[0].ssa);
nir_instr_remove(&instr->instr);
return true;
}
@ -907,11 +907,11 @@ lower_point_coord_cb(nir_builder *b, nir_instr *instr, void *_state)
return false;
b->cursor = nir_after_instr(&intr->instr);
nir_def *result = &intr->dest.ssa;
nir_def *result = &intr->def;
result =
nir_vector_insert_imm(b, result,
nir_fsub_imm(b, 1.0, nir_channel(b, result, 1)), 1);
nir_def_rewrite_uses_after(&intr->dest.ssa,
nir_def_rewrite_uses_after(&intr->def,
result, result->parent_instr);
return true;
}

View file

@ -2611,9 +2611,9 @@ replace_unused_interpolate_at_with_undef(nir_builder *b, nir_instr *instr,
if (var->data.mode == nir_var_shader_temp) {
/* Create undef and rewrite the interp uses */
nir_def *undef =
nir_undef(b, intrin->dest.ssa.num_components,
intrin->dest.ssa.bit_size);
nir_def_rewrite_uses(&intrin->dest.ssa, undef);
nir_undef(b, intrin->def.num_components,
intrin->def.bit_size);
nir_def_rewrite_uses(&intrin->def, undef);
nir_instr_remove(&intrin->instr);
return true;

View file

@ -194,8 +194,8 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
break;
/* We use nir_address_format_32bit_index_offset */
assert(deref->dest.ssa.bit_size == 32);
deref->dest.ssa.num_components = 2;
assert(deref->def.bit_size == 32);
deref->def.num_components = 2;
progress = true;
@ -237,8 +237,8 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
cast->cast.align_mul = NIR_ALIGN_MUL_MAX;
cast->cast.align_offset = offset % NIR_ALIGN_MUL_MAX;
nir_def_rewrite_uses(&deref->dest.ssa,
&cast->dest.ssa);
nir_def_rewrite_uses(&deref->def,
&cast->def);
nir_deref_instr_remove_if_unused(deref);
break;
}
@ -261,9 +261,9 @@ lower_buffer_interface_derefs_impl(nir_function_impl *impl,
*/
if (glsl_type_is_boolean(deref->type)) {
b.cursor = nir_after_instr(&intrin->instr);
intrin->dest.ssa.bit_size = 32;
nir_def *bval = nir_i2b(&b, &intrin->dest.ssa);
nir_def_rewrite_uses_after(&intrin->dest.ssa,
intrin->def.bit_size = 32;
nir_def *bval = nir_i2b(&b, &intrin->def);
nir_def_rewrite_uses_after(&intrin->def,
bval,
bval->parent_instr);
progress = true;

View file

@ -822,7 +822,7 @@ lower_varying(struct lower_packed_varyings_state *state,
swizzle_values[i] = i + location_frac;
}
nir_def *ssa_def = &packed_deref->dest.ssa;
nir_def *ssa_def = &packed_deref->def;
ssa_def = nir_load_deref(&state->b, packed_deref);
nir_def *swizzle =
nir_swizzle(&state->b, ssa_def, swizzle_values, components);

View file

@ -286,7 +286,7 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
/* only lower non-bindless: */
if (texture_deref) {
nir_instr_rewrite_src(&instr->instr, &instr->src[texture_idx].src,
nir_src_for_ssa(&texture_deref->dest.ssa));
nir_src_for_ssa(&texture_deref->def));
record_textures_used(&b->shader->info, texture_deref, instr->op);
}
}
@ -297,7 +297,7 @@ lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
/* only lower non-bindless: */
if (sampler_deref) {
nir_instr_rewrite_src(&instr->instr, &instr->src[sampler_idx].src,
nir_src_for_ssa(&sampler_deref->dest.ssa));
nir_src_for_ssa(&sampler_deref->def));
record_samplers_used(&b->shader->info, sampler_deref, instr->op);
}
}
@ -329,7 +329,7 @@ lower_intrinsic(nir_intrinsic_instr *instr,
if (!deref)
return false;
nir_instr_rewrite_src(&instr->instr, &instr->src[0],
nir_src_for_ssa(&deref->dest.ssa));
nir_src_for_ssa(&deref->def));
return true;
}
if (instr->intrinsic == nir_intrinsic_image_deref_order ||

View file

@ -298,7 +298,7 @@ rewrite_varying_deref(nir_builder *b, struct replace_varyings_data *rv_data,
unsigned i = nir_src_as_uint(deref->arr.index);
nir_deref_instr *new_deref =
nir_build_deref_var(b, rv_data->new_texcoord[i]);
nir_def_rewrite_uses(&deref->dest.ssa, &new_deref->dest.ssa);
nir_def_rewrite_uses(&deref->def, &new_deref->def);
return;
}
}

View file

@ -1180,7 +1180,7 @@ nir_visitor::visit(ir_call *ir)
}
nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
nir_def *ret = &instr->dest.ssa;
nir_def *ret = &instr->def;
switch (op) {
case nir_intrinsic_deref_atomic:
@ -1205,7 +1205,7 @@ nir_visitor::visit(ir_call *ir)
nir_deref = nir_build_deref_array_imm(&b, nir_deref,
swizzle->mask.x);
}
instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa);
instr->src[0] = nir_src_for_ssa(&nir_deref->def);
nir_intrinsic_set_atomic_op(instr, atomic_op);
nir_intrinsic_set_access(instr, deref_get_qualifier(nir_deref));
@ -1226,10 +1226,10 @@ nir_visitor::visit(ir_call *ir)
/* Atomic result */
assert(ir->return_deref);
if (ir->return_deref->type->is_integer_64()) {
nir_def_init(&instr->instr, &instr->dest.ssa,
nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 64);
} else {
nir_def_init(&instr->instr, &instr->dest.ssa,
nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 32);
}
nir_builder_instr_insert(&b, &instr->instr);
@ -1250,12 +1250,12 @@ nir_visitor::visit(ir_call *ir)
exec_node *param = ir->actual_parameters.get_head();
ir_dereference *counter = (ir_dereference *)param;
instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->dest.ssa);
instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->def);
param = param->get_next();
/* Set the intrinsic destination. */
if (ir->return_deref) {
nir_def_init(&instr->instr, &instr->dest.ssa, 1, 32);
nir_def_init(&instr->instr, &instr->def, 1, 32);
}
/* Set the intrinsic parameters. */
@ -1294,7 +1294,7 @@ nir_visitor::visit(ir_call *ir)
nir_intrinsic_set_atomic_op(instr, atomic_op);
}
instr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
instr->src[0] = nir_src_for_ssa(&deref->def);
param = param->get_next();
nir_intrinsic_set_image_dim(instr,
(glsl_sampler_dim)type->sampler_dimensionality);
@ -1311,14 +1311,14 @@ nir_visitor::visit(ir_call *ir)
} else
num_components = ir->return_deref->type->vector_elements;
nir_def_init(&instr->instr, &instr->dest.ssa, num_components, 32);
nir_def_init(&instr->instr, &instr->def, num_components, 32);
}
if (op == nir_intrinsic_image_deref_size) {
instr->num_components = instr->dest.ssa.num_components;
instr->num_components = instr->def.num_components;
} else if (op == nir_intrinsic_image_deref_load ||
op == nir_intrinsic_image_deref_sparse_load) {
instr->num_components = instr->dest.ssa.num_components;
instr->num_components = instr->def.num_components;
nir_intrinsic_set_dest_type(instr,
nir_get_nir_type_for_glsl_base_type(type->sampled_type));
} else if (op == nir_intrinsic_image_deref_store) {
@ -1444,7 +1444,7 @@ nir_visitor::visit(ir_call *ir)
break;
}
case nir_intrinsic_shader_clock:
nir_def_init(&instr->instr, &instr->dest.ssa, 2, 32);
nir_def_init(&instr->instr, &instr->def, 2, 32);
nir_intrinsic_set_memory_scope(instr, SCOPE_SUBGROUP);
nir_builder_instr_insert(&b, &instr->instr);
break;
@ -1495,14 +1495,14 @@ nir_visitor::visit(ir_call *ir)
/* Setup destination register */
unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
nir_def_init(&instr->instr, &instr->dest.ssa, type->vector_elements,
nir_def_init(&instr->instr, &instr->def, type->vector_elements,
bit_size);
nir_builder_instr_insert(&b, &instr->instr);
/* The value in shared memory is a 32-bit value */
if (type->is_boolean())
ret = nir_b2b1(&b, &instr->dest.ssa);
ret = nir_b2b1(&b, &instr->def);
break;
}
case nir_intrinsic_store_shared: {
@ -1538,7 +1538,7 @@ nir_visitor::visit(ir_call *ir)
FALLTHROUGH;
case nir_intrinsic_vote_any:
case nir_intrinsic_vote_all: {
nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1);
nir_def_init(&instr->instr, &instr->def, 1, 1);
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
@ -1548,7 +1548,7 @@ nir_visitor::visit(ir_call *ir)
}
case nir_intrinsic_ballot: {
nir_def_init(&instr->instr, &instr->dest.ssa,
nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 64);
instr->num_components = ir->return_deref->type->vector_elements;
@ -1559,7 +1559,7 @@ nir_visitor::visit(ir_call *ir)
break;
}
case nir_intrinsic_read_invocation: {
nir_def_init(&instr->instr, &instr->dest.ssa,
nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 32);
instr->num_components = ir->return_deref->type->vector_elements;
@ -1573,7 +1573,7 @@ nir_visitor::visit(ir_call *ir)
break;
}
case nir_intrinsic_read_first_invocation: {
nir_def_init(&instr->instr, &instr->dest.ssa,
nir_def_init(&instr->instr, &instr->def,
ir->return_deref->type->vector_elements, 32);
instr->num_components = ir->return_deref->type->vector_elements;
@ -1584,12 +1584,12 @@ nir_visitor::visit(ir_call *ir)
break;
}
case nir_intrinsic_is_helper_invocation: {
nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1);
nir_def_init(&instr->instr, &instr->def, 1, 1);
nir_builder_instr_insert(&b, &instr->instr);
break;
}
case nir_intrinsic_is_sparse_texels_resident: {
nir_def_init(&instr->instr, &instr->dest.ssa, 1, 1);
nir_def_init(&instr->instr, &instr->def, 1, 1);
ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));
@ -1627,7 +1627,7 @@ nir_visitor::visit(ir_call *ir)
nir_local_variable_create(this->impl, ir->return_deref->type,
"return_tmp");
ret_deref = nir_build_deref_var(&b, ret_tmp);
call->params[i++] = nir_src_for_ssa(&ret_deref->dest.ssa);
call->params[i++] = nir_src_for_ssa(&ret_deref->def);
}
foreach_two_lists(formal_node, &ir->callee->parameters,
@ -1637,7 +1637,7 @@ nir_visitor::visit(ir_call *ir)
if (sig_param->data.mode == ir_var_function_out) {
nir_deref_instr *out_deref = evaluate_deref(param_rvalue);
call->params[i] = nir_src_for_ssa(&out_deref->dest.ssa);
call->params[i] = nir_src_for_ssa(&out_deref->def);
} else if (sig_param->data.mode == ir_var_function_in) {
nir_def *val = evaluate_rvalue(param_rvalue);
nir_src src = nir_src_for_ssa(val);
@ -1736,13 +1736,13 @@ get_instr_def(nir_instr *instr)
case nir_instr_type_intrinsic:
intrinsic_instr = nir_instr_as_intrinsic(instr);
if (nir_intrinsic_infos[intrinsic_instr->intrinsic].has_dest)
return &intrinsic_instr->dest.ssa;
return &intrinsic_instr->def;
else
return NULL;
case nir_instr_type_tex:
tex_instr = nir_instr_as_tex(instr);
return &tex_instr->dest.ssa;
return &tex_instr->def;
default:
unreachable("not reached");
@ -1837,7 +1837,7 @@ nir_visitor::visit(ir_expression *ir)
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(shader, op);
intrin->num_components = deref->type->vector_elements;
intrin->src[0] = nir_src_for_ssa(&this->deref->dest.ssa);
intrin->src[0] = nir_src_for_ssa(&this->deref->def);
if (intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
intrin->intrinsic == nir_intrinsic_interp_deref_at_sample)
@ -1864,7 +1864,7 @@ nir_visitor::visit(ir_expression *ir)
nir_intrinsic_deref_buffer_array_length);
ir_dereference *deref = ir->operands[0]->as_dereference();
intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->dest.ssa);
intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->def);
add_instr(&intrin->instr, 1, 32);
return;
@ -2461,9 +2461,9 @@ nir_visitor::visit(ir_texture *ir)
instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_handle, load);
} else {
instr->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
&sampler_deref->dest.ssa);
&sampler_deref->def);
instr->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
&sampler_deref->dest.ssa);
&sampler_deref->def);
}
unsigned src_number = 2;

View file

@ -1293,22 +1293,22 @@ nir_instr_ssa_def(nir_instr *instr)
return &nir_instr_as_alu(instr)->def;
case nir_instr_type_deref:
return &nir_instr_as_deref(instr)->dest.ssa;
return &nir_instr_as_deref(instr)->def;
case nir_instr_type_tex:
return &nir_instr_as_tex(instr)->dest.ssa;
return &nir_instr_as_tex(instr)->def;
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
return &intrin->dest.ssa;
return &intrin->def;
} else {
return NULL;
}
}
case nir_instr_type_phi:
return &nir_instr_as_phi(instr)->dest.ssa;
return &nir_instr_as_phi(instr)->def;
case nir_instr_type_parallel_copy:
unreachable("Parallel copies are unsupported by this function");

View file

@ -1038,10 +1038,6 @@ nir_def_used_by_if(const nir_def *def)
return false;
}
typedef struct {
nir_def ssa;
} nir_dest;
static inline nir_src
nir_src_for_ssa(nir_def *def)
{
@ -1538,7 +1534,7 @@ typedef struct {
};
/** Destination to store the resulting "pointer" */
nir_dest dest;
nir_def def;
} nir_deref_instr;
/** Returns true if deref might have one of the given modes
@ -1723,7 +1719,7 @@ typedef struct {
nir_intrinsic_op intrinsic;
nir_dest dest;
nir_def def;
/** number of components if this is a vectorized intrinsic
*
@ -2199,7 +2195,7 @@ typedef struct {
nir_texop op;
/** Destination */
nir_dest dest;
nir_def def;
/** Array of sources
*
@ -2468,7 +2464,7 @@ typedef struct {
struct exec_list srcs; /** < list of nir_phi_src */
nir_dest dest;
nir_def def;
} nir_phi_instr;
static inline nir_phi_src *
@ -2489,7 +2485,7 @@ typedef struct {
bool dest_is_reg;
nir_src src;
union {
nir_dest dest;
nir_def def;
nir_src reg;
} dest;
} nir_parallel_copy_entry;
@ -6183,13 +6179,13 @@ nir_is_store_reg(nir_intrinsic_instr *intr)
#define nir_foreach_reg_load(load, reg) \
assert(reg->intrinsic == nir_intrinsic_decl_reg); \
\
nir_foreach_use(load, &reg->dest.ssa) \
nir_foreach_use(load, &reg->def) \
if (nir_is_load_reg(nir_instr_as_intrinsic(load->parent_instr)))
#define nir_foreach_reg_store(store, reg) \
assert(reg->intrinsic == nir_intrinsic_decl_reg); \
\
nir_foreach_use(store, &reg->dest.ssa) \
nir_foreach_use(store, &reg->def) \
if (nir_is_store_reg(nir_instr_as_intrinsic(store->parent_instr)))
static inline nir_intrinsic_instr *

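Taken together, the nir.h hunks above delete the single-member nir_dest wrapper and embed nir_def directly in deref, intrinsic, tex, phi, and parallel-copy instructions. A minimal sketch of the resulting access-pattern change, assuming only the rename this commit performs (the accessor names here are illustrative, not part of NIR):

/* Before: the SSA destination sat behind the nir_dest wrapper. */
static inline nir_def *
intrin_def_before(nir_intrinsic_instr *intrin)
{
   return &intrin->dest.ssa; /* pre-commit layout */
}

/* After: the nir_def is a direct member, as the sed rewrite reads it. */
static inline nir_def *
intrin_def_after(nir_intrinsic_instr *intrin)
{
   return &intrin->def;
}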
View file

@ -256,11 +256,11 @@ nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
unsigned src_idx = 0;
tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
&texture->dest.ssa);
&texture->def);
if (sampler != NULL) {
assert(glsl_type_is_sampler(sampler->type));
tex->src[src_idx++] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
&sampler->dest.ssa);
&sampler->def);
}
for (unsigned i = 0; i < num_extra_srcs; i++) {
switch (extra_srcs[i].src_type) {
@ -304,11 +304,11 @@ nir_build_tex_deref_instr(nir_builder *build, nir_texop op,
}
assert(src_idx == num_srcs);
nir_def_init(&tex->instr, &tex->dest.ssa, nir_tex_instr_dest_size(tex),
nir_def_init(&tex->instr, &tex->def, nir_tex_instr_dest_size(tex),
nir_alu_type_get_type_size(tex->dest_type));
nir_builder_instr_insert(build, &tex->instr);
return &tex->dest.ssa;
return &tex->def;
}
nir_def *
@ -385,9 +385,9 @@ nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index,
load->num_components = num_components;
load->const_index[0] = index;
nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
return &load->def;
}
void
@ -472,12 +472,12 @@ nir_if_phi(nir_builder *build, nir_def *then_def, nir_def *else_def)
assert(then_def->num_components == else_def->num_components);
assert(then_def->bit_size == else_def->bit_size);
nir_def_init(&phi->instr, &phi->dest.ssa, then_def->num_components,
nir_def_init(&phi->instr, &phi->def, then_def->num_components,
then_def->bit_size);
nir_builder_instr_insert(build, &phi->instr);
return &phi->dest.ssa;
return &phi->def;
}
nir_loop *
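The builder helpers above all end the same way now: nir_def_init on the embedded def, insert the instruction, return &instr->def. A short usage sketch of nir_if_phi under that convention (select_with_if is illustrative):

#include "nir_builder.h"

/* Illustrative: materialize real control flow and merge the two results
 * through nir_if_phi(), which now returns &phi->def. */
static nir_def *
select_with_if(nir_builder *b, nir_def *cond)
{
   nir_push_if(b, cond);
   nir_def *then_val = nir_imm_int(b, 1);
   nir_push_else(b, NULL);
   nir_def *else_val = nir_imm_int(b, 0);
   nir_pop_if(b, NULL);
   return nir_if_phi(b, then_val, else_val);
}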

View file

@ -1298,7 +1298,7 @@ nir_build_deref_var(nir_builder *build, nir_variable *var)
deref->type = var->type;
deref->var = var;
nir_def_init(&deref->instr, &deref->dest.ssa, 1,
nir_def_init(&deref->instr, &deref->def, 1,
nir_get_ptr_bitsize(build->shader));
nir_builder_instr_insert(build, &deref->instr);
@ -1314,18 +1314,18 @@ nir_build_deref_array(nir_builder *build, nir_deref_instr *parent,
glsl_type_is_matrix(parent->type) ||
glsl_type_is_vector(parent->type));
assert(index->bit_size == parent->dest.ssa.bit_size);
assert(index->bit_size == parent->def.bit_size);
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_array);
deref->modes = parent->modes;
deref->type = glsl_get_array_element(parent->type);
deref->parent = nir_src_for_ssa(&parent->dest.ssa);
deref->parent = nir_src_for_ssa(&parent->def);
deref->arr.index = nir_src_for_ssa(index);
nir_def_init(&deref->instr, &deref->dest.ssa,
parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
nir_def_init(&deref->instr, &deref->def,
parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@ -1337,7 +1337,7 @@ nir_build_deref_array_imm(nir_builder *build, nir_deref_instr *parent,
int64_t index)
{
nir_def *idx_ssa = nir_imm_intN_t(build, index,
parent->dest.ssa.bit_size);
parent->def.bit_size);
return nir_build_deref_array(build, parent, idx_ssa);
}
@ -1350,18 +1350,18 @@ nir_build_deref_ptr_as_array(nir_builder *build, nir_deref_instr *parent,
parent->deref_type == nir_deref_type_ptr_as_array ||
parent->deref_type == nir_deref_type_cast);
assert(index->bit_size == parent->dest.ssa.bit_size);
assert(index->bit_size == parent->def.bit_size);
nir_deref_instr *deref =
nir_deref_instr_create(build->shader, nir_deref_type_ptr_as_array);
deref->modes = parent->modes;
deref->type = parent->type;
deref->parent = nir_src_for_ssa(&parent->dest.ssa);
deref->parent = nir_src_for_ssa(&parent->def);
deref->arr.index = nir_src_for_ssa(index);
nir_def_init(&deref->instr, &deref->dest.ssa,
parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
nir_def_init(&deref->instr, &deref->def,
parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@ -1379,10 +1379,10 @@ nir_build_deref_array_wildcard(nir_builder *build, nir_deref_instr *parent)
deref->modes = parent->modes;
deref->type = glsl_get_array_element(parent->type);
deref->parent = nir_src_for_ssa(&parent->dest.ssa);
deref->parent = nir_src_for_ssa(&parent->def);
nir_def_init(&deref->instr, &deref->dest.ssa,
parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
nir_def_init(&deref->instr, &deref->def,
parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@ -1400,11 +1400,11 @@ nir_build_deref_struct(nir_builder *build, nir_deref_instr *parent,
deref->modes = parent->modes;
deref->type = glsl_get_struct_field(parent->type, index);
deref->parent = nir_src_for_ssa(&parent->dest.ssa);
deref->parent = nir_src_for_ssa(&parent->def);
deref->strct.index = index;
nir_def_init(&deref->instr, &deref->dest.ssa,
parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
nir_def_init(&deref->instr, &deref->def,
parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@ -1424,7 +1424,7 @@ nir_build_deref_cast(nir_builder *build, nir_def *parent,
deref->parent = nir_src_for_ssa(parent);
deref->cast.ptr_stride = ptr_stride;
nir_def_init(&deref->instr, &deref->dest.ssa, parent->num_components,
nir_def_init(&deref->instr, &deref->def, parent->num_components,
parent->bit_size);
nir_builder_instr_insert(build, &deref->instr);
@ -1441,13 +1441,13 @@ nir_alignment_deref_cast(nir_builder *build, nir_deref_instr *parent,
deref->modes = parent->modes;
deref->type = parent->type;
deref->parent = nir_src_for_ssa(&parent->dest.ssa);
deref->parent = nir_src_for_ssa(&parent->def);
deref->cast.ptr_stride = nir_deref_instr_array_stride(deref);
deref->cast.align_mul = align_mul;
deref->cast.align_offset = align_offset;
nir_def_init(&deref->instr, &deref->dest.ssa,
parent->dest.ssa.num_components, parent->dest.ssa.bit_size);
nir_def_init(&deref->instr, &deref->def,
parent->def.num_components, parent->def.bit_size);
nir_builder_instr_insert(build, &deref->instr);
@ -1465,7 +1465,7 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
nir_deref_instr *leader)
{
/* If the derefs would have the same parent, don't make a new one */
if (leader->parent.ssa == &parent->dest.ssa)
if (leader->parent.ssa == &parent->def)
return leader;
UNUSED nir_deref_instr *leader_parent = nir_src_as_deref(leader->parent);
@ -1486,7 +1486,7 @@ nir_build_deref_follower(nir_builder *b, nir_deref_instr *parent,
if (leader->deref_type == nir_deref_type_array) {
nir_def *index = nir_i2iN(b, leader->arr.index.ssa,
parent->dest.ssa.bit_size);
parent->def.bit_size);
return nir_build_deref_array(b, parent, index);
} else {
return nir_build_deref_array_wildcard(b, parent);
@ -1509,7 +1509,7 @@ nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
enum gl_access_qualifier access)
{
return nir_build_load_deref(build, glsl_get_vector_elements(deref->type),
glsl_get_bit_size(deref->type), &deref->dest.ssa,
glsl_get_bit_size(deref->type), &deref->def,
access);
}
@ -1526,7 +1526,7 @@ nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
enum gl_access_qualifier access)
{
writemask &= (1u << value->num_components) - 1u;
nir_build_store_deref(build, &deref->dest.ssa, value, writemask, access);
nir_build_store_deref(build, &deref->def, value, writemask, access);
}
#undef nir_store_deref
@ -1544,7 +1544,7 @@ nir_copy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
nir_build_copy_deref(build, &dest->dest.ssa, &src->dest.ssa, dest_access, src_access);
nir_build_copy_deref(build, &dest->def, &src->def, dest_access, src_access);
}
#undef nir_copy_deref
@ -1562,7 +1562,7 @@ nir_memcpy_deref_with_access(nir_builder *build, nir_deref_instr *dest,
enum gl_access_qualifier dest_access,
enum gl_access_qualifier src_access)
{
nir_build_memcpy_deref(build, &dest->dest.ssa, &src->dest.ssa,
nir_build_memcpy_deref(build, &dest->def, &src->def,
size, dest_access, src_access);
}
@ -1640,9 +1640,9 @@ nir_load_global(nir_builder *build, nir_def *addr, unsigned align,
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
return &load->def;
}
#undef nir_store_global
@ -1671,9 +1671,9 @@ nir_load_global_constant(nir_builder *build, nir_def *addr, unsigned align,
load->num_components = num_components;
load->src[0] = nir_src_for_ssa(addr);
nir_intrinsic_set_align(load, align, 0);
nir_def_init(&load->instr, &load->dest.ssa, num_components, bit_size);
nir_def_init(&load->instr, &load->def, num_components, bit_size);
nir_builder_instr_insert(build, &load->instr);
return &load->dest.ssa;
return &load->def;
}
#undef nir_load_param
@ -1696,11 +1696,11 @@ nir_decl_reg(nir_builder *b, unsigned num_components, unsigned bit_size,
nir_intrinsic_set_bit_size(decl, bit_size);
nir_intrinsic_set_num_array_elems(decl, num_array_elems);
nir_intrinsic_set_divergent(decl, true);
nir_def_init(&decl->instr, &decl->dest.ssa, 1, 32);
nir_def_init(&decl->instr, &decl->def, 1, 32);
nir_instr_insert(nir_before_cf_list(&b->impl->body), &decl->instr);
return &decl->dest.ssa;
return &decl->def;
}
#undef nir_load_reg
@ -1873,10 +1873,10 @@ nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
{
unsigned num_components = op == nir_intrinsic_load_barycentric_model ? 3 : 2;
nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
nir_def_init(&bary->instr, &bary->dest.ssa, num_components, 32);
nir_def_init(&bary->instr, &bary->def, num_components, 32);
nir_intrinsic_set_interp_mode(bary, interp_mode);
nir_builder_instr_insert(build, &bary->instr);
return &bary->dest.ssa;
return &bary->def;
}
static inline void
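Chained derefs now pass &parent->def through nir_src_for_ssa. A usage sketch (load_array_elem is hypothetical; note nir_build_deref_array asserts the index bit size matches the parent deref's):

#include "nir_builder.h"

/* Illustrative: load element i of an array variable through a deref
 * chain.  Each builder returns a nir_deref_instr whose value is
 * deref->def. */
static nir_def *
load_array_elem(nir_builder *b, nir_variable *var, nir_def *i)
{
   nir_deref_instr *base = nir_build_deref_var(b, var);
   nir_deref_instr *elem = nir_build_deref_array(b, base, i);
   return nir_load_deref(b, elem);
}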

View file

@ -128,9 +128,9 @@ _nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)})
% endif
% if opcode.has_dest:
% if opcode.dest_components == 0:
nir_def_init(&intrin->instr, &intrin->dest.ssa, intrin->num_components, ${get_intrinsic_bitsize(opcode)});
nir_def_init(&intrin->instr, &intrin->def, intrin->num_components, ${get_intrinsic_bitsize(opcode)});
% else:
nir_def_init(&intrin->instr, &intrin->dest.ssa, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)});
nir_def_init(&intrin->instr, &intrin->def, ${opcode.dest_components}, ${get_intrinsic_bitsize(opcode)});
% endif
% endif
% for i in range(opcode.num_srcs):
@ -145,7 +145,7 @@ _nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)})
indices.align_mul = src${opcode.src_components.index(0)}->bit_size / 8u;
% elif ALIGN_MUL in opcode.indices and opcode.dest_components == 0:
if (!indices.align_mul)
indices.align_mul = intrin->dest.ssa.bit_size / 8u;
indices.align_mul = intrin->def.bit_size / 8u;
% endif
% for index in opcode.indices:
nir_intrinsic_set_${index.name}(intrin, indices.${index.name});
@ -153,7 +153,7 @@ _nir_build_${name}(nir_builder *build${intrinsic_decl_list(opcode)})
nir_builder_instr_insert(build, &intrin->instr);
% if opcode.has_dest:
return &intrin->dest.ssa;
return &intrin->def;
% else:
return intrin;
% endif
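For reference, the template above expands to roughly the following C for a single-destination intrinsic. This is a sketch only: nir_intrinsic_example is a made-up opcode, and the real output comes from the generator.

/* Rough expansion for a hypothetical opcode "example" with one source
 * and a fixed 1x32 destination. */
static inline nir_def *
_nir_build_example(nir_builder *build, nir_def *src0)
{
   nir_intrinsic_instr *intrin =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_example);
   nir_def_init(&intrin->instr, &intrin->def, 1, 32);
   intrin->src[0] = nir_src_for_ssa(src0);
   nir_builder_instr_insert(build, &intrin->instr);
   return &intrin->def;
}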

View file

@ -373,10 +373,10 @@ nir_get_texture_size(nir_builder *b, nir_tex_instr *tex)
/* Add in an LOD because some back-ends require it */
txs->src[idx] = nir_tex_src_for_ssa(nir_tex_src_lod, nir_imm_int(b, 0));
nir_def_init(&txs->instr, &txs->dest.ssa, nir_tex_instr_dest_size(txs), 32);
nir_def_init(&txs->instr, &txs->def, nir_tex_instr_dest_size(txs), 32);
nir_builder_instr_insert(b, &txs->instr);
return &txs->dest.ssa;
return &txs->def;
}
nir_def *
@ -424,9 +424,9 @@ nir_get_texture_lod(nir_builder *b, nir_tex_instr *tex)
}
}
nir_def_init(&tql->instr, &tql->dest.ssa, 2, 32);
nir_def_init(&tql->instr, &tql->def, 2, 32);
nir_builder_instr_insert(b, &tql->instr);
/* The LOD is the y component of the result */
return nir_channel(b, &tql->dest.ssa, 1);
return nir_channel(b, &tql->def, 1);
}
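nir_channel above is the standard single-component extract on a nir_def. A trivial sketch (swap_xy is illustrative):

#include "nir_builder.h"

/* Illustrative: swap the two components of a vec2 def. */
static nir_def *
swap_xy(nir_builder *b, nir_def *vec2)
{
   return nir_vec2(b, nir_channel(b, vec2, 1), nir_channel(b, vec2, 0));
}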

View file

@ -246,7 +246,7 @@ clone_deref_instr(clone_state *state, const nir_deref_instr *deref)
nir_deref_instr *nderef =
nir_deref_instr_create(state->ns, deref->deref_type);
__clone_def(state, &nderef->instr, &nderef->dest.ssa, &deref->dest.ssa);
__clone_def(state, &nderef->instr, &nderef->def, &deref->def);
nderef->modes = deref->modes;
nderef->type = deref->type;
@ -296,7 +296,7 @@ clone_intrinsic(clone_state *state, const nir_intrinsic_instr *itr)
unsigned num_srcs = nir_intrinsic_infos[itr->intrinsic].num_srcs;
if (nir_intrinsic_infos[itr->intrinsic].has_dest)
__clone_def(state, &nitr->instr, &nitr->dest.ssa, &itr->dest.ssa);
__clone_def(state, &nitr->instr, &nitr->def, &itr->def);
nitr->num_components = itr->num_components;
memcpy(nitr->const_index, itr->const_index, sizeof(nitr->const_index));
@ -341,7 +341,7 @@ clone_tex(clone_state *state, const nir_tex_instr *tex)
ntex->sampler_dim = tex->sampler_dim;
ntex->dest_type = tex->dest_type;
ntex->op = tex->op;
__clone_def(state, &ntex->instr, &ntex->dest.ssa, &tex->dest.ssa);
__clone_def(state, &ntex->instr, &ntex->def, &tex->def);
for (unsigned i = 0; i < ntex->num_srcs; i++) {
ntex->src[i].src_type = tex->src[i].src_type;
__clone_src(state, &ntex->instr, &ntex->src[i].src, &tex->src[i].src);
@ -371,7 +371,7 @@ clone_phi(clone_state *state, const nir_phi_instr *phi, nir_block *nblk)
{
nir_phi_instr *nphi = nir_phi_instr_create(state->ns);
__clone_def(state, &nphi->instr, &nphi->dest.ssa, &phi->dest.ssa);
__clone_def(state, &nphi->instr, &nphi->def, &phi->def);
/* Cloning a phi node is a bit different from other instructions. The
* sources of phi instructions are the only time where we can use an SSA

View file

@ -225,8 +225,8 @@ nir_insert_phi_undef(nir_block *block, nir_block *pred)
nir_foreach_phi(phi, block) {
nir_undef_instr *undef =
nir_undef_instr_create(impl->function->shader,
phi->dest.ssa.num_components,
phi->dest.ssa.bit_size);
phi->def.num_components,
phi->def.bit_size);
nir_instr_insert_before_cf_list(&impl->body, &undef->instr);
nir_phi_src *src = nir_phi_instr_add_src(phi, pred, nir_src_for_ssa(&undef->def));
list_addtail(&src->src.use_link, &undef->def.uses);

View file

@ -35,8 +35,8 @@ is_trivial_deref_cast(nir_deref_instr *cast)
return cast->modes == parent->modes &&
cast->type == parent->type &&
cast->dest.ssa.num_components == parent->dest.ssa.num_components &&
cast->dest.ssa.bit_size == parent->dest.ssa.bit_size;
cast->def.num_components == parent->def.num_components &&
cast->def.bit_size == parent->def.bit_size;
}
void
@ -109,7 +109,7 @@ nir_deref_instr_remove_if_unused(nir_deref_instr *instr)
for (nir_deref_instr *d = instr; d; d = nir_deref_instr_parent(d)) {
/* If anyone is using this deref, leave it alone */
if (!nir_def_is_unused(&d->dest.ssa))
if (!nir_def_is_unused(&d->def))
break;
nir_instr_remove(&d->instr);
@ -156,7 +156,7 @@ bool
nir_deref_instr_has_complex_use(nir_deref_instr *deref,
nir_deref_instr_has_complex_use_options opts)
{
nir_foreach_use_including_if(use_src, &deref->dest.ssa) {
nir_foreach_use_including_if(use_src, &deref->def) {
if (use_src->is_if)
return true;
@ -346,7 +346,7 @@ nir_build_deref_offset(nir_builder *b, nir_deref_instr *deref,
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
nir_def *offset = nir_imm_intN_t(b, 0, deref->dest.ssa.bit_size);
nir_def *offset = nir_imm_intN_t(b, 0, deref->def.bit_size);
for (nir_deref_instr **p = &path.path[1]; *p; p++) {
switch ((*p)->deref_type) {
case nir_deref_type_array:
@ -774,7 +774,7 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
nir_deref_instr *parent = nir_src_as_deref(deref->parent);
if (parent) {
parent = rematerialize_deref_in_block(parent, state);
new_deref->parent = nir_src_for_ssa(&parent->dest.ssa);
new_deref->parent = nir_src_for_ssa(&parent->def);
} else {
nir_src_copy(&new_deref->parent, &deref->parent, &new_deref->instr);
}
@ -806,8 +806,8 @@ rematerialize_deref_in_block(nir_deref_instr *deref,
unreachable("Invalid deref instruction type");
}
nir_def_init(&new_deref->instr, &new_deref->dest.ssa,
deref->dest.ssa.num_components, deref->dest.ssa.bit_size);
nir_def_init(&new_deref->instr, &new_deref->def,
deref->def.num_components, deref->def.bit_size);
nir_builder_instr_insert(b, &new_deref->instr);
return new_deref;
@ -825,7 +825,7 @@ rematerialize_deref_src(nir_src *src, void *_state)
nir_deref_instr *block_deref = rematerialize_deref_in_block(deref, state);
if (block_deref != deref) {
nir_instr_rewrite_src(src->parent_instr, src,
nir_src_for_ssa(&block_deref->dest.ssa));
nir_src_for_ssa(&block_deref->def));
nir_deref_instr_remove_if_unused(deref);
state->progress = true;
}
@ -885,7 +885,7 @@ nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl)
static void
nir_deref_instr_fixup_child_types(nir_deref_instr *parent)
{
nir_foreach_use(use, &parent->dest.ssa) {
nir_foreach_use(use, &parent->def) {
if (use->parent_instr->type != nir_instr_type_deref)
continue;
@ -1121,8 +1121,8 @@ opt_remove_sampler_cast(nir_deref_instr *cast)
/* We're a cast from a more detailed sampler type to a bare sampler or a
* texture type with the same dimensionality.
*/
nir_def_rewrite_uses(&cast->dest.ssa,
&parent->dest.ssa);
nir_def_rewrite_uses(&cast->def,
&parent->def);
nir_instr_remove(&cast->instr);
/* Recursively crawl the deref tree and clean up types */
@ -1169,7 +1169,7 @@ opt_replace_struct_wrapper_cast(nir_builder *b, nir_deref_instr *cast)
return false;
nir_deref_instr *replace = nir_build_deref_struct(b, parent, 0);
nir_def_rewrite_uses(&cast->dest.ssa, &replace->dest.ssa);
nir_def_rewrite_uses(&cast->def, &replace->def);
nir_deref_instr_remove_if_unused(cast);
return true;
}
@ -1199,7 +1199,7 @@ opt_deref_cast(nir_builder *b, nir_deref_instr *cast)
bool trivial_array_cast = is_trivial_array_deref_cast(cast);
nir_foreach_use_including_if_safe(use_src, &cast->dest.ssa) {
nir_foreach_use_including_if_safe(use_src, &cast->def) {
assert(!use_src->is_if && "there cannot be if-uses");
/* If this isn't a trivial array cast, we can't propagate into
@ -1242,8 +1242,8 @@ opt_deref_ptr_as_array(nir_builder *b, nir_deref_instr *deref)
parent->cast.align_mul == 0 &&
is_trivial_deref_cast(parent))
parent = nir_deref_instr_parent(parent);
nir_def_rewrite_uses(&deref->dest.ssa,
&parent->dest.ssa);
nir_def_rewrite_uses(&deref->def,
&parent->def);
nir_instr_remove(&deref->instr);
return true;
}
@ -1331,15 +1331,15 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load)
{
nir_deref_instr *deref = nir_src_as_deref(load->src[0]);
nir_component_mask_t read_mask =
nir_def_components_read(&load->dest.ssa);
nir_def_components_read(&load->def);
/* LLVM loves to take advantage of the fact that vec3s in OpenCL are
* vec4-aligned and so it can just read/write them as vec4s. This
* results in a LOT of vec4->vec3 casts on loads and stores.
*/
if (is_vector_bitcast_deref(deref, read_mask, false)) {
const unsigned old_num_comps = load->dest.ssa.num_components;
const unsigned old_bit_size = load->dest.ssa.bit_size;
const unsigned old_num_comps = load->def.num_components;
const unsigned old_bit_size = load->def.bit_size;
nir_deref_instr *parent = nir_src_as_deref(deref->parent);
const unsigned new_num_comps = glsl_get_vector_elements(parent->type);
@ -1347,18 +1347,18 @@ opt_load_vec_deref(nir_builder *b, nir_intrinsic_instr *load)
/* Stomp it to reference the parent */
nir_instr_rewrite_src(&load->instr, &load->src[0],
nir_src_for_ssa(&parent->dest.ssa));
load->dest.ssa.bit_size = new_bit_size;
load->dest.ssa.num_components = new_num_comps;
nir_src_for_ssa(&parent->def));
load->def.bit_size = new_bit_size;
load->def.num_components = new_num_comps;
load->num_components = new_num_comps;
b->cursor = nir_after_instr(&load->instr);
nir_def *data = &load->dest.ssa;
nir_def *data = &load->def;
if (old_bit_size != new_bit_size)
data = nir_bitcast_vector(b, &load->dest.ssa, old_bit_size);
data = nir_bitcast_vector(b, &load->def, old_bit_size);
data = resize_vector(b, data, old_num_comps);
nir_def_rewrite_uses_after(&load->dest.ssa, data,
nir_def_rewrite_uses_after(&load->def, data,
data->parent_instr);
return true;
}
@ -1386,7 +1386,7 @@ opt_store_vec_deref(nir_builder *b, nir_intrinsic_instr *store)
const unsigned new_bit_size = glsl_get_bit_size(parent->type);
nir_instr_rewrite_src(&store->instr, &store->src[0],
nir_src_for_ssa(&parent->dest.ssa));
nir_src_for_ssa(&parent->def));
/* Restrict things down as needed so the bitcast doesn't fail */
data = nir_trim_vector(b, data, util_last_bit(write_mask));
@ -1426,7 +1426,7 @@ opt_known_deref_mode_is(nir_builder *b, nir_intrinsic_instr *intrin)
if (deref_is == NULL)
return false;
nir_def_rewrite_uses(&intrin->dest.ssa, deref_is);
nir_def_rewrite_uses(&intrin->def, deref_is);
nir_instr_remove(&intrin->instr);
return true;
}

View file

@ -83,7 +83,7 @@ visit_intrinsic(nir_shader *shader, nir_intrinsic_instr *instr)
if (!nir_intrinsic_infos[instr->intrinsic].has_dest)
return false;
if (instr->dest.ssa.divergent)
if (instr->def.divergent)
return false;
nir_divergence_options options = shader->options->divergence_analysis_options;
@ -620,14 +620,14 @@ visit_intrinsic(nir_shader *shader, nir_intrinsic_instr *instr)
#endif
}
instr->dest.ssa.divergent = is_divergent;
instr->def.divergent = is_divergent;
return is_divergent;
}
static bool
visit_tex(nir_tex_instr *instr)
{
if (instr->dest.ssa.divergent)
if (instr->def.divergent)
return false;
bool is_divergent = false;
@ -652,7 +652,7 @@ visit_tex(nir_tex_instr *instr)
}
}
instr->dest.ssa.divergent = is_divergent;
instr->def.divergent = is_divergent;
return is_divergent;
}
@ -716,7 +716,7 @@ nir_variable_is_uniform(nir_shader *shader, nir_variable *var)
static bool
visit_deref(nir_shader *shader, nir_deref_instr *deref)
{
if (deref->dest.ssa.divergent)
if (deref->def.divergent)
return false;
bool is_divergent = false;
@ -738,7 +738,7 @@ visit_deref(nir_shader *shader, nir_deref_instr *deref)
break;
}
deref->dest.ssa.divergent = is_divergent;
deref->def.divergent = is_divergent;
return is_divergent;
}
@ -834,14 +834,14 @@ visit_block(nir_block *block, struct divergence_state *state)
static bool
visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent)
{
if (phi->dest.ssa.divergent)
if (phi->def.divergent)
return false;
unsigned defined_srcs = 0;
nir_foreach_phi_src(src, phi) {
/* if any source value is divergent, the resulting value is divergent */
if (src->src.ssa->divergent) {
phi->dest.ssa.divergent = true;
phi->def.divergent = true;
return true;
}
if (src->src.ssa->parent_instr->type != nir_instr_type_ssa_undef) {
@ -851,7 +851,7 @@ visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent)
/* if the condition is divergent and two sources defined, the definition is divergent */
if (defined_srcs > 1 && if_cond_divergent) {
phi->dest.ssa.divergent = true;
phi->def.divergent = true;
return true;
}
@ -867,14 +867,14 @@ visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent)
static bool
visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_continue)
{
if (phi->dest.ssa.divergent)
if (phi->def.divergent)
return false;
nir_def *same = NULL;
nir_foreach_phi_src(src, phi) {
/* if any source value is divergent, the resulting value is divergent */
if (src->src.ssa->divergent) {
phi->dest.ssa.divergent = true;
phi->def.divergent = true;
return true;
}
/* if this loop is uniform, we're done here */
@ -891,7 +891,7 @@ visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_c
if (!same)
same = src->src.ssa;
else if (same != src->src.ssa) {
phi->dest.ssa.divergent = true;
phi->def.divergent = true;
return true;
}
}
@ -908,18 +908,18 @@ visit_loop_header_phi(nir_phi_instr *phi, nir_block *preheader, bool divergent_c
static bool
visit_loop_exit_phi(nir_phi_instr *phi, bool divergent_break)
{
if (phi->dest.ssa.divergent)
if (phi->def.divergent)
return false;
if (divergent_break) {
phi->dest.ssa.divergent = true;
phi->def.divergent = true;
return true;
}
/* if any source value is divergent, the resulting value is divergent */
nir_foreach_phi_src(src, phi) {
if (src->src.ssa->divergent) {
phi->dest.ssa.divergent = true;
phi->def.divergent = true;
return true;
}
}
@ -943,7 +943,7 @@ visit_if(nir_if *if_stmt, struct divergence_state *state)
/* handle phis after the IF */
nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&if_stmt->cf_node)) {
if (state->first_visit)
phi->dest.ssa.divergent = false;
phi->def.divergent = false;
progress |= visit_if_merge_phi(phi, if_stmt->condition.ssa->divergent);
}
@ -972,16 +972,16 @@ visit_loop(nir_loop *loop, struct divergence_state *state)
/* handle loop header phis first: we have no knowledge yet about
* the loop's control flow or any loop-carried sources. */
nir_foreach_phi(phi, loop_header) {
if (!state->first_visit && phi->dest.ssa.divergent)
if (!state->first_visit && phi->def.divergent)
continue;
nir_foreach_phi_src(src, phi) {
if (src->pred == loop_preheader) {
phi->dest.ssa.divergent = src->src.ssa->divergent;
phi->def.divergent = src->src.ssa->divergent;
break;
}
}
progress |= phi->dest.ssa.divergent;
progress |= phi->def.divergent;
}
/* setup loop state */
@ -1009,7 +1009,7 @@ visit_loop(nir_loop *loop, struct divergence_state *state)
/* handle phis after the loop */
nir_foreach_phi(phi, nir_cf_node_cf_tree_next(&loop->cf_node)) {
if (state->first_visit)
phi->dest.ssa.divergent = false;
phi->def.divergent = false;
progress |= visit_loop_exit_phi(phi, loop_state.divergent_loop_break);
}
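After this commit the divergence flag computed by nir_divergence_analysis lives directly on nir_def. A sketch of querying it (result_is_divergent is hypothetical and assumes the analysis has already run):

#include "nir.h"

/* Hypothetical query; valid only after nir_divergence_analysis() has
 * run, and not applicable to parallel copies. */
static bool
result_is_divergent(nir_instr *instr)
{
   nir_def *def = nir_instr_ssa_def(instr);
   return def != NULL && def->divergent;
}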

View file

@ -401,31 +401,31 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, void *dead_ctx)
nir_parallel_copy_entry);
entry->src_is_reg = false;
entry->dest_is_reg = false;
nir_def_init(&pcopy->instr, &entry->dest.dest.ssa,
phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
entry->dest.dest.ssa.divergent = nir_src_is_divergent(src->src);
nir_def_init(&pcopy->instr, &entry->dest.def,
phi->def.num_components, phi->def.bit_size);
entry->dest.def.divergent = nir_src_is_divergent(src->src);
exec_list_push_tail(&pcopy->entries, &entry->node);
nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);
nir_instr_rewrite_src(&phi->instr, &src->src,
nir_src_for_ssa(&entry->dest.dest.ssa));
nir_src_for_ssa(&entry->dest.def));
}
nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
nir_parallel_copy_entry);
entry->src_is_reg = false;
entry->dest_is_reg = false;
nir_def_init(&block_pcopy->instr, &entry->dest.dest.ssa,
phi->dest.ssa.num_components, phi->dest.ssa.bit_size);
entry->dest.dest.ssa.divergent = phi->dest.ssa.divergent;
nir_def_init(&block_pcopy->instr, &entry->dest.def,
phi->def.num_components, phi->def.bit_size);
entry->dest.def.divergent = phi->def.divergent;
exec_list_push_tail(&block_pcopy->entries, &entry->node);
nir_def_rewrite_uses(&phi->dest.ssa,
&entry->dest.dest.ssa);
nir_def_rewrite_uses(&phi->def,
&entry->dest.def);
nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
nir_src_for_ssa(&phi->dest.ssa));
nir_src_for_ssa(&phi->def));
}
return true;
@ -435,7 +435,7 @@ static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
nir_foreach_phi(phi, block) {
merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);
merge_node *dest_node = get_merge_node(&phi->def, state);
nir_foreach_phi_src(src, phi) {
if (nir_src_is_undef(src->src))
@ -457,7 +457,7 @@ aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->src_is_reg);
assert(!entry->dest_is_reg);
assert(entry->dest.dest.ssa.num_components ==
assert(entry->dest.def.num_components ==
entry->src.ssa->num_components);
/* Since load_const instructions are SSA only, we can't replace their
@ -467,7 +467,7 @@ aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
continue;
merge_node *src_node = get_merge_node(entry->src.ssa, state);
merge_node *dest_node = get_merge_node(&entry->dest.dest.ssa, state);
merge_node *dest_node = get_merge_node(&entry->dest.def, state);
if (src_node->set == dest_node->set)
continue;
@ -557,7 +557,7 @@ nir_rewrite_uses_to_load_reg(nir_builder *b, nir_def *old,
if (intr->intrinsic == nir_intrinsic_load_reg &&
intr->src[0].ssa == reg &&
nir_intrinsic_base(intr) == 0)
load = &intr->dest.ssa;
load = &intr->def;
}
}
@ -621,7 +621,7 @@ remove_no_op_phi(nir_instr *instr, struct from_ssa_state *state)
nir_phi_instr *phi = nir_instr_as_phi(instr);
struct hash_entry *entry =
_mesa_hash_table_search(state->merge_node_table, &phi->dest.ssa);
_mesa_hash_table_search(state->merge_node_table, &phi->def);
assert(entry != NULL);
merge_node *node = (merge_node *)entry->data;
@ -716,10 +716,10 @@ resolve_registers_impl(nir_function_impl *impl, struct from_ssa_state *state)
nir_foreach_parallel_copy_entry(entry, pcopy) {
assert(!entry->dest_is_reg);
assert(nir_def_is_unused(&entry->dest.dest.ssa));
assert(nir_def_is_unused(&entry->dest.def));
/* Parallel copy destinations will always be registers */
nir_def *reg = reg_for_ssa_def(&entry->dest.dest.ssa, state);
nir_def *reg = reg_for_ssa_def(&entry->dest.def, state);
assert(reg != NULL);
entry->dest_is_reg = true;
@ -1157,10 +1157,10 @@ nir_lower_phis_to_regs_block(nir_block *block)
bool progress = false;
nir_foreach_phi_safe(phi, block) {
nir_def *reg = decl_reg_for_ssa_def(&b, &phi->dest.ssa);
nir_def *reg = decl_reg_for_ssa_def(&b, &phi->def);
b.cursor = nir_after_instr(&phi->instr);
nir_def_rewrite_uses(&phi->dest.ssa, nir_load_reg(&b, reg));
nir_def_rewrite_uses(&phi->def, nir_load_reg(&b, reg));
nir_foreach_phi_src(src, phi) {

View file

@ -152,7 +152,7 @@ nir_gather_types(nir_function_impl *impl,
nir_tex_instr_src_type(tex, i),
float_types, int_types, &progress);
}
set_type(tex->dest.ssa.index, tex->dest_type,
set_type(tex->def.index, tex->dest_type,
float_types, int_types, &progress);
break;
}
@ -162,7 +162,7 @@ nir_gather_types(nir_function_impl *impl,
nir_alu_type dest_type = nir_intrinsic_instr_dest_type(intrin);
if (dest_type != nir_type_invalid) {
set_type(intrin->dest.ssa.index, dest_type,
set_type(intrin->def.index, dest_type,
float_types, int_types, &progress);
}
@ -180,7 +180,7 @@ nir_gather_types(nir_function_impl *impl,
case nir_instr_type_phi: {
nir_phi_instr *phi = nir_instr_as_phi(instr);
nir_foreach_phi_src(src, phi) {
copy_types(src->src, &phi->dest.ssa,
copy_types(src->src, &phi->def,
float_types, int_types, &progress);
}
break;

View file

@ -84,7 +84,7 @@ nir_inline_function_impl(struct nir_builder *b,
unsigned param_idx = nir_intrinsic_param_idx(load);
assert(param_idx < impl->function->num_params);
nir_def_rewrite_uses(&load->dest.ssa,
nir_def_rewrite_uses(&load->def,
params[param_idx]);
/* Remove any left-over load_param intrinsics because they're soon

View file

@ -8,20 +8,20 @@ _nir_foreach_def(nir_instr *instr, nir_foreach_def_cb cb, void *state)
case nir_instr_type_alu:
return cb(&nir_instr_as_alu(instr)->def, state);
case nir_instr_type_deref:
return cb(&nir_instr_as_deref(instr)->dest.ssa, state);
return cb(&nir_instr_as_deref(instr)->def, state);
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (nir_intrinsic_infos[intrin->intrinsic].has_dest)
return cb(&intrin->dest.ssa, state);
return cb(&intrin->def, state);
return true;
}
case nir_instr_type_tex:
return cb(&nir_instr_as_tex(instr)->dest.ssa, state);
return cb(&nir_instr_as_tex(instr)->def, state);
case nir_instr_type_phi:
return cb(&nir_instr_as_phi(instr)->dest.ssa, state);
return cb(&nir_instr_as_phi(instr)->def, state);
case nir_instr_type_parallel_copy: {
nir_foreach_parallel_copy_entry(entry, nir_instr_as_parallel_copy(instr)) {
if (!entry->dest_is_reg && !cb(&entry->dest.dest.ssa, state))
if (!entry->dest_is_reg && !cb(&entry->dest.def, state))
return false;
}
return true;

View file

@ -128,7 +128,7 @@ nir_collect_src_uniforms(const nir_src *src, int component,
nir_src_is_const(intr->src[1]) &&
nir_src_as_uint(intr->src[1]) <= max_offset &&
/* TODO: Can't handle other bit sizes for now. */
intr->dest.ssa.bit_size == 32) {
intr->def.bit_size == 32) {
/* num_offsets can be NULL if-and-only-if uni_offsets is NULL. */
assert((num_offsets == NULL) == (uni_offsets == NULL));
@ -399,8 +399,8 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
nir_src_as_uint(intr->src[0]) == 0 &&
nir_src_is_const(intr->src[1]) &&
/* TODO: Can't handle other bit sizes for now. */
intr->dest.ssa.bit_size == 32) {
int num_components = intr->dest.ssa.num_components;
intr->def.bit_size == 32) {
int num_components = intr->def.num_components;
uint32_t offset = nir_src_as_uint(intr->src[1]) / 4;
if (num_components == 1) {
@ -409,7 +409,7 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
if (offset == uniform_dw_offsets[i]) {
b.cursor = nir_before_instr(&intr->instr);
nir_def *def = nir_imm_int(&b, uniform_values[i]);
nir_def_rewrite_uses(&intr->dest.ssa, def);
nir_def_rewrite_uses(&intr->def, def);
nir_instr_remove(&intr->instr);
break;
}
@ -441,7 +441,7 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
for (unsigned i = 0; i < num_components; i++) {
if (!components[i]) {
uint32_t scalar_offset = (offset + i) * 4;
components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
components[i] = nir_load_ubo(&b, 1, intr->def.bit_size,
intr->src[0].ssa,
nir_imm_int(&b, scalar_offset));
nir_intrinsic_instr *load =
@ -453,7 +453,7 @@ nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
}
/* Replace the original uniform load. */
nir_def_rewrite_uses(&intr->dest.ssa,
nir_def_rewrite_uses(&intr->def,
nir_vec(&b, components, num_components));
nir_instr_remove(&intr->instr);
}
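The core replacement step of the pass above, as a standalone sketch (fold_known_load is illustrative; value would come from uniform_values[i]):

#include "nir_builder.h"

/* Illustrative: fold a single-component 32-bit UBO load whose value is
 * known at specialization time into an immediate. */
static void
fold_known_load(nir_builder *b, nir_intrinsic_instr *intr, uint32_t value)
{
   b->cursor = nir_before_instr(&intr->instr);
   nir_def_rewrite_uses(&intr->def, nir_imm_int(b, value));
   nir_instr_remove(&intr->instr);
}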

View file

@ -213,8 +213,8 @@ hash_intrinsic(uint32_t hash, const nir_intrinsic_instr *instr)
hash = HASH(hash, instr->intrinsic);
if (info->has_dest) {
hash = HASH(hash, instr->dest.ssa.num_components);
hash = HASH(hash, instr->dest.ssa.bit_size);
hash = HASH(hash, instr->def.num_components);
hash = HASH(hash, instr->def.bit_size);
}
hash = XXH32(instr->const_index, info->num_indices * sizeof(instr->const_index[0]), hash);
@ -653,9 +653,9 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
/* In case of phis with no sources, the dest needs to be checked
* to ensure that phis with incompatible dests won't get merged
* during CSE. */
if (phi1->dest.ssa.num_components != phi2->dest.ssa.num_components)
if (phi1->def.num_components != phi2->def.num_components)
return false;
if (phi1->dest.ssa.bit_size != phi2->dest.ssa.bit_size)
if (phi1->def.bit_size != phi2->def.bit_size)
return false;
nir_foreach_phi_src(src1, phi1) {
@ -681,12 +681,12 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
intrinsic1->num_components != intrinsic2->num_components)
return false;
if (info->has_dest && intrinsic1->dest.ssa.num_components !=
intrinsic2->dest.ssa.num_components)
if (info->has_dest && intrinsic1->def.num_components !=
intrinsic2->def.num_components)
return false;
if (info->has_dest && intrinsic1->dest.ssa.bit_size !=
intrinsic2->dest.ssa.bit_size)
if (info->has_dest && intrinsic1->def.bit_size !=
intrinsic2->def.bit_size)
return false;
for (unsigned i = 0; i < info->num_srcs; i++) {
@ -713,21 +713,21 @@ nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2)
}
static nir_def *
nir_instr_get_dest_ssa_def(nir_instr *instr)
nir_instr_get_def_def(nir_instr *instr)
{
switch (instr->type) {
case nir_instr_type_alu:
return &nir_instr_as_alu(instr)->def;
case nir_instr_type_deref:
return &nir_instr_as_deref(instr)->dest.ssa;
return &nir_instr_as_deref(instr)->def;
case nir_instr_type_load_const:
return &nir_instr_as_load_const(instr)->def;
case nir_instr_type_phi:
return &nir_instr_as_phi(instr)->dest.ssa;
return &nir_instr_as_phi(instr)->def;
case nir_instr_type_intrinsic:
return &nir_instr_as_intrinsic(instr)->dest.ssa;
return &nir_instr_as_intrinsic(instr)->def;
case nir_instr_type_tex:
return &nir_instr_as_tex(instr)->dest.ssa;
return &nir_instr_as_tex(instr)->def;
default:
unreachable("We never ask for any of these");
}
@ -766,8 +766,8 @@ nir_instr_set_add_or_rewrite(struct set *instr_set, nir_instr *instr,
if (!cond_function || cond_function(match, instr)) {
/* rewrite instruction if condition is matched */
nir_def *def = nir_instr_get_dest_ssa_def(instr);
nir_def *new_def = nir_instr_get_dest_ssa_def(match);
nir_def *def = nir_instr_get_def_def(instr);
nir_def *new_def = nir_instr_get_def_def(match);
/* It's safe to replace an exact instruction with an inexact one as
* long as we make it exact. If we got here, the two instructions are

View file

@ -293,7 +293,7 @@ fuse_mods_with_registers(nir_builder *b, nir_instr *instr, void *fuse_fabs_)
assert(!use->is_if);
assert(use->parent_instr->type == nir_instr_type_alu);
nir_alu_src *alu_use = list_entry(use, nir_alu_src, src);
nir_src_rewrite(&alu_use->src, &load->dest.ssa);
nir_src_rewrite(&alu_use->src, &load->def);
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
alu_use->swizzle[i] = alu->src[0].swizzle[alu_use->swizzle[i]];
}

View file

@ -1074,10 +1074,10 @@ replace_varying_input_by_constant_load(nir_shader *shader,
/* Add new const to replace the input */
nir_def *nconst = nir_build_imm(&b, store_intr->num_components,
intr->dest.ssa.bit_size,
intr->def.bit_size,
out_const->value);
nir_def_rewrite_uses(&intr->dest.ssa, nconst);
nir_def_rewrite_uses(&intr->def, nconst);
progress = true;
}
@ -1123,7 +1123,7 @@ replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
b.cursor = nir_before_instr(instr);
nir_def *load = nir_load_var(&b, input_var);
nir_def_rewrite_uses(&intr->dest.ssa, load);
nir_def_rewrite_uses(&intr->def, load);
progress = true;
}
@ -1210,7 +1210,7 @@ clone_deref_instr(nir_builder *b, nir_variable *var, nir_deref_instr *deref)
nir_load_const_instr *index =
nir_instr_as_load_const(deref->arr.index.ssa->parent_instr);
nir_def *ssa = nir_imm_intN_t(b, index->value->i64,
parent->dest.ssa.bit_size);
parent->def.bit_size);
return nir_build_deref_ptr_as_array(b, parent, ssa);
}
case nir_deref_type_struct:
@ -1271,7 +1271,7 @@ replace_varying_input_by_uniform_load(nir_shader *shader,
}
/* Replace load input with load uniform. */
nir_def_rewrite_uses(&intr->dest.ssa, uni_def);
nir_def_rewrite_uses(&intr->def, uni_def);
progress = true;
}

View file

@ -106,7 +106,7 @@ propagate_across_edge(nir_block *pred, nir_block *succ,
memcpy(live, succ->live_in, state->bitset_words * sizeof *live);
nir_foreach_phi(phi, succ) {
set_ssa_def_dead(&phi->dest.ssa, live);
set_ssa_def_dead(&phi->def, live);
}
nir_foreach_phi(phi, succ) {

View file

@ -460,7 +460,7 @@ compute_induction_information(loop_info_state *state)
/* Is one of the operands const or uniform, and the other the phi.
* The phi source can't be swizzled in any way.
*/
if (alu->src[1 - i].src.ssa == &phi->dest.ssa &&
if (alu->src[1 - i].src.ssa == &phi->def &&
alu_src_has_identity_swizzle(alu, 1 - i)) {
if (is_only_uniform_src(&alu->src[i].src))
var->update_src = alu->src + i;

View file

@ -140,19 +140,19 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
/* Turn the load into a vector load */
nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
nir_src_for_ssa(&vec_deref->dest.ssa));
intrin->dest.ssa.num_components = num_components;
nir_src_for_ssa(&vec_deref->def));
intrin->def.num_components = num_components;
intrin->num_components = num_components;
nir_def *index = nir_ssa_for_src(&b, deref->arr.index, 1);
nir_def *scalar =
nir_vector_extract(&b, &intrin->dest.ssa, index);
nir_vector_extract(&b, &intrin->def, index);
if (scalar->parent_instr->type == nir_instr_type_ssa_undef) {
nir_def_rewrite_uses(&intrin->dest.ssa,
nir_def_rewrite_uses(&intrin->def,
scalar);
nir_instr_remove(&intrin->instr);
} else {
nir_def_rewrite_uses_after(&intrin->dest.ssa,
nir_def_rewrite_uses_after(&intrin->def,
scalar,
scalar->parent_instr);
}

View file

@ -153,20 +153,20 @@ lower_instr(nir_intrinsic_instr *instr, unsigned ssbo_offset, nir_builder *b, un
* num_components with one that has variable number. So
* best to take this from the dest:
*/
new_instr->num_components = instr->dest.ssa.num_components;
new_instr->num_components = instr->def.num_components;
}
nir_def_init(&new_instr->instr, &new_instr->dest.ssa,
instr->dest.ssa.num_components, instr->dest.ssa.bit_size);
nir_def_init(&new_instr->instr, &new_instr->def,
instr->def.num_components, instr->def.bit_size);
nir_instr_insert_before(&instr->instr, &new_instr->instr);
nir_instr_remove(&instr->instr);
if (instr->intrinsic == nir_intrinsic_atomic_counter_pre_dec) {
b->cursor = nir_after_instr(&new_instr->instr);
nir_def *result = nir_iadd(b, &new_instr->dest.ssa, temp);
nir_def_rewrite_uses(&instr->dest.ssa, result);
nir_def *result = nir_iadd(b, &new_instr->def, temp);
nir_def_rewrite_uses(&instr->def, result);
} else {
nir_def_rewrite_uses(&instr->dest.ssa, &new_instr->dest.ssa);
nir_def_rewrite_uses(&instr->def, &new_instr->def);
}
return true;

View file

@ -146,7 +146,7 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
case nir_intrinsic_reduce:
case nir_intrinsic_inclusive_scan:
case nir_intrinsic_exclusive_scan: {
const unsigned old_bit_size = intrin->dest.ssa.bit_size;
const unsigned old_bit_size = intrin->def.bit_size;
assert(old_bit_size < bit_size);
nir_alu_type type = nir_type_uint;
@ -166,18 +166,18 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
if (intrin->intrinsic == nir_intrinsic_vote_feq ||
intrin->intrinsic == nir_intrinsic_vote_ieq) {
/* These return a Boolean; it's always 1-bit */
assert(new_intrin->dest.ssa.bit_size == 1);
assert(new_intrin->def.bit_size == 1);
} else {
/* These return the same bit size as the source; we need to adjust
* the size and then we'll have to emit a down-cast.
*/
assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);
new_intrin->dest.ssa.bit_size = bit_size;
assert(intrin->src[0].ssa->bit_size == intrin->def.bit_size);
new_intrin->def.bit_size = bit_size;
}
nir_builder_instr_insert(b, &new_intrin->instr);
nir_def *res = &new_intrin->dest.ssa;
nir_def *res = &new_intrin->def;
if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
/* For exclusive scan, we have to be careful because the identity
* value for the higher bit size may get added into the mix by
@ -205,7 +205,7 @@ lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
intrin->intrinsic != nir_intrinsic_vote_ieq)
res = nir_u2uN(b, res, old_bit_size);
nir_def_rewrite_uses(&intrin->dest.ssa, res);
nir_def_rewrite_uses(&intrin->def, res);
break;
}
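The widening path runs the scan at the larger bit size and narrows the result with nir_u2uN. A sketch of that conversion pair (widen_then_narrow is illustrative):

#include "nir_builder.h"

/* Illustrative: widen, operate, narrow.  nir_u2uN zero-extends when
 * growing a value and truncates when shrinking it. */
static nir_def *
widen_then_narrow(nir_builder *b, nir_def *val, unsigned wide_bits)
{
   unsigned orig_bits = val->bit_size;
   nir_def *wide = nir_u2uN(b, val, wide_bits);
   /* ...the wide-width subgroup operation would be emitted here... */
   return nir_u2uN(b, wide, orig_bits);
}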
@ -218,7 +218,7 @@ static void
lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
nir_phi_instr *last_phi)
{
unsigned old_bit_size = phi->dest.ssa.bit_size;
unsigned old_bit_size = phi->def.bit_size;
assert(old_bit_size < bit_size);
nir_foreach_phi_src(src, phi) {
@ -228,12 +228,12 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi, unsigned bit_size,
nir_instr_rewrite_src(&phi->instr, &src->src, nir_src_for_ssa(new_src));
}
phi->dest.ssa.bit_size = bit_size;
phi->def.bit_size = bit_size;
b->cursor = nir_after_instr(&last_phi->instr);
nir_def *new_dest = nir_u2uN(b, &phi->dest.ssa, old_bit_size);
nir_def_rewrite_uses_after(&phi->dest.ssa, new_dest,
nir_def *new_dest = nir_u2uN(b, &phi->def, old_bit_size);
nir_def_rewrite_uses_after(&phi->def, new_dest,
new_dest->parent_instr);
}
@ -307,8 +307,8 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
nir_phi_instr_create(b->shader),
nir_phi_instr_create(b->shader)
};
int num_components = phi->dest.ssa.num_components;
assert(phi->dest.ssa.bit_size == 64);
int num_components = phi->def.num_components;
assert(phi->def.bit_size == 64);
nir_foreach_phi_src(src, phi) {
assert(num_components == src->src.ssa->num_components);
@ -322,16 +322,16 @@ split_phi(nir_builder *b, nir_phi_instr *phi)
nir_phi_instr_add_src(lowered[1], src->pred, nir_src_for_ssa(y));
}
nir_def_init(&lowered[0]->instr, &lowered[0]->dest.ssa, num_components, 32);
nir_def_init(&lowered[1]->instr, &lowered[1]->dest.ssa, num_components, 32);
nir_def_init(&lowered[0]->instr, &lowered[0]->def, num_components, 32);
nir_def_init(&lowered[1]->instr, &lowered[1]->def, num_components, 32);
b->cursor = nir_before_instr(&phi->instr);
nir_builder_instr_insert(b, &lowered[0]->instr);
nir_builder_instr_insert(b, &lowered[1]->instr);
b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
nir_def_rewrite_uses(&phi->dest.ssa, merged);
nir_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->def, &lowered[1]->def);
nir_def_rewrite_uses(&phi->def, merged);
nir_instr_remove(&phi->instr);
}
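split_phi above rebuilds a 64-bit phi as two 32-bit phis and re-packs the merged halves. The split/pack pair it relies on, as a sketch (roundtrip_64 is illustrative):

#include "nir_builder.h"

/* Illustrative: split a 64-bit value into 32-bit halves and reassemble
 * it, mirroring what split_phi does around each phi source and dest. */
static nir_def *
roundtrip_64(nir_builder *b, nir_def *val64)
{
   nir_def *x = nir_unpack_64_2x32_split_x(b, val64); /* low half */
   nir_def *y = nir_unpack_64_2x32_split_y(b, val64); /* high half */
   return nir_pack_64_2x32_split(b, x, y);
}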
@ -343,7 +343,7 @@ lower_64bit_phi_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
nir_phi_instr *phi = nir_instr_as_phi(instr);
if (phi->dest.ssa.bit_size <= 32)
if (phi->def.bit_size <= 32)
return false;
split_phi(b, phi);

View file

@ -80,17 +80,17 @@ lower_bitmap(nir_shader *shader, nir_builder *b,
tex->coord_components = 2;
tex->dest_type = nir_type_float32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
&tex_deref->dest.ssa);
&tex_deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
&tex_deref->dest.ssa);
&tex_deref->def);
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(b, texcoord, tex->coord_components));
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
/* kill if tex != 0.0.. take .x or .w channel according to format: */
cond = nir_fneu_imm(b, nir_channel(b, &tex->dest.ssa, options->swizzle_xxxx ? 0 : 3),
cond = nir_fneu_imm(b, nir_channel(b, &tex->def, options->swizzle_xxxx ? 0 : 3),
0.0);
nir_discard_if(b, cond);

View file

@ -340,7 +340,7 @@ lower_load_const_instr(nir_load_const_instr *load)
static bool
lower_phi_instr(nir_builder *b, nir_phi_instr *phi)
{
if (phi->dest.ssa.bit_size != 1)
if (phi->def.bit_size != 1)
return false;
/* Ensure all phi sources have a canonical bit-size. We choose the
@ -363,7 +363,7 @@ lower_phi_instr(nir_builder *b, nir_phi_instr *phi)
}
}
phi->dest.ssa.bit_size = dst_bit_size;
phi->def.bit_size = dst_bit_size;
return true;
}
@ -372,7 +372,7 @@ static bool
lower_tex_instr(nir_tex_instr *tex)
{
bool progress = false;
rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress);
rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress);
if (tex->dest_type == nir_type_bool1) {
tex->dest_type = nir_type_bool32;
progress = true;

View file

@ -198,7 +198,7 @@ static bool
lower_tex_instr(nir_tex_instr *tex)
{
bool progress = false;
rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress);
rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress);
if (tex->dest_type == nir_type_bool1) {
tex->dest_type = nir_type_bool32;
progress = true;

View file

@ -166,7 +166,7 @@ static bool
lower_tex_instr(nir_tex_instr *tex)
{
bool progress = false;
rewrite_1bit_ssa_def_to_32bit(&tex->dest.ssa, &progress);
rewrite_1bit_ssa_def_to_32bit(&tex->def, &progress);
if (tex->dest_type == nir_type_bool1) {
tex->dest_type = nir_type_bool32;
progress = true;

View file

@ -188,8 +188,8 @@ nir_lower_cl_images(nir_shader *shader, bool lower_image_derefs, bool lower_samp
b.cursor = nir_instr_remove(&deref->instr);
nir_def *loc =
nir_imm_intN_t(&b, deref->var->data.driver_location,
deref->dest.ssa.bit_size);
nir_def_rewrite_uses(&deref->dest.ssa, loc);
deref->def.bit_size);
nir_def_rewrite_uses(&deref->def, loc);
progress = true;
break;
}

View file

@ -402,7 +402,7 @@ nir_lower_const_arrays_to_uniforms(nir_shader *shader,
nir_def *new_def = nir_load_deref(&b, new_deref_instr);
nir_def_rewrite_uses(&intrin->dest.ssa, new_def);
nir_def_rewrite_uses(&intrin->def, new_def);
nir_instr_remove(&intrin->instr);
}
}

View file

@ -62,7 +62,7 @@ lower_convert_alu_types_instr(nir_builder *b, nir_intrinsic_instr *conv)
nir_intrinsic_dest_type(conv),
nir_intrinsic_rounding_mode(conv),
nir_intrinsic_saturate(conv));
nir_def_rewrite_uses(&conv->dest.ssa, val);
nir_def_rewrite_uses(&conv->def, val);
}
static bool

View file

@ -67,7 +67,7 @@ nir_lower_demote_to_discard_instr(nir_builder *b, nir_instr *instr, void *data)
* we can assume there are none */
b->cursor = nir_before_instr(instr);
nir_def *zero = nir_imm_false(b);
nir_def_rewrite_uses(&intrin->dest.ssa, zero);
nir_def_rewrite_uses(&intrin->def, zero);
nir_instr_remove_v(instr);
return true;
}
@ -117,7 +117,7 @@ nir_lower_load_helper_to_is_helper(nir_builder *b, nir_instr *instr, void *data)
* top-level blocks to ensure correct behavior w.r.t. loops */
if (is_helper == NULL)
is_helper = insert_is_helper(b, instr);
nir_def_rewrite_uses(&intrin->dest.ssa, is_helper);
nir_def_rewrite_uses(&intrin->def, is_helper);
nir_instr_remove_v(instr);
return true;
default:

View file

@ -619,7 +619,7 @@ lower_doubles_instr_to_soft(nir_builder *b, nir_alu_instr *instr,
nir_variable *ret_tmp =
nir_local_variable_create(b->impl, return_type, "return_tmp");
nir_deref_instr *ret_deref = nir_build_deref_var(b, ret_tmp);
params[0] = &ret_deref->dest.ssa;
params[0] = &ret_deref->def;
assert(nir_op_infos[instr->op].num_inputs + 1 == func->num_params);
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {

View file

@ -111,16 +111,16 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
tex->coord_components = 2;
tex->dest_type = nir_type_float32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
&tex_deref->dest.ssa);
&tex_deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
&tex_deref->dest.ssa);
&tex_deref->def);
tex->src[2] =
nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(b, texcoord, tex->coord_components));
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
def = &tex->dest.ssa;
def = &tex->def;
/* Apply the scale and bias. */
if (state->options->scale_and_bias) {
@ -152,15 +152,15 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
tex->texture_index = state->options->pixelmap_sampler;
tex->dest_type = nir_type_float32;
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref,
&pixelmap_deref->dest.ssa);
&pixelmap_deref->def);
tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref,
&pixelmap_deref->dest.ssa);
&pixelmap_deref->def);
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_trim_vector(b, def, 2));
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
def_xy = &tex->dest.ssa;
def_xy = &tex->def;
/* TEX def.zw, def.zwww, pixelmap_sampler, 2D; */
tex = nir_tex_instr_create(state->shader, 1);
@ -172,9 +172,9 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_coord,
nir_channels(b, def, 0xc));
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
def_zw = &tex->dest.ssa;
def_zw = &tex->def;
/* def = vec4(def.xy, def.zw); */
def = nir_vec4(b,
@ -184,7 +184,7 @@ lower_color(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_instr *
nir_channel(b, def_zw, 1));
}
nir_def_rewrite_uses(&intr->dest.ssa, def);
nir_def_rewrite_uses(&intr->def, def);
return true;
}
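Every texture op built in this pass follows the same post-rename shape: fill in the sources, nir_def_init on tex->def, insert, then read &tex->def. A condensed sketch (emit_tex2d is hypothetical and assumes a combined texture/sampler deref plus a 2-component coordinate):

#include "nir_builder.h"

/* Hypothetical: emit a basic 2D texture fetch against a combined
 * texture/sampler deref; coord must have two components. */
static nir_def *
emit_tex2d(nir_builder *b, nir_deref_instr *deref, nir_def *coord)
{
   nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3);
   tex->op = nir_texop_tex;
   tex->sampler_dim = GLSL_SAMPLER_DIM_2D;
   tex->coord_components = 2;
   tex->dest_type = nir_type_float32;
   tex->src[0] = nir_tex_src_for_ssa(nir_tex_src_texture_deref, &deref->def);
   tex->src[1] = nir_tex_src_for_ssa(nir_tex_src_sampler_deref, &deref->def);
   tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_coord, coord);
   nir_def_init(&tex->instr, &tex->def, 4, 32);
   nir_builder_instr_insert(b, &tex->instr);
   return &tex->def;
}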
@ -194,7 +194,7 @@ lower_texcoord(nir_builder *b, lower_drawpixels_state *state, nir_intrinsic_inst
b->cursor = nir_before_instr(&intr->instr);
nir_def *texcoord_const = get_texcoord_const(b, state);
nir_def_rewrite_uses(&intr->dest.ssa, texcoord_const);
nir_def_rewrite_uses(&intr->def, texcoord_const);
return true;
}

View file

@ -76,10 +76,10 @@ nir_lower_fb_read_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
tex->src[2] = nir_tex_src_for_ssa(nir_tex_src_texture_handle,
nir_imm_intN_t(b, io.location - FRAG_RESULT_DATA0, 32));
nir_def_init(&tex->instr, &tex->dest.ssa, 4, 32);
nir_def_init(&tex->instr, &tex->def, 4, 32);
nir_builder_instr_insert(b, &tex->instr);
nir_def_rewrite_uses(&intr->dest.ssa, &tex->dest.ssa);
nir_def_rewrite_uses(&intr->def, &tex->def);
return true;
}

View file

@ -202,7 +202,7 @@ lower_fp16_cast_impl(nir_builder *b, nir_instr *instr, void *data)
nir_intrinsic_dest_type(intrin) != nir_type_float16)
return false;
src = intrin->src[0].ssa;
dst = &intrin->dest.ssa;
dst = &intrin->def;
mode = nir_intrinsic_rounding_mode(intrin);
} else {
return false;

View file

@ -27,7 +27,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *data)
nir_def *vec = nir_vec4(b, nir_channel(b, xy, 0), nir_channel(b, xy, 1),
nir_load_frag_coord_zw(b, .component = 2),
nir_load_frag_coord_zw(b, .component = 3));
nir_def_rewrite_uses(&intr->dest.ssa, vec);
nir_def_rewrite_uses(&intr->def, vec);
return true;
}

View file

@ -60,10 +60,10 @@ lower_fragcoord_wtrans_impl(nir_builder *b, nir_instr *instr,
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
return nir_vec4(b,
nir_channel(b, &intr->dest.ssa, 0),
nir_channel(b, &intr->dest.ssa, 1),
nir_channel(b, &intr->dest.ssa, 2),
nir_frcp(b, nir_channel(b, &intr->dest.ssa, 3)));
nir_channel(b, &intr->def, 0),
nir_channel(b, &intr->def, 1),
nir_channel(b, &intr->def, 2),
nir_frcp(b, nir_channel(b, &intr->def, 3)));
}
bool

View file

@ -76,26 +76,26 @@ lower(nir_builder *b, nir_instr *instr, void *data)
*/
if (has_dest) {
nir_push_else(b, NULL);
undef = nir_undef(b, intr->dest.ssa.num_components,
intr->dest.ssa.bit_size);
undef = nir_undef(b, intr->def.num_components,
intr->def.bit_size);
}
nir_pop_if(b, NULL);
if (has_dest) {
nir_def *phi = nir_if_phi(b, &intr->dest.ssa, undef);
nir_def *phi = nir_if_phi(b, &intr->def, undef);
/* We can't use nir_def_rewrite_uses_after on phis, so use the global
* version and fix up the phi manually
*/
nir_def_rewrite_uses(&intr->dest.ssa, phi);
nir_def_rewrite_uses(&intr->def, phi);
nir_instr *phi_instr = phi->parent_instr;
nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
instr->block);
nir_instr_rewrite_src_ssa(phi->parent_instr, &phi_src->src,
&intr->dest.ssa);
&intr->def);
}
return true;

View file

@ -46,7 +46,7 @@ lower_cube_size(nir_builder *b, nir_intrinsic_instr *intrin)
nir_def *size = nir_instr_ssa_def(&_2darray_size->instr);
nir_scalar comps[NIR_MAX_VEC_COMPONENTS] = { 0 };
unsigned coord_comps = intrin->dest.ssa.num_components;
unsigned coord_comps = intrin->def.num_components;
for (unsigned c = 0; c < coord_comps; c++) {
if (c == 2) {
comps[2] = nir_get_ssa_scalar(nir_idiv(b, nir_channel(b, size, 2), nir_imm_int(b, 6)), 0);
@ -55,8 +55,8 @@ lower_cube_size(nir_builder *b, nir_intrinsic_instr *intrin)
}
}
nir_def *vec = nir_vec_scalars(b, comps, intrin->dest.ssa.num_components);
nir_def_rewrite_uses(&intrin->dest.ssa, vec);
nir_def *vec = nir_vec_scalars(b, comps, intrin->def.num_components);
nir_def_rewrite_uses(&intrin->def, vec);
nir_instr_remove(&intrin->instr);
nir_instr_free(&intrin->instr);
}
@ -149,11 +149,11 @@ lower_image_samples_identical_to_fragment_mask_load(nir_builder *b, nir_intrinsi
break;
}
nir_def_init(&fmask_load->instr, &fmask_load->dest.ssa, 1, 32);
nir_def_init(&fmask_load->instr, &fmask_load->def, 1, 32);
nir_builder_instr_insert(b, &fmask_load->instr);
nir_def *samples_identical = nir_ieq_imm(b, &fmask_load->dest.ssa, 0);
nir_def_rewrite_uses(&intrin->dest.ssa, samples_identical);
nir_def *samples_identical = nir_ieq_imm(b, &fmask_load->def, 0);
nir_def_rewrite_uses(&intrin->def, samples_identical);
nir_instr_remove(&intrin->instr);
nir_instr_free(&intrin->instr);
@ -206,8 +206,8 @@ lower_image_instr(nir_builder *b, nir_instr *instr, void *state)
case nir_intrinsic_bindless_image_samples: {
if (options->lower_image_samples_to_one) {
b->cursor = nir_after_instr(&intrin->instr);
nir_def *samples = nir_imm_intN_t(b, 1, intrin->dest.ssa.bit_size);
nir_def_rewrite_uses(&intrin->dest.ssa, samples);
nir_def *samples = nir_imm_intN_t(b, 1, intrin->def.bit_size);
nir_def_rewrite_uses(&intrin->def, samples);
return true;
}
return false;

View file

@ -43,7 +43,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
b->cursor = nir_before_instr(instr);
nir_atomic_op atomic_op = nir_intrinsic_atomic_op(intr);
enum pipe_format format = nir_intrinsic_format(intr);
unsigned bit_size = intr->dest.ssa.bit_size;
unsigned bit_size = intr->def.bit_size;
/* Even for "formatless" access, we know the size of the texel accessed,
* since it's the size of the atomic. We can use that to synthesize a
@ -93,7 +93,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *_)
/* Replace the image atomic with the global atomic. Remove the image
* explicitly because it has side effects so is not DCE'd.
*/
nir_def_rewrite_uses(&intr->dest.ssa, global);
nir_def_rewrite_uses(&intr->def, global);
nir_instr_remove(instr);
return true;
}

Some files were not shown because too many files have changed in this diff.